| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
xvedejas/limbo4
|
refs/heads/master
|
Lib/logging/handlers.py
|
11
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
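# A short sketch (not part of the original module) of the 'namer' and
# 'rotator' hooks described above; the filename and limits are hypothetical.
# It compresses each rotated file with gzip and gives it a '.gz' suffix:
#
#   import gzip, os
#
#   def gzip_rotator(source, dest):
#       with open(source, 'rb') as f_in, gzip.open(dest, 'wb') as f_out:
#           f_out.write(f_in.read())
#       os.remove(source)
#
#   handler = RotatingFileHandler('app.log', maxBytes=1000000, backupCount=3)
#   handler.namer = lambda default_name: default_name + '.gz'
#   handler.rotator = gzip_rotator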
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
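# A minimal usage sketch (illustrative; 'app.log' and the limits are
# hypothetical): keep the active file under roughly 1 MB with three backups.
#
#   handler = RotatingFileHandler('app.log', maxBytes=1000000, backupCount=3)
#   logging.getLogger().addHandler(handler)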
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
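# A minimal usage sketch (illustrative): rotate at local midnight and keep
# a week of backups; rotated files get the "%Y-%m-%d" suffix set above.
#
#   handler = TimedRotatingFileHandler('app.log', when='midnight',
#                                      backupCount=7)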
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except OSError as err:
if err.errno == errno.ENOENT:
sres = None
else:
raise
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
If the attribute *closeOnError* is set to True, then when a socket error
occurs the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
try:
s.connect((self.host, self.port))
return s
except socket.error:
s.close()
raise
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else: #pragma: no cover
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
finally:
self.release()
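# A sketch of a matching receiver (not part of this module; the server
# class and port choice are illustrative). SocketHandler sends a 4-byte
# big-endian length prefix followed by a pickled dict, so the receiving
# end can rebuild the record with logging.makeLogRecord:
#
#   import logging, pickle, socketserver, struct
#
#   class LogStreamHandler(socketserver.StreamRequestHandler):
#       def handle(self):
#           while True:
#               header = self.connection.recv(4)
#               if len(header) < 4:
#                   break
#               slen = struct.unpack('>L', header)[0]
#               data = self.connection.recv(slen)
#               while len(data) < slen:
#                   data += self.connection.recv(slen - len(data))
#               record = logging.makeLogRecord(pickle.loads(data))
#               logging.getLogger(record.name).handle(record)
#
#   socketserver.TCPServer(('localhost', DEFAULT_TCP_LOGGING_PORT),
#                          LogStreamHandler).serve_forever()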
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, SysLogHandler(address='/dev/log') can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
self._connect_unixsocket(address)
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
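# A minimal usage sketch (illustrative): send to the local syslog daemon
# over its Unix domain socket, tagged with the daemon facility.
#
#   handler = SysLogHandler(address='/dev/log',
#                           facility=SysLogHandler.LOG_DAEMON)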
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
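# A minimal usage sketch (illustrative; the host, addresses and
# credentials are hypothetical): mail ERROR records to an ops address,
# upgrading the connection with STARTTLS before authenticating.
#
#   handler = SMTPHandler(mailhost=('smtp.example.com', 587),
#                         fromaddr='app@example.com',
#                         toaddrs=['ops@example.com'],
#                         subject='Application error',
#                         credentials=('user', 'secret'),
#                         secure=())  # empty tuple: starttls() with no keyfile
#   handler.setLevel(logging.ERROR)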
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Override this in your class if needed.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
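# A minimal usage sketch (illustrative; the host and path are
# hypothetical): POST each record's attribute dict to a web endpoint.
#
#   handler = HTTPHandler('logs.example.com:8080', '/log', method='POST')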
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
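# A minimal usage sketch (illustrative; 'debug.log' is hypothetical):
# buffer up to 100 records and flush them to a file whenever the buffer
# fills or a record at ERROR or above arrives.
#
#   target = logging.FileHandler('debug.log')
#   handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
#                           target=target)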
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy-pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self , record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
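# A minimal usage sketch (illustrative): push records through a queue so
# handler I/O runs on the listener's internal thread, not the caller's.
#
#   import queue
#   q = queue.Queue(-1)
#   listener = QueueListener(q, logging.StreamHandler())
#   logging.getLogger().addHandler(QueueHandler(q))
#   listener.start()
#   logging.error('something happened')  # handled on the listener thread
#   listener.stop()                      # drain the queue before exit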
|
BotoX/Dahua-Firmware-Mod-Kit
|
refs/heads/master
|
configs/SD-Zi-Themis.py
|
1
|
from .config import *
DAHUA_FILES = OrderedDict([
("Install", {
"required": True,
"type": DAHUA_TYPE.Plain
}),
("kernel.img", {
"required": True,
"type": DAHUA_TYPE.Plain,
"size": 0x00500000
}),
("dhboot-min.bin.img", {
"required": True,
"type": DAHUA_TYPE.Plain,
"size": 0x00040000
}),
("dhboot.bin.img", {
"required": True,
"type": DAHUA_TYPE.Plain,
"size": 0x00040000
}),
("romfs-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00150000
}),
("user-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00830000
}),
("web-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00210000
}),
("pd-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00010000
}),
("custom-x.squashfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.SquashFS,
"size": 0x00020000
}),
("partition-x.cramfs.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.CramFS,
"size": 0x00010000
}),
("check.img", {
"required": True,
"type": DAHUA_TYPE.uImage | DAHUA_TYPE.Plain
})
])
|
DarkPurple141/Maps
|
refs/heads/master
|
MapGen/geom.py
|
1
|
#!/usr/bin/env python3
import random
"""
Map generating code using voronoi polygons.
Copyright (C) 2017 Alexander Walker Hinds
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class Point:
"""docstring for Point."""
def __init__(self,x,y,ide=-1):
self.x = x
self.y = y
self.id = ide
def midpoint(self,b):
return Point(
(self.x+b.x)//2,
(self.y+b.y)//2)
def get_cords(self):
return (self.x,self.y)
def distance(self,point):
# note: returns the squared distance (no square root taken)
return ((self.y-point.y)**2+(self.x-point.x)**2)
def __lt__(self, other):
selfPriority = self.id
otherPriority = other.id
return selfPriority < otherPriority
class Corner(Point):
def __init__(self,point):
Point.__init__(self,point.x,point.y)
self.elevation = None
self.neighbours = set()
self.v_neighbours = set()
self.river = False
self.city = False
self.coast = False
self.road = False
class Triangle:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
self.pts = [a,b,c]
def centre(self):
ab = self.a.midpoint(self.b)
centx = int(self.c.x + 2*(ab.x-self.c.x)/3)
centy = int(self.c.y + 2*(ab.y-self.c.y)/3)
return Point(centx,centy)
def main():
xp = []
yp = []
for i in range(3):
xp.append(random.randrange(15))
yp.append(random.randrange(15))
points = []
for i in range(3):
points.append(Point(xp.pop(),yp.pop()))
print([i.get_cords() for i in points])
tri = Triangle(points.pop(),points.pop(),points.pop())
mid = tri.centre()
for y in range(15):
for x in range(15):
if (tri.a.x == x and tri.a.y == y):
print('A',end="")
elif (tri.b.x == x and tri.b.y == y):
print('B',end="")
elif (tri.c.x == x and tri.c.y == y):
print('C',end="")
elif (mid.x == x and mid.y == y):
print(" .",end="")
else:
print(" ",end="")
print("")
if __name__ == '__main__':
main()
|
sbalde/edxplatform
|
refs/heads/master
|
common/djangoapps/embargo/migrations/0004_migrate_embargo_config.py
|
102
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Move the current course embargo configuration to the new models. """
for old_course in orm.EmbargoedCourse.objects.all():
new_course, __ = orm.RestrictedCourse.objects.get_or_create(course_key=old_course.course_id)
# Set the message keys to 'embargo'
new_course.enroll_msg_key = 'embargo'
new_course.access_msg_key = 'embargo'
new_course.save()
for country in self._embargoed_countries_list(orm):
country_model = orm.Country.objects.get(country=country)
orm.CountryAccessRule.objects.get_or_create(
country=country_model,
rule_type='blacklist',
restricted_course=new_course
)
def backwards(self, orm):
"""No backwards migration required since the forward migration is idempotent. """
pass
def _embargoed_countries_list(self, orm):
"""Retrieve the list of embargoed countries from the existing tables. """
# We need to replicate some application logic here, because South
# doesn't give us access to class methods on the Django model objects.
try:
current_config = orm.EmbargoedState.objects.order_by('-change_date')[0]
if current_config.enabled and current_config.embargoed_countries:
return [
country.strip().upper() for country
in current_config.embargoed_countries.split(',')
]
except IndexError:
pass
return []
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo']
symmetrical = True
|
DeadSix27/python_cross_compile_script
|
refs/heads/master
|
packages/products/ffmpeg_shared.py
|
1
|
{
'repo_type' : 'git',
'url' : 'git://git.ffmpeg.org/ffmpeg.git',
'rename_folder' : 'ffmpeg_shared_git',
'configure_options' : '!VAR(ffmpeg_config)VAR! !VAR(ffmpeg_nonfree)VAR! --prefix={output_prefix}/ffmpeg_shared_git.installed --enable-opencl --enable-sdl --enable-shared --disable-static --disable-libbluray --disable-libgme',
'depends_on' : [ 'ffmpeg_depends', 'ffmpeg_depends_nonfree', 'sdl2', 'opencl_icd'],
'_info' : { 'version' : None, 'fancy_name' : 'ffmpeg (shared)' },
}
|
chunying/ffmpeg3
|
refs/heads/master
|
module/opencv.py
|
1
|
class opencv:
name = "OpenCV 3.2.0"
url = "https://github.com/opencv/opencv/archive/3.2.0.zip"
dirname = "opencv-3.2.0"
#ffmpeg_opts = [ "--enable-libopencv" ] # temporarily disabled for ffmpeg?
def skip(self, prefix, force):
if force: return False;
if file_exist(prefix + "/include/opencv/cv.h"): return True;
return False;
def configure(self, prefix):
runcmd("mkdir -p build");
runcmd("cd build; cmake -DCMAKE_INSTALL_PREFIX={} ..".format(prefix));
def make(self, prefix, opts):
runcmd("cd build; make {}".format(opts));
def install(self, prefix):
runcmd("cd build; make install");
deps.append(opencv());
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
shivam1111/odoo
|
refs/heads/8.0
|
addons/payment_ogone/tests/test_ogone.py
|
430
|
# -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
def setUp(self):
super(OgonePayment, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# get the adyen account
model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
def test_10_ogone_form_render(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering + shasign
# ----------------------------------------
form_values = {
'PSPID': 'dummy',
'ORDERID': 'test_ref0',
'AMOUNT': '1',
'CURRENCY': 'EUR',
'LANGUAGE': 'en_US',
'CN': 'Norbert Buyer',
'EMAIL': 'norbert.buyer@example.com',
'OWNERZIP': '1000',
'OWNERADDRESS': 'Huge Street 2/543',
'OWNERCTY': 'Belgium',
'OWNERTOWN': 'Sin City',
'OWNERTELNO': '0032 12 34 56 78',
'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
}
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'test_ref0', 0.01, self.currency_euro_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref0',
'partner_id': self.buyer_id,
}, context=context
)
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'should_be_erased', 0.01, self.currency_euro,
tx_id=tx_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
def test_20_ogone_form_management(self):
cr, uid, context = self.cr, self.uid, {}
        # make sure we are running against the test environment
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# typical data posted by ogone after client has successfully paid
ogone_post_data = {
'orderID': u'test_ref_2',
'STATUS': u'9',
'CARDNO': u'XXXXXXXXXXXX0002',
'PAYID': u'25381582',
'CN': u'Norbert Buyer',
'NCERROR': u'0',
'TRXDATE': u'11/15/13',
'IP': u'85.201.233.72',
'BRAND': u'VISA',
'ACCEPTANCE': u'test123',
'currency': u'EUR',
'amount': u'1.95',
'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
'ED': u'0315',
'PM': u'CreditCard'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
        # the ogone post data is now valid: tamper with the SHASIGN so validation must fail
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# simulate an error
ogone_post_data['STATUS'] = 2
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
def test_30_ogone_s2s(self):
test_ref = 'test_ref_%.15f' % time.time()
cr, uid, context = self.cr, self.uid, {}
        # make sure we are running against the test environment
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': test_ref,
'partner_id': self.buyer_id,
'type': 'server2server',
}, context=context
)
# create an alias
res = self.payment_transaction.ogone_s2s_create_alias(
cr, uid, tx_id, {
'expiry_date_mm': '01',
'expiry_date_yy': '2015',
'holder_name': 'Norbert Poilu',
'number': '4000000000000002',
'brand': 'VISA',
}, context=context)
# check an alias is set, containing at least OPENERP
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
# print res
# {
# 'orderID': u'reference',
# 'STATUS': u'9',
# 'CARDNO': u'XXXXXXXXXXXX0002',
# 'PAYID': u'24998692',
# 'CN': u'Norbert Poilu',
# 'NCERROR': u'0',
# 'TRXDATE': u'11/05/13',
# 'IP': u'85.201.233.72',
# 'BRAND': u'VISA',
# 'ACCEPTANCE': u'test123',
# 'currency': u'EUR',
# 'amount': u'1.95',
# 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
# 'ED': u'0314',
# 'PM': u'CreditCard'
# }
|
TheTypoMaster/chromium-crosswalk
|
refs/heads/master
|
build/android/pylib/gtest/local_device_gtest_run.py
|
27
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import logging
import os
import posixpath
from pylib import constants
from pylib import ports
from pylib.base import test_run
from pylib.device import device_errors
from pylib.gtest import gtest_test_instance
from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.utils import device_temp_file
_COMMAND_LINE_FLAGS_SUPPORTED = True
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.NativeTestActivity.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
'org.chromium.native_test.NativeTestActivity.CommandLineFlags')
_EXTRA_TEST_LIST = (
'org.chromium.native_test.NativeTestInstrumentationTestRunner'
'.TestList')
_MAX_SHARD_SIZE = 256
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
'components_browsertests', 'content_unittests', 'content_browsertests',
'net_unittests', 'unit_tests'
]
# TODO(jbudorick): Move this inside _ApkDelegate once TestPackageApk is gone.
def PullAppFilesImpl(device, package, files, directory):
device_dir = device.GetApplicationDataDirectory(package)
host_dir = os.path.join(directory, str(device))
for f in files:
device_file = posixpath.join(device_dir, f)
host_file = os.path.join(host_dir, *f.split(posixpath.sep))
host_file_base, ext = os.path.splitext(host_file)
for i in itertools.count():
host_file = '%s_%d%s' % (host_file_base, i, ext)
if not os.path.exists(host_file):
break
device.PullFile(device_file, host_file)
class _ApkDelegate(object):
def __init__(self, test_instance):
self._activity = test_instance.activity
self._apk = test_instance.apk
self._package = test_instance.package
self._runner = test_instance.runner
self._component = '%s/%s' % (self._package, self._runner)
self._extras = test_instance.extras
def Install(self, device):
device.Install(self._apk)
def Run(self, test, device, flags=None, **kwargs):
extras = dict(self._extras)
with device_temp_file.DeviceTempFile(device.adb) as command_line_file:
device.WriteFile(command_line_file.name, '_ %s' % flags if flags else '_')
extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
with device_temp_file.DeviceTempFile(device.adb) as test_list_file:
if test:
device.WriteFile(test_list_file.name, '\n'.join(test))
extras[_EXTRA_TEST_LIST] = test_list_file.name
return device.StartInstrumentation(
self._component, extras=extras, raw=False, **kwargs)
def PullAppFiles(self, device, files, directory):
PullAppFilesImpl(device, self._package, files, directory)
def Clear(self, device):
device.ClearApplicationState(self._package)
class _ExeDelegate(object):
def __init__(self, tr, exe):
self._exe_host_path = exe
self._exe_file_name = os.path.split(exe)[-1]
self._exe_device_path = '%s/%s' % (
constants.TEST_EXECUTABLE_DIR, self._exe_file_name)
deps_host_path = self._exe_host_path + '_deps'
if os.path.exists(deps_host_path):
self._deps_host_path = deps_host_path
self._deps_device_path = self._exe_device_path + '_deps'
else:
self._deps_host_path = None
self._test_run = tr
def Install(self, device):
# TODO(jbudorick): Look into merging this with normal data deps pushing if
# executables become supported on nonlocal environments.
host_device_tuples = [(self._exe_host_path, self._exe_device_path)]
if self._deps_host_path:
host_device_tuples.append((self._deps_host_path, self._deps_device_path))
device.PushChangedFiles(host_device_tuples)
def Run(self, test, device, flags=None, **kwargs):
cmd = [
self._test_run.GetTool(device).GetTestWrapper(),
self._exe_device_path,
]
if test:
cmd.append('--gtest_filter=%s' % ':'.join(test))
if flags:
cmd.append(flags)
cwd = constants.TEST_EXECUTABLE_DIR
env = {
'LD_LIBRARY_PATH':
'%s/%s_deps' % (constants.TEST_EXECUTABLE_DIR, self._exe_file_name),
}
try:
gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
external = device.GetExternalStoragePath()
env['GCOV_PREFIX'] = '%s/gcov' % external
env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
except (device_errors.CommandFailedError, KeyError):
pass
# TODO(jbudorick): Switch to just RunShellCommand once perezju@'s CL
# for long shell commands lands.
with device_temp_file.DeviceTempFile(device.adb) as script_file:
script_contents = ' '.join(cmd)
logging.info('script contents: %r' % script_contents)
device.WriteFile(script_file.name, script_contents)
output = device.RunShellCommand(['sh', script_file.name], cwd=cwd,
env=env, **kwargs)
return output
def PullAppFiles(self, device, files, directory):
pass
def Clear(self, device):
device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True)
class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
super(LocalDeviceGtestRun, self).__init__(env, test_instance)
if self._test_instance.apk:
self._delegate = _ApkDelegate(self._test_instance)
elif self._test_instance.exe:
self._delegate = _ExeDelegate(self, self._test_instance.exe)
self._servers = {}
#override
def TestPackage(self):
return self._test_instance.suite
#override
def SetUp(self):
def individual_device_set_up(dev, host_device_tuples):
# Install test APK.
self._delegate.Install(dev)
# Push data dependencies.
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, d if d is not None else external_storage)
for h, d in host_device_tuples]
dev.PushChangedFiles(host_device_tuples)
self._servers[str(dev)] = []
if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
self._servers[str(dev)].append(
local_test_server_spawner.LocalTestServerSpawner(
ports.AllocateTestServerPort(), dev, self.GetTool(dev)))
for s in self._servers[str(dev)]:
s.SetUp()
self._env.parallel_devices.pMap(individual_device_set_up,
self._test_instance.GetDataDependencies())
#override
def _ShouldShard(self):
return True
#override
def _CreateShards(self, tests):
device_count = len(self._env.devices)
shards = []
for i in xrange(0, device_count):
unbounded_shard = tests[i::device_count]
shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
return shards
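  # Editor's sketch of the sharding above (hypothetical numbers): with 2
  # devices and _MAX_SHARD_SIZE = 256, tests[0::2] and tests[1::2] first split
  # the tests round-robin per device, then each per-device list is chunked
  # into shards of at most 256 tests.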
#override
def _GetTests(self):
tests = self._delegate.Run(
None, self._env.devices[0], flags='--gtest_list_tests')
tests = gtest_test_instance.ParseGTestListTests(tests)
tests = self._test_instance.FilterTests(tests)
return tests
#override
def _RunTest(self, device, test):
# Run the test.
output = self._delegate.Run(
test, device, timeout=900, retries=0)
for s in self._servers[str(device)]:
s.Reset()
if self._test_instance.app_files:
self._delegate.PullAppFiles(device, self._test_instance.app_files,
self._test_instance.app_file_dir)
self._delegate.Clear(device)
# Parse the output.
# TODO(jbudorick): Transition test scripts away from parsing stdout.
results = self._test_instance.ParseGTestOutput(output)
return results
#override
def TearDown(self):
def individual_device_tear_down(dev):
for s in self._servers[str(dev)]:
s.TearDown()
self._env.parallel_devices.pMap(individual_device_tear_down)
|
fwmiller/Conserver-Freescale-Linux-U-boot
|
refs/heads/master
|
rpm/BUILD/linux-3.0.35/scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the function names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
	Wait a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
		has the name given by func. If no ancestor carries that
		name, create the function as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
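# Editor's note: a typical function-tracer line such as
#   "bash-4251  [001]  6306.211718: mutex_unlock <-tracing_open"
# parses to ('6306.211718', 'mutex_unlock', 'tracing_open'); the sample line
# is illustrative, and the accepted format is whatever the regex above matches.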
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
PetePriority/home-assistant
|
refs/heads/dev
|
homeassistant/components/folder_watcher/__init__.py
|
6
|
"""
Component for monitoring activity on a folder.
For more details about this platform, refer to the documentation at
https://home-assistant.io/components/folder_watcher/
"""
import os
import logging
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['watchdog==0.8.3']
_LOGGER = logging.getLogger(__name__)
CONF_FOLDER = 'folder'
CONF_PATTERNS = 'patterns'
DEFAULT_PATTERN = '*'
DOMAIN = "folder_watcher"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_FOLDER): cv.isdir,
vol.Optional(CONF_PATTERNS, default=[DEFAULT_PATTERN]):
vol.All(cv.ensure_list, [cv.string]),
})])
}, extra=vol.ALLOW_EXTRA)
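# Example configuration.yaml entry (editor's sketch; the folder path is
# hypothetical and must be inside Home Assistant's whitelisted paths):
#
# folder_watcher:
#   - folder: /config/www
#     patterns:
#       - '*.yaml'
#       - '*.txt'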
def setup(hass, config):
"""Set up the folder watcher."""
conf = config[DOMAIN]
for watcher in conf:
path = watcher[CONF_FOLDER]
patterns = watcher[CONF_PATTERNS]
if not hass.config.is_allowed_path(path):
_LOGGER.error("folder %s is not valid or allowed", path)
return False
Watcher(path, patterns, hass)
return True
def create_event_handler(patterns, hass):
"""Return the Watchdog EventHandler object."""
from watchdog.events import PatternMatchingEventHandler
class EventHandler(PatternMatchingEventHandler):
"""Class for handling Watcher events."""
def __init__(self, patterns, hass):
"""Initialise the EventHandler."""
super().__init__(patterns)
self.hass = hass
def process(self, event):
"""On Watcher event, fire HA event."""
_LOGGER.debug("process(%s)", event)
if not event.is_directory:
folder, file_name = os.path.split(event.src_path)
self.hass.bus.fire(
DOMAIN, {
"event_type": event.event_type,
'path': event.src_path,
'file': file_name,
'folder': folder,
})
def on_modified(self, event):
"""File modified."""
self.process(event)
def on_moved(self, event):
"""File moved."""
self.process(event)
def on_created(self, event):
"""File created."""
self.process(event)
def on_deleted(self, event):
"""File deleted."""
self.process(event)
return EventHandler(patterns, hass)
class Watcher():
"""Class for starting Watchdog."""
def __init__(self, path, patterns, hass):
"""Initialise the watchdog observer."""
from watchdog.observers import Observer
self._observer = Observer()
self._observer.schedule(
create_event_handler(patterns, hass),
path,
recursive=True)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, self.startup)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.shutdown)
def startup(self, event):
"""Start the watcher."""
self._observer.start()
def shutdown(self, event):
"""Shutdown the watcher."""
self._observer.stop()
self._observer.join()
|
boberfly/gaffer
|
refs/heads/master
|
python/GafferTest/ProcessMessageHandlerTest.py
|
8
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design Inc nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import inspect
import unittest
import IECore
import Gaffer
import GafferTest
class ProcessMessageHandlerTest( GafferTest.TestCase ) :
def testMessageOutSideProcessIsForwardedUnmodified( self ) :
capturingMessageHandler = IECore.CapturingMessageHandler()
messageHandler = Gaffer.ProcessMessageHandler( capturingMessageHandler )
# if we log a message outside a compute or hash Process then we only get the original message
messageHandler.handle(IECore.MessageHandler.Level.Debug, "sending out an SOS", "message in a bottle")
self.assertEqual(len( capturingMessageHandler.messages ), 1 )
self.assertEqual(capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Debug)
self.assertEqual(capturingMessageHandler.messages[0].context, "sending out an SOS")
self.assertEqual(capturingMessageHandler.messages[0].message, "message in a bottle")
def testMessageInProcessGetExtraDebugInfo( self ) :
capturingMessageHandler = IECore.CapturingMessageHandler()
messageHandler = Gaffer.ProcessMessageHandler( capturingMessageHandler )
scriptNode = Gaffer.ScriptNode()
expression = Gaffer.Expression( "Expression" )
node = Gaffer.Node( "Node" )
node["user"].addChild( Gaffer.IntPlug( "test", defaultValue = 0, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) )
scriptNode.addChild(expression)
scriptNode.addChild(node)
expression.setExpression( inspect.cleandoc(
"""
import IECore
IECore.MessageHandler.output( IECore.MessageHandler.Level.Error, "testA", "testB" )
parent["Node"]["user"]["test"] = len( context.get( "scene:path", [] ) )
"""
) )
with Gaffer.Context() as context :
with messageHandler :
self.assertEqual( node['user']['test'].getValue(), 0 )
self.assertEqual( len( capturingMessageHandler.messages ), 2 )
self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error )
self.assertEqual( capturingMessageHandler.messages[0].context, "testA" )
self.assertEqual( capturingMessageHandler.messages[0].message, "testB" )
self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug )
self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" )
self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', frame: 1 ]" )
del capturingMessageHandler.messages[:]
context["scene:path"] = IECore.InternedStringVectorData( [ "a", "b" ] )
self.assertEqual( node['user']['test'].getValue(), 2 )
self.assertEqual( len( capturingMessageHandler.messages ), 2 )
self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error )
self.assertEqual( capturingMessageHandler.messages[0].context, "testA" )
self.assertEqual( capturingMessageHandler.messages[0].message, "testB" )
self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug )
self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" )
self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', frame: 1, path: '/a/b' ]" )
del capturingMessageHandler.messages[:]
del context["frame"]
context["scene:path"] = IECore.InternedStringVectorData( [ "a", "b", "c" ] )
self.assertEqual( node['user']['test'].getValue(), 3 )
self.assertEqual( len( capturingMessageHandler.messages ), 2 )
self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error )
self.assertEqual( capturingMessageHandler.messages[0].context, "testA" )
self.assertEqual( capturingMessageHandler.messages[0].message, "testB" )
self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug )
self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" )
self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', path: '/a/b/c' ]" )
if __name__ == "__main__":
unittest.main()
|
taaviteska/django
|
refs/heads/master
|
tests/template_backends/apps/good/templatetags/good_tags.py
|
1426
|
from django.template import Library
register = Library()
|
ChawalitK/odoo
|
refs/heads/master
|
addons/l10n_hu/migrations/2.0/post-migrate_tags_on_taxes.py
|
536
|
from openerp.modules.registry import RegistryManager
def migrate(cr, version):
registry = RegistryManager.get(cr.dbname)
from openerp.addons.account.models.chart_template import migrate_tags_on_taxes
migrate_tags_on_taxes(cr, registry)
|
hackebrot/pytest-cookies
|
refs/heads/master
|
tests/test_help_message.py
|
1
|
# -*- coding: utf-8 -*-
def test_cookies_group(testdir):
result = testdir.runpytest("--help")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["cookies:", "*--template=TEMPLATE*"])
|
defionscode/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/known_hosts.py
|
26
|
#!/usr/bin/python
# Copyright(c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
    - The C(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
      This is useful if you intend to use the M(git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(template) module more useful.
version_added: "1.9"
options:
name:
aliases: [ 'host' ]
description:
- The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
required: true
key:
description:
- The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed).
The key must be in the right format for ssh (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT").
Specifically, the key should not match the format that is found in an SSH pubkey file, but should rather have the hostname prepended to a
line that includes the pubkey, the same way that it would appear in the known_hosts file. The value prepended to the line must also match
the value of the name parameter.
path:
description:
- The known_hosts file to edit
default: "(homedir)+/.ssh/known_hosts"
hash_host:
description:
- Hash the hostname in the known_hosts file
type: bool
default: 'no'
version_added: "2.3"
state:
description:
- I(present) to add the host key, I(absent) to remove it.
choices: [ "present", "absent" ]
default: present
requirements: [ ]
author: "Matthew Vernon (@mcv21)"
'''
EXAMPLES = '''
- name: tell the host about our servers it might want to ssh to
known_hosts:
path: /etc/ssh/ssh_known_hosts
name: foo.com.invalid
key: "{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
'''
# Makes sure public host keys are present or absent in the given known_hosts
# file.
#
# Arguments
# =========
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
# state = absent|present (default: present)
import base64
import hashlib
import hmac
import os
import os.path
import tempfile
import errno
import re
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.basic import AnsibleModule
def enforce_state(module, params):
"""
Add or remove key.
"""
host = params["name"].lower()
key = params.get("key", None)
path = params.get("path")
hash_host = params.get("hash_host")
state = params.get("state")
# Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen", True)
if not key and state != "absent":
module.fail_json(msg="No key specified when adding a host")
if key and hash_host:
key = hash_host_key(host, key)
# Trailing newline in files gets lost, so re-add if necessary
if key and not key.endswith('\n'):
key += '\n'
sanity_check(module, host, key, sshkeygen)
found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
# We will change state if found==True & state!="present"
# or found==False & state=="present"
# i.e found XOR (state=="present")
# Alternatively, if replace is true (i.e. key present, and we must change
# it)
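    # Spelled out (editor's note):
    #   found=True,  state=absent  -> remove the entry
    #   found=False, state=present -> add the entry
    #   found=True,  state=present -> change only if replace_or_add
    #   found=False, state=absent  -> nothing to do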
if module.check_mode:
module.exit_json(changed=replace_or_add or (state == "present") != found,
diff=params['diff'])
# Now do the work.
# Only remove whole host if found and no key provided
if found and not key and state == "absent":
module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
params['changed'] = True
# Next, add a new (or replacing) entry
if replace_or_add or found != (state == "present"):
try:
inf = open(path, "r")
except IOError as e:
if e.errno == errno.ENOENT:
inf = None
else:
module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
try:
with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
if inf is not None:
for line_number, line in enumerate(inf):
if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
continue # skip this line to replace its key
outf.write(line)
inf.close()
if state == 'present':
outf.write(key)
except (IOError, OSError) as e:
module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
else:
module.atomic_move(outf.name, path)
params['changed'] = True
return params
def sanity_check(module, host, key, sshkeygen):
'''Check supplied key is sensible
    host and key are parameters provided by the user; if the host
provided is inconsistent with the key supplied, then this function
quits, providing an error to the user.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
# If no key supplied, we're doing a removal, and have nothing to check here.
if not key:
return
# Rather than parsing the key ourselves, get ssh-keygen to do it
# (this is essential for hashed keys, but otherwise useful, as the
# key question is whether ssh-keygen thinks the key matches the host).
# The approach is to write the key to a temporary file,
# and then attempt to look up the specified host in that file.
if re.search(r'\S+(\s+)?,(\s+)?', host):
module.fail_json(msg="Comma separated list of names is not supported. "
"Please pass a single name to lookup in the known_hosts file.")
with tempfile.NamedTemporaryFile(mode='w+') as outf:
try:
outf.write(key)
outf.flush()
except IOError as e:
module.fail_json(msg="Failed to write to temporary file %s: %s" %
(outf.name, to_native(e)))
sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
rc, stdout, stderr = module.run_command(sshkeygen_command)
if stdout == '': # host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
def search_for_host_key(module, host, key, path, sshkeygen):
'''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
Looks up host and keytype in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
found (Boolean): is host found in path?
replace_or_add (Boolean): is the key in path different to that supplied by user?
found_line (int or None): the line where a key of the same type was found
if found=False, then replace is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
if os.path.exists(path) is False:
return False, False, None
sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
# openssh >=6.4 has changed ssh-keygen behaviour such that it returns
# 1 if no host is found, whereas previously it returned 0
rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
return False, False, None # host not found, no other errors
if rc != 0: # something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
# If user supplied no key, we don't want to try and replace anything with it
if not key:
return True, False, None
lines = stdout.split('\n')
new_key = normalize_known_hosts_key(key)
for lnum, l in enumerate(lines):
if l == '':
continue
elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
try:
# This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
# It always outputs the non-localized comment before the found key
found_line = int(re.search(r'found: line (\d+)', l).group(1))
            except (AttributeError, IndexError):  # re.search returns None if the line does not match
module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
else:
found_key = normalize_known_hosts_key(l)
if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed
new_key['host'] = found_key['host']
if new_key == found_key: # found a match
return True, False, found_line # found exactly the same key, don't replace
elif new_key['type'] == found_key['type']: # found a different key for the same key type
return True, True, found_line
# No match found, return found and replace, but no line
return True, True, None
def hash_host_key(host, key):
hmac_key = os.urandom(20)
hashed_host = hmac.new(hmac_key, to_bytes(host), hashlib.sha1).digest()
parts = key.strip().split()
# @ indicates the optional marker field used for @cert-authority or @revoked
i = 1 if parts[0][0] == '@' else 0
parts[i] = '|1|%s|%s' % (to_native(base64.b64encode(hmac_key)), to_native(base64.b64encode(hashed_host)))
return ' '.join(parts)
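# Editor's note: the rewritten host field has the form
# '|1|<base64 salt>|<base64 HMAC-SHA1(salt, host)>', the same hashed format
# that 'ssh-keygen -H' writes to known_hosts.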
def normalize_known_hosts_key(key):
'''
Transform a key, either taken from a known_host file or provided by the
user, into a normalized form.
The host part (which might include multiple hostnames or be hashed) gets
replaced by the provided host. Also, any spurious information gets removed
from the end (like the username@host tag usually present in hostkeys, but
absent in known_hosts files)
'''
key = key.strip() # trim trailing newline
k = key.split()
d = dict()
# The optional "marker" field, used for @cert-authority or @revoked
if k[0][0] == '@':
d['options'] = k[0]
d['host'] = k[1]
d['type'] = k[2]
d['key'] = k[3]
else:
d['host'] = k[0]
d['type'] = k[1]
d['key'] = k[2]
return d
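# Illustration (editor's note, values hypothetical):
#   normalize_known_hosts_key('@revoked host.example ssh-rsa AAAAB3Nza... user@box')
#   -> {'options': '@revoked', 'host': 'host.example',
#       'type': 'ssh-rsa', 'key': 'AAAAB3Nza...'}
# The trailing 'user@box' comment is dropped, as the docstring describes.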
def compute_diff(path, found_line, replace_or_add, state, key):
diff = {
'before_header': path,
'after_header': path,
'before': '',
'after': '',
}
try:
inf = open(path, "r")
except IOError as e:
if e.errno == errno.ENOENT:
diff['before_header'] = '/dev/null'
else:
diff['before'] = inf.read()
inf.close()
lines = diff['before'].splitlines(1)
if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
del lines[found_line - 1]
if state == 'present' and (replace_or_add or found_line is None):
lines.append(key)
diff['after'] = ''.join(lines)
return diff
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['host']),
key=dict(required=False, type='str'),
path=dict(default="~/.ssh/known_hosts", type='path'),
hash_host=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
),
supports_check_mode=True
)
results = enforce_state(module, module.params)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
derekstavis/bluntly
|
refs/heads/master
|
vendor/github.com/youtube/vitess/test/cache_invalidation.py
|
2
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This test provides a blue print for a service using a caching layer.
We use an in-memory cache to simplify the implementation, but using a
distributed memcache pool for instance would work the same way. Note
we always use CAS (compare and swap) on the cache values, and ignore
the failures. That way multiple processes trying to affect the cache
won't be an issue.
It starts an invalidation thread, that mimics what a cache
invalidation process would do.
It also has an application layer that can also write entries to the
cache if necessary.
"""
import logging
import threading
import time
import unittest
import environment
import tablet
import utils
from vtdb import dbexceptions
from vtdb import event_token
from vtdb import proto3_encoding
from vtdb import vtgate_client
from vtproto import topodata_pb2
from vtproto import query_pb2
master_tablet = tablet.Tablet()
replica_tablet = tablet.Tablet()
_create_vt_a = '''create table if not exists vt_a (
id bigint,
name varchar(128),
primary key(id)
) Engine=InnoDB'''
_create_vt_b = '''create table if not exists vt_b (
id bigint,
address varchar(128),
primary key(id)
) Engine=InnoDB'''
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [master_tablet.init_mysql(),
replica_tablet.init_mysql()]
utils.wait_procs(setup_procs)
# start a vtctld so the vtctl insert commands are just RPCs, not forks.
utils.Vtctld().start()
# Start up a master mysql and vttablet
logging.debug('Setting up tablets')
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
master_tablet.init_tablet('replica', 'test_keyspace', '0', tablet_index=0)
replica_tablet.init_tablet('replica', 'test_keyspace', '0', tablet_index=1)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
master_tablet.create_db('vt_test_keyspace')
replica_tablet.create_db('vt_test_keyspace')
master_tablet.start_vttablet(wait_for_state=None)
replica_tablet.start_vttablet(wait_for_state=None)
master_tablet.wait_for_vttablet_state('NOT_SERVING')
replica_tablet.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
master_tablet.tablet_alias], auto_log=True)
utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'replica')
master_tablet.wait_for_vttablet_state('SERVING')
replica_tablet.wait_for_vttablet_state('SERVING')
master_tablet.mquery('vt_test_keyspace', _create_vt_a)
master_tablet.mquery('vt_test_keyspace', _create_vt_b)
utils.run_vtctl(['ReloadSchemaShard', 'test_keyspace/0'])
utils.run_vtctl(['RebuildVSchemaGraph'])
utils.VtGate().start(tablets=[master_tablet, replica_tablet])
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
logging.debug('Tearing down the servers and setup')
tablet.kill_tablets([master_tablet, replica_tablet])
teardown_procs = [master_tablet.teardown_mysql(),
replica_tablet.teardown_mysql()]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
master_tablet.remove_tree()
replica_tablet.remove_tree()
class Cache(object):
"""Cache is the in-memory cache for objects.
Its backing store is a dict, indexed by key '<table name>-<id>'.
Each value is a dict, with three values:
- 'version': a version number.
- 'token': an EventToken.
- 'value': an optional value. We use an empty tuple if the row is
not in the database.
"""
def __init__(self):
self.values = {}
self.stats = {
'cache_miss': 0,
'cache_hit': 0,
'add': 0,
'cas': 0,
'noop': 0,
}
def gets(self, table_name, row_id):
"""Returns a cache entry if it exists.
Args:
table_name: the name of the table.
row_id: the row id.
Returns:
version: entry version, or None if there is no entry.
token: the EventToken for that row.
value: an optional value, or None if not set.
"""
key = '%s-%d' % (table_name, row_id)
if key not in self.values:
self.stats['cache_miss'] += 1
return None, None, None
self.stats['cache_hit'] += 1
entry = self.values[key]
return entry['version'], entry['token'], entry['value']
def add(self, table_name, row_id, token, value):
"""Add a value to the cache, only if it doesn't exist.
Args:
table_name: the name of the table.
row_id: the row id.
token: the EventToken associated with the read / invalidation.
value: the actual value.
Raises:
KeyError: if the entry already exists.
"""
key = '%s-%d' % (table_name, row_id)
if key in self.values:
raise KeyError('add failed: key %s already in cache' % key)
self.stats['add'] += 1
self.values[key] = {
'version': 1,
'token': token,
'value': value,
}
def cas(self, table_name, row_id, version, token, value):
"""Update an entry in the cache.
Args:
table_name: the name of the table.
row_id: the row id.
version: the existing version to update.
token: the EventToken associated with the read / invalidation.
value: the actual value.
Raises:
KeyError: if the entry doesn't exist.
Exception: if the version is wrong.
"""
key = '%s-%d' % (table_name, row_id)
if key not in self.values:
raise KeyError('cas failed: key %s not in cache' % key)
if self.values[key]['version'] != version:
raise Exception(
'cas failed: invalid version %d, have version %d in cache' %
(version, self.values[key]['version']))
self.stats['cas'] += 1
self.values[key] = {
'version': version+1,
'token': token,
'value': value,
}
def noop(self):
"""Increments the noop counter."""
self.stats['noop'] += 1
def stats_copy(self):
"""Returns a copy of the cache stats."""
return self.stats.copy()
def stats_diff(self, before, **kwargs):
"""Returns true iff the before stats differ exactly with provided args."""
for name, value in kwargs.iteritems():
if self.stats[name] != before[name] + value:
return False
return True
class InvalidatorThread(threading.Thread):
def __init__(self, cache):
threading.Thread.__init__(self)
self.cache = cache
self.done = False
protocol, addr = utils.vtgate.rpc_endpoint(python=True)
self.conn = vtgate_client.connect(protocol, addr, 30.0)
self.timestamp = long(time.time())
self.start()
def run(self):
while True:
try:
for event, resume_timestamp in self.conn.update_stream(
'test_keyspace', topodata_pb2.REPLICA,
timestamp=self.timestamp,
shard='0'):
if self.done:
return
# Save the timestamp we get, so we can resume from it in case of
# restart.
self.timestamp = resume_timestamp
for statement in event.statements:
if statement.category == 1: # query_pb2.StreamEvent.DML
_, rows = proto3_encoding.convert_stream_event_statement(
statement)
for row in rows:
row_id = row[0]
self.invalidate(statement.table_name, row_id,
event.event_token)
except dbexceptions.DatabaseError:
logging.exception(
'InvalidatorThread got exception, continuing from timestamp %d',
self.timestamp)
def invalidate(self, table_name, row_id, token):
logging.debug('Invalidating %s(%d) - %s:', table_name, row_id, token)
version, cache_event_token, _ = self.cache.gets(table_name, row_id)
if version is None:
logging.debug(' no entry in cache, saving event_token')
self.cache.add(table_name, row_id, token, None)
return
if event_token.fresher(cache_event_token, token) >= 0:
# For invalidation, a couple things to consider:
# 1. If we can't compare the EventTokens, we want to store the
# invalidation, so it's safer.
# 2. If we have exactly the same EventToken, we do not want to
      # store the invalidation. We have either an invalidation or a
# value, in both cases we're fine keeping it.
      logging.debug(' invalidation event is older than or equal to the cache'
                    ' value, ignoring')
return
logging.debug(' updating entry in the cache')
self.cache.cas(table_name, row_id, version, token, None)
def kill(self):
"""Kill stops the invalidator. We force an event so we can exit the loop."""
logging.info('Stopping invalidator')
self.done = True
replica_tablet.mquery('vt_test_keyspace', 'flush logs')
self.join()
self.conn.close()
class TestCacheInvalidation(unittest.TestCase):
def setUp(self):
self.cache = Cache()
self.invalidator = InvalidatorThread(self.cache)
# Sleep a bit to be sure all binlog reading threads are going, and we
# eat up all previous invalidation messages.
time.sleep(1)
def tearDown(self):
self.invalidator.kill()
def _vtgate_connection(self):
protocol, addr = utils.vtgate.rpc_endpoint(python=True)
return vtgate_client.connect(protocol, addr, 30.0)
def _insert_value_a(self, row_id, name):
logging.debug('Inserting value %d into vt_a', row_id)
conn = self._vtgate_connection()
cursor = conn.cursor(tablet_type='master', keyspace='test_keyspace',
writable=True)
cursor.begin()
insert = 'insert into vt_a (id, name) values (:id, :name)'
bind_variables = {
'id': row_id,
'name': name,
}
cursor.execute(insert, bind_variables)
cursor.commit()
conn.close()
def _get_value(self, table_name, row_id):
"""Returns the value for a row, as an array.
Args:
table_name: the name of the table.
row_id: the row id.
Returns:
The value for the row as a tuple, or an empty tuple if the row
doesn't exist.
"""
logging.debug('Getting value %d from %s:', row_id, table_name)
# First look in the cache.
version, cache_event_token, value = self.cache.gets(table_name, row_id)
if value is not None:
logging.debug(' got value from cache: %s', value)
return value
# It's not in the cache, get it from the database.
conn = self._vtgate_connection()
cursor = conn.cursor(tablet_type='replica', keyspace='test_keyspace')
cursor.execute('select * from %s where id=:id' % table_name,
{'id': row_id}, include_event_token=True,
compare_event_token=cache_event_token)
result = cursor.fetchall()
if not result:
# Not in the database. Use an empty array.
logging.debug(' not in the database')
value = ()
else:
value = result[0]
conn.close()
# If there was no cached version, cache what we got,
# along with the event token.
if version is None:
logging.debug(' adding value to cache: %s', value)
self.cache.add(table_name, row_id, conn.event_token, value)
return value
# If there was a cached version, and the version we got is older,
# we can't update the cache.
if cache_event_token and not conn.fresher:
logging.debug(' database value is not fresher: %s', value)
self.cache.noop()
return value
# Save in the cache.
logging.debug(' setting value in cache: %s', value)
self.cache.cas(table_name, row_id, version, conn.event_token, value)
return value
def wait_for_cache_stats(self, stats, **kwargs):
timeout = 10
while True:
if self.cache.stats_diff(stats, **kwargs):
return
timeout = utils.wait_step('cache stats update %s' % str(kwargs), timeout)
def test_cache_invalidation(self):
"""Main test case in this suite."""
# Insert.
stats = self.cache.stats_copy()
self._insert_value_a(1, 'test_cache_invalidation object')
# Sleep a bit to be sure the value was propagated.
self.wait_for_cache_stats(stats, cache_miss=1, add=1)
# Then get the value. We cannot populate the cache here, as the
# timestamp of the latest replication EventToken is the same as
# the invalidation token (only one transaction in the binlogs so
# far).
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 1)
self.assertEqual(result, (1, 'test_cache_invalidation object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1, noop=1))
# Insert a second value with a greater timestamp to move the
# current replica EventToken.
time.sleep(1)
stats = self.cache.stats_copy()
self._insert_value_a(2, 'second object')
# Sleep a bit to be sure the value was propagated.
self.wait_for_cache_stats(stats, cache_miss=1, add=1)
# This time, when we get the value, the current replica
# EventToken is ahead of the invalidation timestamp of the first
# object, so we should save it in the cache.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 1)
self.assertEqual(result, (1, 'test_cache_invalidation object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1, cas=1))
# Ask again, should be from cache now.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 1)
self.assertEqual(result, (1, 'test_cache_invalidation object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1,
add=0, cas=0, noop=0))
def test_empty_cache_value(self):
"""Tests a non-existing value is cached properly."""
# Try to read a non-existing value, should get a miss.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 3)
self.assertEqual(result, ())
self.assertTrue(self.cache.stats_diff(stats, cache_miss=1, add=1))
# Try to read again, should get a hit to an empty value.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 3)
self.assertEqual(result, ())
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1,
add=0, cas=0, noop=0))
# Now create the value.
stats = self.cache.stats_copy()
self._insert_value_a(3, 'empty cache test object')
# Wait a bit for cache to get event, make sure we invalidated.
self.wait_for_cache_stats(stats, cache_hit=1, cas=1)
# Get the value, make sure we got it from DB, with no possible cache update.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 3)
self.assertEqual(result, (3, 'empty cache test object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1, noop=1))
# Insert a second value with a greater timestamp to move the
# current replica EventToken.
time.sleep(1)
stats = self.cache.stats_copy()
self._insert_value_a(4, 'second object')
# Sleep a bit to be sure the value was propagated.
self.wait_for_cache_stats(stats, cache_miss=1, add=1)
# This time, when we get the value, the current replica
# EventToken is ahead of the invalidation timestamp of the first
# object, so we should save it in the cache.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 3)
self.assertEqual(result, (3, 'empty cache test object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1, cas=1))
# Ask again, should be from cache now.
stats = self.cache.stats_copy()
result = self._get_value('vt_a', 3)
self.assertEqual(result, (3, 'empty cache test object'))
self.assertTrue(self.cache.stats_diff(stats, cache_hit=1,
add=0, cas=0, noop=0))
def test_event_token_fresher(self):
"""event_token.fresher test suite."""
test_cases = [
{
'ev1': None,
'ev2': None,
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=123,
),
'ev2': None,
'expected': -1,
}, {
'ev1': None,
'ev2': query_pb2.EventToken(
timestamp=123,
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=123,
),
'ev2': query_pb2.EventToken(
timestamp=123,
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=200,
),
'ev2': query_pb2.EventToken(
timestamp=100,
),
'expected': 100,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
),
'ev2': query_pb2.EventToken(
timestamp=200,
),
'expected': -100,
}, {
# Test cases with not enough information to compare.
'ev1': query_pb2.EventToken(
timestamp=100,
),
'ev2': query_pb2.EventToken(
timestamp=100,
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s2',
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='pos1',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='pos2',
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='pos1', # invalid on purpose
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='pos2', # invalid on purpose
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-123', # valid but different
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:456-789',
),
'expected': -1,
}, {
# MariaDB test cases.
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-200',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-100',
),
'expected': 100,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-100',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-200',
),
'expected': -100,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-100',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MariaDB/0-1-100',
),
'expected': 0,
}, {
# MySQL56 test cases, not supported yet.
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-200',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-100',
),
'expected': -1, # Should be: 1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-100',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-200',
),
'expected': -1,
}, {
'ev1': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-100',
),
'ev2': query_pb2.EventToken(
timestamp=100,
shard='s1',
position='MySQL56/33333333-3333-3333-3333-333333333333:1-100',
),
'expected': -1, # Should be: 0,
}
]
for tcase in test_cases:
got = event_token.fresher(tcase['ev1'], tcase['ev2'])
self.assertEqual(got, tcase['expected'],
'got %d but expected %d for Fresher(%s, %s)' %
(got, tcase['expected'], tcase['ev1'], tcase['ev2']))
if __name__ == '__main__':
utils.main()
|
dharmabumstead/ansible
|
refs/heads/devel
|
lib/ansible/modules/identity/ipa/ipa_sudorule.py
|
28
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_sudorule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo rule
description:
- Add, modify or delete sudo rule within IPA server using IPA API.
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
cmdcategory:
description:
- Command category the rule applies to.
choices: ['all']
cmd:
description:
- List of commands assigned to the rule.
- If an empty list is passed all commands will be removed from the rule.
- If option is omitted commands will not be checked or changed.
host:
description:
- List of hosts assigned to the rule.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
- Option C(hostcategory) must be omitted to assign hosts.
hostcategory:
description:
- Host category the rule applies to.
- If 'all' is passed one must omit C(host) and C(hostgroup).
- Option C(host) and C(hostgroup) must be omitted to assign 'all'.
choices: ['all']
hostgroup:
description:
- List of host groups assigned to the rule.
- If an empty list is passed all host groups will be removed from the rule.
- If option is omitted host groups will not be checked or changed.
- Option C(hostcategory) must be omitted to assign host groups.
runasusercategory:
description:
- RunAs User category the rule applies to.
choices: ['all']
version_added: "2.5"
runasgroupcategory:
description:
- RunAs Group category the rule applies to.
choices: ['all']
version_added: "2.5"
user:
description:
- List of users assigned to the rule.
- If an empty list is passed all users will be removed from the rule.
- If option is omitted users will not be checked or changed.
usercategory:
description:
- User category the rule applies to.
choices: ['all']
usergroup:
description:
- List of user groups assigned to the rule.
- If an empty list is passed all user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
state:
description: State to ensure
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
- ipa_sudorule:
name: sudo_all_nopasswd
cmdcategory: all
    description: Allow running every command with sudo without a password
hostcategory: all
sudoopt:
- '!authenticate'
usercategory: all
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
- ipa_sudorule:
name: sudo_dev_dbserver
    description: Allow developers to run every command with sudo on all database servers
cmdcategory: all
host:
- db01.example.com
hostgroup:
- db-server
sudoopt:
- '!authenticate'
usergroup:
- developers
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudorule:
description: Sudorule as returned by IPA
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class SudoRuleIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
def sudorule_find(self, name):
return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
def sudorule_add(self, name, item):
return self._post_json(method='sudorule_add', name=name, item=item)
def sudorule_mod(self, name, item):
return self._post_json(method='sudorule_mod', name=name, item=item)
def sudorule_del(self, name):
return self._post_json(method='sudorule_del', name=name)
def sudorule_add_option(self, name, item):
return self._post_json(method='sudorule_add_option', name=name, item=item)
def sudorule_add_option_ipasudoopt(self, name, item):
return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
def sudorule_remove_option(self, name, item):
return self._post_json(method='sudorule_remove_option', name=name, item=item)
def sudorule_remove_option_ipasudoopt(self, name, item):
return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
def sudorule_add_host(self, name, item):
return self._post_json(method='sudorule_add_host', name=name, item=item)
def sudorule_add_host_host(self, name, item):
return self.sudorule_add_host(name=name, item={'host': item})
def sudorule_add_host_hostgroup(self, name, item):
return self.sudorule_add_host(name=name, item={'hostgroup': item})
def sudorule_remove_host(self, name, item):
return self._post_json(method='sudorule_remove_host', name=name, item=item)
def sudorule_remove_host_host(self, name, item):
return self.sudorule_remove_host(name=name, item={'host': item})
def sudorule_remove_host_hostgroup(self, name, item):
return self.sudorule_remove_host(name=name, item={'hostgroup': item})
def sudorule_add_allow_command(self, name, item):
return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
def sudorule_remove_allow_command(self, name, item):
return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
def sudorule_add_user(self, name, item):
return self._post_json(method='sudorule_add_user', name=name, item=item)
def sudorule_add_user_user(self, name, item):
return self.sudorule_add_user(name=name, item={'user': item})
def sudorule_add_user_group(self, name, item):
return self.sudorule_add_user(name=name, item={'group': item})
def sudorule_remove_user(self, name, item):
return self._post_json(method='sudorule_remove_user', name=name, item=item)
def sudorule_remove_user_user(self, name, item):
return self.sudorule_remove_user(name=name, item={'user': item})
def sudorule_remove_user_group(self, name, item):
return self.sudorule_remove_user(name=name, item={'group': item})
def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
runasgroupcategory=None, runasusercategory=None):
data = {}
if cmdcategory is not None:
data['cmdcategory'] = cmdcategory
if description is not None:
data['description'] = description
if hostcategory is not None:
data['hostcategory'] = hostcategory
if ipaenabledflag is not None:
data['ipaenabledflag'] = ipaenabledflag
if usercategory is not None:
data['usercategory'] = usercategory
if runasusercategory is not None:
data['ipasudorunasusercategory'] = runasusercategory
if runasgroupcategory is not None:
data['ipasudorunasgroupcategory'] = runasgroupcategory
return data
def category_changed(module, client, category_name, ipa_sudorule):
if ipa_sudorule.get(category_name, None) == ['all']:
if not module.check_mode:
# cn is returned as list even with only a single value.
client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
return True
return False
def ensure(module, client):
state = module.params['state']
name = module.params['cn']
cmd = module.params['cmd']
cmdcategory = module.params['cmdcategory']
host = module.params['host']
hostcategory = module.params['hostcategory']
hostgroup = module.params['hostgroup']
runasusercategory = module.params['runasusercategory']
runasgroupcategory = module.params['runasgroupcategory']
if state in ['present', 'enabled']:
ipaenabledflag = 'TRUE'
else:
ipaenabledflag = 'FALSE'
sudoopt = module.params['sudoopt']
user = module.params['user']
usercategory = module.params['usercategory']
usergroup = module.params['usergroup']
module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
description=module.params['description'],
hostcategory=hostcategory,
ipaenabledflag=ipaenabledflag,
usercategory=usercategory,
runasusercategory=runasusercategory,
runasgroupcategory=runasgroupcategory)
ipa_sudorule = client.sudorule_find(name=name)
changed = False
if state in ['present', 'disabled', 'enabled']:
if not ipa_sudorule:
changed = True
if not module.check_mode:
ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
else:
diff = client.get_diff(ipa_sudorule, module_sudorule)
if len(diff) > 0:
changed = True
if not module.check_mode:
if 'hostcategory' in diff:
if ipa_sudorule.get('memberhost_host', None) is not None:
client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
client.sudorule_remove_host_hostgroup(name=name,
item=ipa_sudorule.get('memberhost_hostgroup'))
client.sudorule_mod(name=name, item=module_sudorule)
if cmd is not None:
changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
if not module.check_mode:
client.sudorule_add_allow_command(name=name, item=cmd)
if runasusercategory is not None:
changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
if runasgroupcategory is not None:
changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed
if host is not None:
changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
client.sudorule_add_host_host,
client.sudorule_remove_host_host) or changed
if hostgroup is not None:
changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
client.sudorule_add_host_hostgroup,
client.sudorule_remove_host_hostgroup) or changed
if sudoopt is not None:
# client.modify_if_diff does not work as each option must be removed/added by its own
ipa_list = ipa_sudorule.get('ipasudoopt', [])
module_list = sudoopt
diff = list(set(ipa_list) - set(module_list))
if len(diff) > 0:
changed = True
if not module.check_mode:
for item in diff:
client.sudorule_remove_option_ipasudoopt(name, item)
diff = list(set(module_list) - set(ipa_list))
if len(diff) > 0:
changed = True
if not module.check_mode:
for item in diff:
client.sudorule_add_option_ipasudoopt(name, item)
if user is not None:
changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
client.sudorule_add_user_user,
client.sudorule_remove_user_user) or changed
if usergroup is not None:
changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
client.sudorule_add_user_group,
client.sudorule_remove_user_group) or changed
else:
if ipa_sudorule:
changed = True
if not module.check_mode:
client.sudorule_del(name)
return changed, client.sudorule_find(name)
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(cmd=dict(type='list'),
cmdcategory=dict(type='str', choices=['all']),
cn=dict(type='str', required=True, aliases=['name']),
description=dict(type='str'),
host=dict(type='list'),
hostcategory=dict(type='str', choices=['all']),
hostgroup=dict(type='list'),
runasusercategory=dict(type='str', choices=['all']),
runasgroupcategory=dict(type='str', choices=['all']),
sudoopt=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
user=dict(type='list'),
usercategory=dict(type='str', choices=['all']),
usergroup=dict(type='list'))
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['cmdcategory', 'cmd'],
['hostcategory', 'host'],
['hostcategory', 'hostgroup'],
['usercategory', 'user'],
['usercategory', 'usergroup']],
supports_check_mode=True)
client = SudoRuleIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, sudorule = ensure(module, client)
module.exit_json(changed=changed, sudorule=sudorule)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
nikolas/smc
|
refs/heads/master
|
salvus/scripts/create_project_user.py
|
6
|
#!/usr/bin/env python
###############################################################################
#
# SageMathCloud: A collaborative web-based interface to Sage, IPython, LaTeX and the Terminal.
#
# Copyright (C) 2014, William Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""
Create a user corresponding to a given project_id.
create_project_user.py [project-id]
You should put the following in visudo:
salvus ALL=(ALL) NOPASSWD: /usr/local/bin/create_project_user.py *
"""
import argparse, hashlib, os, random, time
from subprocess import Popen, PIPE
def uid(uuid):
# We take the sha-512 of the uuid just to make it harder to force a collision. Thus even if a
# user could somehow generate an account id of their choosing, this wouldn't help them get the
# same uid as another user.
# 2^32-2=max uid, as keith determined by a program + experimentation.
n = hash(hashlib.sha512(uuid).digest()) % (4294967294-1000)
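    # n lies in [0, 4294966293], so the returned uid is in [1001, 4294967294],
    # i.e. at most 2^32 - 2 and clear of low system uids.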
return n + 1001
def cmd(s):
out = Popen(s, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=not isinstance(s, list))
x = out.stdout.read() + out.stderr.read()
e = out.wait()
if e:
raise RuntimeError(s+x)
return x
def home(project_id):
return os.path.join('/projects', project_id)
def zfs_home_is_mounted(project_id):
h = home(project_id)
if not os.path.exists(os.path.join(h, '.zfs')):
raise RuntimeError("ZFS filesystem %s is not mounted"%h[1:])
def username(project_id):
return project_id.replace('-','')
def create_user(project_id):
"""
    Create the user that contains the given project data. It is safe to
call this function even if the user already exists.
"""
name = username(project_id)
id = uid(project_id)
r = open('/etc/passwd').read()
i = r.find(name)
if i != -1:
r = r[i:]
i = r.find('\n')
u = int(r[:i].split(':')[2])
else:
u = 0
if u == id:
# user already exists and has correct id
return
if u != 0:
# there's the username but with wrong id
## during migration deleting that user would be a disaster!
raise RuntimeError("user %s already exists but with wrong id"%name)
#cmd("userdel %s"%name) # this also deletes the group
# Now make the correct user. The -o makes it so in the incredibly unlikely
# event of a collision, no big deal.
c = "groupadd -g %s -o %s"%(id, name)
for i in range(3):
try:
cmd(c)
break
except:
time.sleep(random.random())
    # minimal attempt to avoid locking issues
c = "useradd -u %s -g %s -o -d %s %s"%(id, id, home(project_id), name)
for i in range(3):
try:
cmd(c)
break
except:
time.sleep(random.random())
    # Save account info so it persists through reboots/upgrades/etc. that replace the ephemeral root fs.
if os.path.exists("/mnt/home/etc/"): # UW nodes
cmd("cp /etc/passwd /etc/shadow /etc/group /mnt/home/etc/")
if os.path.exists("/mnt/conf/etc/"): # GCE nodes
cmd("cp /etc/passwd /etc/shadow /etc/group /mnt/conf/etc/")
def chown_all(project_id):
zfs_home_is_mounted(project_id)
cmd("zfs set snapdir=hidden %s"%home(project_id).lstrip('/')) # needed for historical reasons
id = uid(project_id)
cmd('chown %s:%s -R %s'%(id, id, home(project_id)))
def write_info_json(project_id, host='', base_url=''):
zfs_home_is_mounted(project_id)
if not host:
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('10.1.1.1',80))
host = s.getsockname()[0]
path = os.path.join(home(project_id), '.sagemathcloud' + ('-local' if base_url else ''))
info_json = os.path.join(path, 'info.json')
if not os.path.exists(path):
os.makedirs(path)
obj = {"project_id":project_id,"location":{"host":host,"username":username(project_id),"port":22,"path":"."},"base_url":base_url}
import json
open(info_json,'w').write(json.dumps(obj, separators=(',',':')))
def ensure_ssh_access(project_id):
zfs_home_is_mounted(project_id)
# If possible, make some attempts to ensure ssh access to this account.
h = home(project_id)
if not os.path.exists(h):
# there is nothing we can possibly do yet -- filesystem not mounted
return
ssh_path = os.path.join(h, '.ssh')
authorized_keys2 = os.path.join(ssh_path, 'authorized_keys2')
public_key = open('/home/salvus/.ssh/id_rsa.pub').read().strip()
add_public_key = '\n#Added by SageMath Cloud\n' + public_key + '\n'
if not os.path.exists(ssh_path):
os.makedirs(ssh_path)
if not os.path.exists(authorized_keys2):
open(authorized_keys2,'w').write(add_public_key)
elif public_key not in open(authorized_keys2).read():
open(authorized_keys2,'a').write(add_public_key)
os.system('chown -R %s. %s'%(username(project_id), ssh_path))
os.system('chmod og-rwx -R %s'%ssh_path)
def killall_user(project_id):
u = uid(project_id)
os.system("pkill -u %s; sleep 1; pkill -9 -u %s; killall -u %s"%(u,u,username(project_id)))
def umount_user_home(project_id):
os.system("umount %s"%home(project_id))
def copy_skeleton(project_id):
zfs_home_is_mounted(project_id)
h = home(project_id)
u = username(project_id)
if not os.path.exists(h):
raise RuntimeError("home directory %s doesn't exist"%h)
os.system("rsync -axH --update /home/salvus/salvus/salvus/scripts/skel/ %s/"%h) # update so we don't overwrite newer versions
# TODO: must fix this -- it could overwrite a user bash or ssh stuff. BAD.
cmd("chown -R %s. %s/.sagemathcloud/ %s/.ssh %s/.bashrc"%(u, h, h, h))
cmd("chown %s. %s"%(u, h))
def cgroup(project_id, cpu=1024, memory='8G'):
"""
Create a cgroup for the given project, and ensure all of the project's processes are in the cgroup.
INPUT:
- project_id -- uuid of the project
- cpu -- (default: 1024) total number of cpu.shares allocated to this project (across all processes)
- memory -- (default: '8G') total amount of RAM allocated to this project (across all processes)
"""
if not os.path.exists('/sys/fs/cgroup/memory'):
# do nothing on platforms where cgroups isn't supported (GCE right now, I'm looking at you.)
return
uname = username(project_id)
shares=100000
if os.path.exists('/projects/%s/coin'%project_id):
shares = 1000
if os.path.exists('/projects/%s/minerd'%project_id):
shares = 1000
if os.path.exists('/projects/%s/sh'%project_id):
shares = 1000
cmd("cgcreate -g memory,cpu:%s"%uname)
cmd('echo "%s" > /sys/fs/cgroup/memory/%s/memory.limit_in_bytes'%(memory, uname))
cmd('echo "%s" > /sys/fs/cgroup/cpu/%s/cpu.shares'%(cpu, uname))
cmd('echo "%s" > /sys/fs/cgroup/cpu/%s/cpu.cfs_quota_us'%(shares, uname))
z = "\n%s cpu,memory %s\n"%(uname, uname)
cur = open("/etc/cgrules.conf").read()
if z not in cur:
open("/etc/cgrules.conf",'a').write(z)
cmd('service cgred restart')
try:
pids = cmd("ps -o pid -u %s"%uname).split()[1:]
except RuntimeError:
# ps returns an error code if there are NO processes at all (a common condition).
pids = []
if pids:
try:
cmd("cgclassify %s"%(' '.join(pids)))
# ignore cgclassify errors, since processes come and go, etc.
except RuntimeError:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Project user control script")
parser.add_argument("--kill", help="kill all processes owned by the user", default=False, action="store_const", const=True)
parser.add_argument("--umount", help="run umount on the project's home as root", default=False, action="store_const", const=True)
parser.add_argument("--skel", help="rsync /home/salvus/salvus/salvus/scripts/skel/ to the home directory of the project", default=False, action="store_const", const=True)
parser.add_argument("--create", help="create the project user", default=False, action="store_const", const=True)
parser.add_argument("--base_url", help="the base url (default:'')", default="", type=str)
parser.add_argument("--host", help="the host ip address on the tinc vpn (default: auto-detect)", default="", type=str)
parser.add_argument("--chown", help="chown all the files in /projects/projectid", default=False, action="store_const", const=True)
parser.add_argument("--cgroup", help="put project in given control group (format: --cgroup=cpu:1024,memory:10G)", default="", type=str)
parser.add_argument("project_id", help="the uuid of the project", type=str)
args = parser.parse_args()
if args.create:
create_user(args.project_id)
write_info_json(project_id=args.project_id, host=args.host, base_url=args.base_url)
ensure_ssh_access(args.project_id)
if args.skel:
copy_skeleton(args.project_id)
if args.kill:
killall_user(args.project_id)
if args.umount:
umount_user_home(args.project_id)
if args.chown:
chown_all(args.project_id)
if args.cgroup:
kwds = dict([x.split(':') for x in args.cgroup.split(',')])
cgroup(args.project_id, **kwds)
|
attackjz/Cocos2d-x_CustomSliderList
|
refs/heads/master
|
CustomSliderList/cocos2d/plugin/tools/toolsForGame/main.py
|
265
|
import sys, string, os
from Tkinter import *
import steps
Plugins = sys.argv[1]
print Plugins
pluginList = Plugins.split(':')
maxStep = 2
curStep = 1
stepList = []
# functions
# show step on the num index
def showStep(num):
global stepList
stepNum = len(stepList)
    if num >= stepNum or num < 0:
        # ignore out-of-range step indices
        return
i = 0
while i < stepNum:
if i == num:
stepList[i].stepFrame.pack(fill=BOTH, anchor='nw')
else:
stepList[i].stepFrame.pack_forget()
i += 1
# update the pre & next buttons status
def updateBtnState():
global curStep
global btnNextStep
global btnPreStep
if curStep == 1:
btnPreStep['state'] = DISABLED
btnNextStep['state'] = NORMAL
btnNextStep['text'] = 'Next'
elif curStep == maxStep:
btnPreStep['state'] = NORMAL
btnNextStep['state'] = NORMAL
btnNextStep['text'] = 'Finish'
else:
btnPreStep['state'] = NORMAL
btnNextStep['state'] = NORMAL
btnNextStep['text'] = 'Next'
# next button clicked
def nextStep():
if btnNextStep['text'] == 'close':
root.quit()
return
global curStep
nowStepObj = stepList[curStep - 1]
bRet = nowStepObj.checkStep()
if bRet != None:
stepError['text'] = bRet
return
else:
stepError['text'] = ''
if curStep < maxStep:
curStep += 1
showStep(curStep - 1)
updateBtnState()
elif curStep == maxStep:
# disable buttons when process
btnPreStep['state'] = DISABLED
btnNextStep['state'] = DISABLED
# get user input arguments
projPath = stepList[0].getPath()
plugins = stepList[1].getSelectedPlugins()
strPlugins = ''
i = 0
while i < len(plugins):
strPlugins += "plugins/"
strPlugins += plugins[i]
if i != (len(plugins) - 1):
strPlugins += ':'
i += 1
# process shell script to modify the game project
ret = os.system('bash ./toolsForGame/addPluginForGame.sh ' + projPath + ' ' + strPlugins)
if ret != 0:
# enable buttons after process
btnPreStep['state'] = NORMAL
btnNextStep['state'] = NORMAL
stepError['text'] = 'Error during process'
else:
# enable next button & change text to close
btnNextStep['state'] = NORMAL
btnNextStep['text'] = 'close'
stepError['text'] = 'Process Successful!'
# pre button clicked
def preStep():
global curStep
global stepError
stepError['text'] = ''
if curStep > 1:
curStep -= 1
showStep(curStep - 1)
updateBtnState()
# init root view
root = Tk()
root.title('Plugin-x Integration Guide')
root.geometry("600x400")
rootFrame = Frame(root)
rootFrame.pack(fill=BOTH)
# steps view
MyStep1 = steps.step1()
MyStep1.initStep(rootFrame)
MyStep2 = steps.step2()
MyStep2.initStep(rootFrame, pluginList)
stepList.append(MyStep1)
stepList.append(MyStep2)
MyStep1.stepFrame.pack(fill=BOTH, anchor='nw')
# add step error message
controlFrame = Frame(root)
controlFrame.pack(side=BOTTOM, fill=X, anchor='s')
stepError = Label(controlFrame)
stepError.pack(side=LEFT, padx=30)
# add step button
btnNextStep = Button(controlFrame, text='Next', command=nextStep)
btnPreStep = Button(controlFrame, text='Back', command=preStep, state=DISABLED)
btnNextStep.pack(side=RIGHT, padx=30)
btnPreStep.pack(side=RIGHT)
root.mainloop()
|
xparedesfortuny/pylines
|
refs/heads/master
|
setup.py
|
1
|
# Author: Xavier Paredes-Fortuny (xparedesfortuny@gmail.com)
# License: MIT, see LICENSE.md
params = {
'input_file': 'PULSAR',
'nick_name': 'steady',
'output_file': 'current_lines',
'test_rhd': 0,
'binary_output_file': 1,
'tstep': 1.0,
'itemax': 10000,
'resamp': 200, # 0 OFF
'nlines': -1, # -1 OFF
'CGS_units': 1,
'fB0': 1.0,
'tr0': 9.0,
'int_method': 1,
'int_test': 0,
'make_plots': 1,
'plots_path': 'plots/',
'plot_maps': 1,
'plot_lines': 1,
'plot_profiles': 1,
}
|
RobberPhex/shadowsocks
|
refs/heads/master
|
tests/graceful_cli.py
|
977
|
#!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
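# A graceful-shutdown probe (setup assumed from the test harness): connect to
# the local test server through a SOCKS5 proxy on port 1081 and hold the
# connection open for 30 seconds, so shutdown behaviour can be observed while
# a client is still live.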
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
time.sleep(30)
s.close()
|
stanzinofree/Digital_Archive_Creator
|
refs/heads/master
|
auth.py
|
1
|
# -*- encoding: UTF-8 -*-
"""
*Version* : *0.1.4*
This lib gives the API for the authentication and consultation of users
"""
import hashlib
import string
import random
import cherrypy
import redis
import libs.db_connection
import libs.local_variables
from base_config import html_header
r = redis.StrictRedis(host='localhost', port=6379, db=3)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
SESSION_KEY = '_cp_username'
def check_credentials(username, password):
"""
Verifies credentials for username and password.
Returns None on success or a string describing the error on failure
"""
cur = libs.db_connection.connect_db()
cursor = cur.cursor()
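    # NOTE: building the query by string concatenation is vulnerable to SQL
    # injection; a parameterized query would be safer here.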
quer = "SELECT * FROM operators WHERE ID='" + username + "'"
cursor.execute(quer)
passwords = []
for record in cursor.fetchall():
pwd = str(record[3])
nome = str(record[1] + " " + record[2])
passwords.append(pwd)
r.set(username, nome)
cursor.close()
pwd = hashlib.md5(password).hexdigest()
try:
passwords[0]
except IndexError:
msg = "Username does not exist".encode('utf-8')
username = None
return msg, username
if pwd == passwords[0]:
return None
else:
msg = "Incorrect username or password.".encode('utf-8')
return msg
def check_auth(*args, **kwargs):
"""
A tool that looks in config for 'auth.require'. If found and it
is not None, a login is required and the entry is evaluated as a list of
conditions that the user must fulfill
"""
conditions = cherrypy.request.config.get('auth.require', None)
if conditions is not None:
username = cherrypy.session.get(SESSION_KEY)
if username:
cherrypy.request.login = username
for condition in conditions:
# A condition is just a callable that returns true or false
if not condition():
raise cherrypy.HTTPRedirect("/auth/login")
else:
raise cherrypy.HTTPRedirect("/auth/login")
cherrypy.tools.auth = cherrypy.Tool('before_handler', check_auth)
def require(*conditions):
"""
A decorator that appends conditions to the auth.require config
variable.
"""
def decorate(f):
if not hasattr(f, '_cp_config'):
f._cp_config = dict()
if 'auth.require' not in f._cp_config:
f._cp_config['auth.require'] = []
f._cp_config['auth.require'].extend(conditions)
return f
return decorate
def member_of(groupname):
def check():
# replace with actual check if <username> is in <groupname>
admin = libs.local_variables.admin_user
print admin
print cherrypy.request.login
        if groupname == "admin":
            return cherrypy.request.login in admin
else:
return cherrypy.request.login in groupname
return check
def name_is(reqd_username):
return lambda: reqd_username == cherrypy.request.login
# These might be handy
def any_of(*conditions):
"""Returns True if any of the conditions match"""
def check():
for c in conditions:
if c():
return True
return False
return check
# By default all conditions are required, but this might still be
# needed if you want to use it inside of an any_of(...) condition
def all_of(*conditions):
"""Returns True if all of the conditions match"""
def check():
for c in conditions:
if not c():
return False
return True
return check
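# A minimal usage sketch combining the helpers above (hypothetical handler):
#
#     class Root(object):
#         @cherrypy.expose
#         @require(any_of(member_of("admin"), name_is("root")))
#         def admin_area(self):
#             return "restricted content"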
# Controller to provide login and logout actions
class AuthController(object):
def on_login(self, username):
"""Called on successful login"""
def on_logout(self, username):
"""Called on logout"""
def get_loginform(self, username, msg="", from_page="/"):
key = cherrypy.session.get('_cp_username')
nome = r.get(key)
footer = u"""
</div><div class="clear"></div>
<div id="footer">
<div class="grid flex">
<div class="col_10">© Copyright 2012–2013 All Rights Reserved.</div>
<div class="col_2">Utente: Anonimo</div>
</div></div></body></html>"""
html_body = ""
if msg == "":
html_body += """
<div class="col_12">
<div class="col_4"></div>
<div class="col_4"><h3 class="center">ACCESSO</h3></div>
<div class="col_4"></div>
<div class="tab-content clearfix">
<form class="vertical" method="post" action="/auth/login">
<div class="col_4"></div>
<div class="col_4"><input type="hidden" name="from_page" value="%(from_page)s " />%(msg)s<br />
<input type="text" name="username" placeholder="USERNAME" value="%(username)s" />
<input type="password" name="password" placeholder="PASSWORD" id="password" />
<button class="blue center" type="submit">Submit</button></div>
<div class="col_4"></div>
</form>
</div>
</div>""" % locals()
else:
html_body += """
<h1>ACCESSO</h1>
<form class="vertical" method="post" action="/auth/login">
<input type="hidden" name="from_page" value="%(from_page)s " />
<div class="notice error"><i class="icon-remove-sign icon-large"></i>Wrong Username or Password <a href="#close" class="icon-remove"></a></div>
<input class="error" type="text" name="username" placeholder="USERNAME" value="%(username)s" />
<input class="error" type="password" name="password" placeholder="PASSWORD" id="password"/><br/><button type="submit">Submit</button></form><br/>""" % locals()
html = html_header + html_body + footer
return html
@cherrypy.expose
def login(self, username=None, password=None, from_page="/"):
if username is None or password is None:
return self.get_loginform("", from_page=from_page)
error_msg = check_credentials(username, password)
if error_msg:
return self.get_loginform(username, error_msg, from_page)
else:
cherrypy.session[SESSION_KEY] = cherrypy.request.login = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or "/")
@cherrypy.expose
def logout(self, from_page="/"):
sess = cherrypy.session
username = sess.get(SESSION_KEY, None)
sess[SESSION_KEY] = None
if username:
cherrypy.request.login = None
self.on_logout(username)
raise cherrypy.HTTPRedirect(from_page or "/")
|
luotao1/Paddle
|
refs/heads/develop
|
python/paddle/distributed/fleet/meta_optimizers/sharding/__init__.py
|
2
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
frewsxcv/keyczar
|
refs/heads/master
|
cpp/src/tools/swtoolkit/test/collada_dom_test.py
|
9
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for atlmfc_vc80. These are MEDIUM tests."""
import sys
import TestFramework
def TestSConstruct(scons_globals):
"""Test SConstruct file.
Args:
scons_globals: Global variables dict from the SConscript file.
"""
# Get globals from SCons
Environment = scons_globals['Environment']
env = Environment(tools=['component_setup'])
# Make sure including the tool doesn't cause a failure on any platform
# Run hermetically
env1 = env.Clone(COLLADA_DIR = '.')
env1.Tool('collada_dom')
# Run non-hermetically
env2 = env.Clone(COLLADA_DIR = None)
env2.Tool('collada_dom')
def main():
test = TestFramework.TestFramework()
base = 'test'
test.subdir(base)
test.WriteSConscript(base + '/SConstruct', TestSConstruct)
test.run(chdir=base)
test.pass_test()
if __name__ == '__main__':
main()
|
vitmod/enigma2-test
|
refs/heads/master
|
lib/python/Components/Renderer/Progress.py
|
45
|
from Components.VariableValue import VariableValue
from Renderer import Renderer
from enigma import eSlider
class Progress(VariableValue, Renderer):
def __init__(self):
Renderer.__init__(self)
VariableValue.__init__(self)
self.__start = 0
self.__end = 100
GUI_WIDGET = eSlider
def changed(self, what):
if what[0] == self.CHANGED_CLEAR:
(self.range, self.value) = ((0, 1), 0)
return
range = self.source.range or 100
value = self.source.value
if value is None:
value = 0
(self.range, self.value) = ((0, range), value)
def postWidgetCreate(self, instance):
instance.setRange(self.__start, self.__end)
def setRange(self, range):
(self.__start, self.__end) = range
if self.instance is not None:
self.instance.setRange(self.__start, self.__end)
def getRange(self):
return self.__start, self.__end
range = property(getRange, setRange)
|
mapler/simple-page-monitor
|
refs/heads/master
|
test/test_sender.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sender import (
Output,
FileOutput,
SenderManager,
SenderConfError,
SENDERS,
)
import unittest
import mock
import sys
import os
import shutil
class OutputTestCase(unittest.TestCase):
def setUp(self):
self.output_sender = Output()
def test_output(self):
self.output_sender.output(sys.stdout, 'test')
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode") # pragma: no cover
output = sys.stdout.getvalue().strip()
assert 'test' == output
def test_send(self):
self.output_sender.send('test')
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode") # pragma: no cover
output = sys.stdout.getvalue().strip()
assert 'test' == output
class FileOutputTestCase(unittest.TestCase):
def setUp(self):
self.file_output = FileOutput()
def tearDown(self):
open(self.file_output.conf.FILE_PATH, 'w').close()
def test_send(self):
test_message = 'test message'
self.file_output.send(test_message)
with open(self.file_output.conf.FILE_PATH, 'r') as f:
saved_message = f.read().strip()
assert test_message == saved_message
class SenderManagerTestCase(unittest.TestCase):
def setUp(self):
self.sender_manager = SenderManager()
def tearDown(self):
file_path = SENDERS['File']().conf.FILE_PATH
open(file_path, 'w').close()
def test_add_sender(self):
sender1 = 'fake_sender'
sender2 = 'fake_sender2'
self.sender_manager.add_sender(sender1)
assert self.sender_manager.senders == [sender1]
self.sender_manager.add_sender(sender2)
assert self.sender_manager.senders == [sender1, sender2]
def test_set_senders(self):
self.sender_manager.methods = ['File', 'Output', 'Mailgun']
self.sender_manager.set_senders()
assert sorted([sender.__class__ for sender in self.sender_manager.senders]) == sorted([SENDERS['File'], SENDERS['Output'], SENDERS['Mailgun']])
def test_set_senders_exception(self):
self.sender_manager.methods = ['Fake']
self.assertRaises(SenderConfError, self.sender_manager.set_senders)
def test_send(self):
self.sender_manager.methods = ['Output', 'File']
self.sender_manager.send('test')
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode") # pragma: no cover
output = sys.stdout.getvalue().strip()
assert 'test' == output
for sender in self.sender_manager.senders:
if hasattr(sender.conf, 'FILE_PATH'):
file_path = sender.conf.FILE_PATH
with open(file_path, 'r') as f:
saved_message = f.read().strip()
assert 'test' == saved_message
def test_send_exception(self):
exception_message = 'test exception'
self.sender_manager.methods = ['Mailgun']
self.sender_manager.set_senders()
for sender in self.sender_manager.senders:
sender.send = mock.MagicMock(side_effect=ValueError(exception_message))
self.sender_manager.set_senders = mock.Mock(return_value=None)
self.sender_manager.send('test')
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode") # pragma: no cover
output = sys.stdout.getvalue().strip()
assert exception_message == output
def test_send_check_message_repeated(self):
file_path = SENDERS['File']().conf.FILE_PATH
test_message = 'test'
with open(file_path, 'w') as f:
f.write(test_message)
is_repeated = self.sender_manager.check_message_repeated(test_message)
assert is_repeated == True
def test_send_check_message_repeated_fail(self):
file_path = SENDERS['File']().conf.FILE_PATH
test_message_1 = 'test'
test_message_2 = 'test2'
with open(file_path, 'w') as f:
f.write(test_message_1)
is_repeated = self.sender_manager.check_message_repeated(test_message_2)
assert is_repeated == False
def test_send_check_message_repeated_when_file_not_exist(self):
file_path = SENDERS['File']().conf.FILE_PATH
bak_path = file_path + '.bak'
shutil.copyfile(file_path, bak_path)
os.remove(file_path)
test_message = 'test'
is_repeated = self.sender_manager.check_message_repeated(test_message)
assert is_repeated == False
shutil.copyfile(bak_path, file_path)
os.remove(bak_path)
def test_send_when_repeated(self):
self.sender_manager.methods = ['File']
self.sender_manager.set_senders()
self.sender_manager.send('test')
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode") # pragma: no cover
self.sender_manager.send('test')
output = sys.stdout.getvalue().strip()
assert "'{}' again. Stop sending.".format('test') == output
|
ai-ku/uwsd
|
refs/heads/master
|
run/fetch-semeval10-aw.py
|
1
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
from bs4 import BeautifulSoup
f = '../data/semeval10/aw/test/English/EnglishAW.test.xml'
soup = BeautifulSoup(open(f), 'xml')
sentences = soup.find_all('s')
for sentence in sentences:
print u' '.join(sentence.text.split()).encode('utf-8').strip()
|
ProfessorX/Electronic-Books
|
refs/heads/master
|
Misc/HW3_Q2_2.py
|
1
|
'''
Created on May 5, 2014
@author: Nengbao
'''
class EdgeWeightedDigraph(object):
'''Edge weighted Directed Graph'''
def __init__(self, numV):
if numV < 0:
            raise ValueError('Number of vertices should not be negative.')
self.numV = numV
self.numE = 0
self.adj = [[] for _ in range(numV)] #NOTE: verify
def V(self):
return self.numV
def E(self):
return self.numE
def addEdge(self, e):
v = e.fro()
self.adj[v].append(e)
self.numE += 1
class DirectedEdge(object):
'''Directed Edge'''
def __init__(self, v, w, weight):
if (v < 0 or w < 0):
            raise ValueError('Vertex names should be nonnegative integers.')
self.v = v
self.w = w
self.weight = weight
# def weight(self):
# return self.weight
def fro(self):
return self.v
def to(self):
return self.w
class EdgeWeightedDirectedCycle(object):
''' Detect cycles in digraph'''
def __init__(self, G):
self.onStack = [False] * G.V()
self.edgeTo = [None] * G.V()
self.marked = [False] * G.V()
self.cycle = []
for v in range(G.V()):
if not self.marked[v]:
self.dfs(G, v)
def dfs(self, G, v):
self.onStack[v] = True
self.marked[v] = True
for e in G.adj[v]:
w = e.to()
if(self.cycle != []):
return
elif (not self.marked[w]):
self.edgeTo[w] = e
self.dfs(G, w)
elif self.onStack[w]:
while (e.fro() != w):
self.cycle += [e]
e = self.edgeTo[e.fro()]
self.cycle += [w] + [v]
self.onStack[v] = False
def hasCycle(self):
return self.cycle != []
def Cycle(self):
return self.cycle
class CPM(object):
'''Critical path method for longest path problem'''
def CPM(self, test_in):
utils = Utils()
N, Dur, Pre = utils.read_file(test_in)
G = EdgeWeightedDigraph(2*N + 2)
s = 2*N
t = 2*N + 1
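        # Each job i is split into a start node i and an end node i+N joined
        # by an edge weighted with its duration; s and t are virtual
        # source/sink nodes, and zero-weight edges encode the precedences.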
for i in range(N):
G.addEdge(DirectedEdge(i, i+N, Dur[i]))
G.addEdge(DirectedEdge(s, i, 0))
G.addEdge(DirectedEdge(i+N, t, 0))
for j in range(len(Pre[i])):
successor = Pre[i][j]
G.addEdge(DirectedEdge(i+N, successor, 0))
ewdc = EdgeWeightedDirectedCycle(G)
if ewdc.hasCycle():
lp = None
else:
lp = AcyclicLP(G, s)
utils.write_file(lp)
class AcyclicLP(object):
'''Longest path in a DAG'''
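    # Relaxing edges in topological order keeps the computation valid: every
    # vertex's distance is final before its outgoing edges are relaxed.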
def __init__(self, G, s):
self.edgeTo = [None] * G.V()
self.distTo = [float("-inf")] * G.V()
self.distTo[s] = 0
top = Topological(G)
for v in top.order:
for e in G.adj[v]:
self.relax(e)
def relax(self, e):
v = e.fro()
w = e.to()
if(self.distTo[w] < self.distTo[v] + e.weight):
self.distTo[w] = self.distTo[v] + e.weight
self.edgeTo[w] = e
class Topological(object):
def __init__(self, G):
finder = EdgeWeightedDirectedCycle(G)
if(not finder.hasCycle()):
dfs = DepthFirstOrder(G)
self.order = dfs.reversePost()
# def order(self):
# return self.order
class DepthFirstOrder(object):
def __init__(self, G):
self.postorder = []
self.preorder = []
self.marked = [False] * G.V()
for v in range(G.V()):
if(not self.marked[v]):
self.dfs(G, v)
def dfs(self, G, v):
self.marked[v] = True
self.preorder.append(v)
for e in G.adj[v]:
w = e.to()
if(not self.marked[w]):
self.dfs(G, w)
self.postorder.append(v)
def reversePost(self):
reverse = self.postorder[::-1]
return reverse
class Utils(object):
def read_file(self, test_in):
lines = open(test_in, 'r').read().splitlines()
N = int(lines[0])
Dur = []
Pre = []
for line in lines[1:]:
tmp = line.split()
Dur.append(float(tmp[0]))
tmp1 = list(map(int, tmp[1:]))
Pre.append(tmp1)
return N, Dur, Pre
def write_file(self, lp):
to = open('test.out', 'w')
if lp == None:
to.write('INFEASIBLE')
            to.close()
else:
N = len(lp.distTo)
N = (N-2)/2 # number of vertices
for i in range(N):
to.write(str(lp.distTo[i]) + '\n')
to.close()
print lp.distTo[:N]
cpm = CPM()
cpm.CPM('test.in')
|
quadflor/Quadflor
|
refs/heads/master
|
Code/lucid_ml/weighting/SpreadingActivation.py
|
1
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import networkx as nx
from collections import defaultdict, deque
from math import log
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
class SpreadingActivationTransformer(BaseEstimator, TransformerMixin):
'''
Create a SpreadingActivation object
parameters:
    hierarchy -- the hierarchy of concepts as a networkx graph
root -- the root node of the hierarchy
    method -- activation method: one of 'basic', 'bell', 'belllog', 'children', 'binary'
decay -- decay factor used by the 'basic' activation method
vocabulary (optional) -- mapping from hierarchy nodes to matrix indices
feature_names (optional) -- mapping from matrix indices to hierarchy nodes
'''
def __init__(self, hierarchy, root, method='basic', decay=1.0, vocabulary=None, feature_names=None):
self.method = method.lower()
if self.method not in ["basic", "bell", "belllog", "children", "binary"]:
raise ValueError
self.hierarchy = hierarchy
self.root = root
# if thesaurus does not use matrix indices as nodes,
# we need some vocabulary and feature_names mappings
self.vocabulary = vocabulary
self.feature_names = feature_names
# decay is used for basic activation method
self.decay = decay
def _score(self, freq, scores, row, col, memoization=None):
mem = memoization if memoization is not None else [False] * scores.shape[1]
# memoization hit
if mem[col]: return scores[row, col]
children = self.hierarchy.successors(self.feature_names[col] if self.feature_names else col)
if len(children) == 0:
# Base case for leaves
scores[row, col] = freq[row, col]
mem[col] = True
return scores[row, col]
# recursively compute children score
score = float(0)
for child in children:
child_idx = self.vocabulary[child] if self.vocabulary else child
score += self._score(freq, scores, row, child_idx, memoization=mem)
# scale them with some method specific factor
if self.method in ["bell", "belllog"]:
k = nx.shortest_path_length(self.hierarchy, self.root, self.feature_names[col] if self.feature_names else col)
print(k+1, self.levels[k+1])
print("Count of children:", len(children))
denom = self.levels[k+1]
if self.method == "belllog": denom = log(denom, 10) #TODO problem when zero
score *= 1.0 / denom
elif self.method == "children":
score *= 1.0 / len(children)
elif self.method == "basic":
score *= self.decay
# add the freq of the concept just now since it should not be scaled
score += freq[row, col]
scores[row, col] = score
mem[col] = True
return scores[row, col]
def partial_fit(self, X, y=None):
return self
def fit(self, X, y=None):
# the bell methods require additional information
if self.method in ["bell", "belllog"]:
# precompute node count by level
self.levels = defaultdict(int)
for node in self.hierarchy.nodes():
l = nx.shortest_path_length(self.hierarchy, self.root, node)
self.levels[l] += 1
print(self.levels)
return self
def transform(self, X, y=None):
n_records, n_features = X.shape
# lil matrix can be modified efficiently
# especially when row indices are sorted
scores = sp.lil_matrix((n_records, n_features), dtype=np.float32)
for row in range(n_records):
self._score(X, scores, row, self.root)
return sp.csr_matrix(scores)
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X, y)
def write_dotfile(path, data, shape):
def identifier(record, node):
return str(record) + '.' + str(node)
nx, ny = shape
with open(path, 'w') as f:
print("digraph G {", file=f)
print("node [shape=rect]", file=f)
for record in range(nx):
for feature in range(ny):
s = identifier(record, feature)
s += " [label=\""
for key, value in data.items():
s += key + ":\t%.2f"%value[record,feature] + "\\n"
s += "\"]"
print(s, file=f)
            for edge in toy.edges():  # NOTE: uses the module-level demo graph 'toy'
src, dst = edge
print(identifier(record, src), "->", identifier(record, dst), file=f)
print("}", file=f)
if __name__ == "__main__":
import random
# toy hierarchy
toy = nx.DiGraph()
toy.add_nodes_from([0,1,2,3,4,5,6,7,8,9,10,11,12])
toy.add_edges_from([(0,1), (0,2), (0,3), (1,4), (1, 5), (2,6), (2,7), (2,8), (2,9), (2,10),
(3,7),(4,11),(4,12)])
# toy shape
n_records = 3
n_features = len(toy.nodes())
# fill with random values
freq = np.ndarray(shape=(n_records, n_features), dtype=np.int8)
for i in range(n_records):
for j in range(n_features):
freq[i,j] = random.randint(0,4)
freq = sp.csr_matrix(freq)
print("Initial frequency values as CSR matrix")
print("=" * 42)
print(freq)
print("=" * 42)
# initialize methods
basic = SpreadingActivationTransformer(toy, 0, method="basic")
bell = SpreadingActivationTransformer(toy, 0, method="bell")
belllog = SpreadingActivationTransformer(toy, 0, method="belllog")
children = SpreadingActivationTransformer(toy, 0, method="children")
# apply them
basic_scores = basic.fit_transform(freq)
children_scores = children.fit_transform(freq)
bell_scores = bell.fit_transform(freq)
belllog_scores = belllog.fit_transform(freq)
print("Computed values as CSR matrix (with children spreading activation)")
print("=" * 42)
print(children_scores)
print("=" * 42)
# put them in a dict
data_dict = {
"freq" : freq,
"basic" : basic_scores,
"children" : children_scores,
"bell" : bell_scores,
"bellog" : bell_scores }
# for some pretty output
write_dotfile("more_toys.dot", data_dict, shape=freq.shape)
class InverseSpreadingActivation(BaseEstimator, TransformerMixin):
def __init__(self, hierarchy, multilabelbinarizer, decay=0.4, firing_threshold=1.0, verbose=0, use_weights=True):
self.hierarchy = hierarchy
self.decay = decay
self.firing_threshold = firing_threshold
self.use_weights = use_weights
self.verbose = verbose
self.mlb = multilabelbinarizer
def fit(self, X, Y):
n_samples = X.shape[0]
F = self.firing_threshold
decay = self.decay
coef_ = np.zeros(shape=(X.shape[1]), dtype=np.float64)
fired_ = np.zeros(shape=(X.shape[1]), dtype=np.bool_)
_, I, V = sp.find(Y)
        coef_[I] += np.divide(V, X.shape[0])
        markers = deque(I)
        while markers:
            i = markers.popleft()
            if coef_[i] >= F and not fired_[i]:
                # fire: mark the node and spread its activation to neighbors
                fired_[i] = True
                for j in self.hierarchy.neighbors(i):
                    if self.use_weights:
                        coef_[j] += coef_[i] * decay * self.hierarchy[i][j]['weight']
                    else:
                        coef_[j] += coef_[i] * decay
                    if coef_[j] >= F:
                        coef_[j] = F
                        markers.append(j)
self.coef_ = coef_
return self
def transform(self, X):
Xt = X + X * self.coef_
return Xt
def fit_transform(self, X, Y):
self.fit(X, Y)
return self.transform(X)
def bell_reweighting(tree, root, sublinear=False):
# convert the hierarchy to a tree if make_bfs_tree is true
distance_by_target = nx.shortest_path_length(tree, source=root)
level_count = defaultdict(int)
for val in distance_by_target.values():
level_count[val] += 1
for edge in tree.edges():
parent, child = edge
if sublinear:
# use smoothed logarithm
tree[parent][child]['weight'] = 1.0 / log(1 + level_count[distance_by_target[child]], 10)
else:
tree[parent][child]['weight'] = 1.0 / level_count[distance_by_target[child]]
return tree
def children_reweighting(tree):
for node in tree.nodes():
children = tree.successors(node)
n_children = len(children)
for child in children:
tree[node][child]['weight'] = 1.0 / n_children
return tree
class SpreadingActivation(BaseEstimator, TransformerMixin):
'''
weighting == None implies equal weights to all edges
weighting == bell, belllog requires root to be defined and assert_tree should be true
'''
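    # A minimal usage sketch (hypothetical graph and sparse feature matrix):
    #
    #     sa = SpreadingActivation(hierarchy, decay=0.5, weighting="children")
    #     X_spread = sa.fit(X).transform(X)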
def __init__(self, hierarchy, decay=1, firing_threshold=0, verbose=10, weighting=None, root=None, strict=False):
self.hierarchy = hierarchy
self.decay = decay
self.firing_threshold = firing_threshold
self.verbose = verbose
self.strict = strict
self.root = root
self.weighting = weighting.lower() if weighting is not None else None
assert self.weighting in [None, "bell", "belllog", "children", "basic"]
def fit(self, X, y=None):
if self.weighting == "bell":
assert self.root is not None
self.hierarchy = bell_reweighting(self.hierarchy, self.root, sublinear=False)
elif self.weighting == "belllog":
assert self.root is not None
self.hierarchy = bell_reweighting(self.hierarchy, self.root, sublinear=True)
elif self.weighting == "children":
self.hierarchy = children_reweighting(self.hierarchy)
return self
def transform(self, X):
F = self.firing_threshold
hierarchy = self.hierarchy
decay = self.decay
if self.verbose: print("[SA] %.4f concepts per sample."%(float(X.getnnz()) / X.shape[0]))
if self.verbose: print("[SA] Starting Spreading Activation")
X_out = sp.lil_matrix(X.shape,dtype=X.dtype)
fired = sp.lil_matrix(X.shape,dtype=np.bool_)
I, J, V = sp.find(X)
X_out[I,J] = V
markers = deque(zip(I,J))
while markers:
i, j = markers.popleft()
if X_out[i,j] >= F and not fired[i,j]:
#markers.extend(self._fire(X_out, i, j))
fired[i,j] = True
for target in hierarchy.predecessors(j):
if self.weighting:
X_out[i,target] += X_out[i,j] * decay * hierarchy[target][j]['weight']
else:
X_out[i,target] += X_out[i,j] * decay
if X_out[i, target] >= F:
                        if self.strict: X_out[i,target] = F
markers.append((i,target))
if self.verbose: print("[SA] %.4f fired per sample."%(float(fired.getnnz()) / X.shape[0]))
return sp.csr_matrix(X_out)
def _fire(self, A, i, j):
F = self.firing_threshold
hierarchy = self.hierarchy
decay = self.decay
markers = deque()
for target in hierarchy.predecessors(j):
if self.weighting:
A[i,target] += A[i,j] * decay * hierarchy[target][j]['weight']
else:
A[i,target] += A[i,j] * decay
if A[i, target] >= F:
if self.strict: A[i,target] = F
markers.append((i, target))
return markers
class OneHopActivation(BaseEstimator, TransformerMixin):
def __init__(self, hierarchy, decay=0.4, child_treshold=2,verbose=0):
self.hierarchy = hierarchy
self.decay = decay
self.child_threshold = child_treshold
self.verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
hierarchy = self.hierarchy
decay = self.decay
threshold = self.child_threshold
verbose = self.verbose
n_hops = 0
if verbose: print("[OneHopActivation]")
X_out = sp.lil_matrix(X.shape, dtype=X.dtype)
I, J, _ = sp.find(X)
for i, j in zip(I,J):
n_children = 0
sum_children = 0
for child in hierarchy.successors(j):
if X[i, child] > 0: # same row i
n_children += 1
sum_children += X[i, child]
if n_children >= threshold:
if verbose: print("Hop", end=" ")
n_hops += 1
X_out[i,j] = X[i,j] + sum_children * decay
else:
X_out[i,j] = X[i,j]
if verbose: print("\n[OneHopActivation] %d hops." % n_hops)
return sp.csr_matrix(X_out)
class BinarySA(BaseEstimator, TransformerMixin):
''' Binary Spreading Activation Transformer
+ works in place and on sparse data
'''
def __init__(self, hierarchy, assert_tree=False, root=None):
self.hierarchy = hierarchy
self.assert_tree = assert_tree
self.root = root
def fit(self, X, y=None):
if self.assert_tree:
assert self.root is not None
self.hierarchy = nx.bfs_tree(self.hierarchy, self.root)
return self
def transform(self, X, y=None):
        ''' From each set value in the feature matrix,
        traverse upwards in the hierarchy (including multiple parents in DAGs),
        and set all ancestor nodes to one.'''
hierarchy = self.hierarchy
X_out = np.zeros(X.shape, dtype=np.bool_)
samples, relevant_topics, _ = sp.find(X)
for sample, topic in zip(samples, relevant_topics):
X_out[sample, topic] = 1
ancestors = nx.ancestors(hierarchy, topic)
for ancestor in ancestors:
X_out[sample, ancestor] = 1
return X_out
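# A minimal usage sketch for BinarySA (hypothetical DAG and sparse matrix):
#
#     bsa = BinarySA(hierarchy)
#     X_bin = bsa.fit(X).transform(X)  # every ancestor of a set topic becomes 1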
|
iwm911/plaso
|
refs/heads/master
|
plaso/parsers/mac_wifi_test.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mac wifi.log parser."""
import pytz
import unittest
# pylint: disable=unused-import
from plaso.formatters import mac_wifi as mac_wifi_formatter
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import mac_wifi as mac_wifi_parser
from plaso.parsers import test_lib
class MacWifiUnitTest(test_lib.ParserTestCase):
"""Tests for the Mac wifi.log parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
pre_obj = event.PreprocessObject()
pre_obj.year = 2013
pre_obj.zone = pytz.timezone('UTC')
self._parser = mac_wifi_parser.MacWifiLogParser(pre_obj, None)
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath(['wifi.log'])
event_generator = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjects(event_generator)
self.assertEqual(len(event_objects), 9)
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-11-14 20:36:37.222')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'airportdProcessDLILEvent')
self.assertEqual(event_object.action, u'Interface en0 turn up.')
self.assertEqual(event_object.text, u'en0 attached (up)')
expected_msg = (
u'Action: Interface en0 turn up. '
u'(airportdProcessDLILEvent) '
u'Log: en0 attached (up)')
expected_msg_short = (
u'Action: Interface en0 turn up.')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[1]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-11-14 20:36:43.818')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'_doAutoJoin')
self.assertEqual(event_object.action, u'Wifi connected to SSID CampusNet')
expected_text = (
u'Already associated to \u201cCampusNet\u201d. Bailing on auto-join.')
self.assertEqual(event_object.text, expected_text)
event_object = event_objects[2]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-11-14 21:50:52.395')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'_handleLinkEvent')
expected_string = (
u'Unable to process link event, op mode request returned -3903 '
u'(Operation not supported)')
self.assertEqual(event_object.action, expected_string)
self.assertEqual(event_object.text, expected_string)
event_object = event_objects[5]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-11-14 21:52:09.883')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(u'airportd[88]', event_object.agent)
self.assertEqual(u'_processSystemPSKAssoc', event_object.function)
expected_action = (
u'New wifi configured. BSSID: 88:30:8a:7a:61:88, SSID: AndroidAP, '
u'Security: WPA2 Personal.')
self.assertEqual(event_object.action, expected_action)
expected_text = (
u'No password for network <CWNetwork: 0x7fdfe970b250> '
u'[ssid=AndroidAP, bssid=88:30:8a:7a:61:88, security=WPA2 '
u'Personal, rssi=-21, channel=<CWChannel: 0x7fdfe9712870> '
u'[channelNumber=11(2GHz), channelWidth={20MHz}], ibss=0] '
u'in the system keychain')
self.assertEqual(event_object.text, expected_text)
event_object = event_objects[7]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-12-31 23:59:38.165')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[8]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2014-01-01 01:12:17.311')
self.assertEqual(event_object.timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
|
nmrao/robotframework
|
refs/heads/master
|
src/robot/model/keyword.py
|
12
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from operator import attrgetter
from robot.utils import setter, unic
from .itemlist import ItemList
from .message import Message, Messages
from .modelobject import ModelObject
from .tags import Tags
class Keyword(ModelObject):
"""Base model for single keyword."""
__slots__ = ['parent', '_name', 'doc', 'args', 'assign', 'tags', 'timeout',
'type', '_sort_key', '_next_child_sort_key']
KEYWORD_TYPE = 'kw'
SETUP_TYPE = 'setup'
TEARDOWN_TYPE = 'teardown'
FOR_LOOP_TYPE = 'for'
FOR_ITEM_TYPE = 'foritem'
keyword_class = None
message_class = Message
def __init__(self, name='', doc='', args=(), assign=(), tags=(),
timeout=None, type='kw'):
#: :class:`~.model.testsuite.TestSuite` or
#: :class:`~.model.testcase.TestCase` or
#: :class:`~.model.keyword.Keyword` that contains this keyword.
self.parent = None
self._name = name
#: Keyword documentation.
self.doc = doc
#: Keyword arguments as a list of strings.
self.args = args
#: Assigned variables as a list of strings.
self.assign = assign
#: Keyword tags as a list like :class:`~.model.tags.Tags` object.
self.tags = tags
#: Keyword timeout.
self.timeout = timeout
#: Keyword type as a string. See class level ``XXX_TYPE`` constants.
self.type = type
#: Keyword messages as :class:`~.model.message.Message` instances.
self.messages = None
#: Child keywords as :class:`~.model.keyword.Keyword` instances.
self.keywords = None
self._sort_key = -1
self._next_child_sort_key = 0
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
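# Creation-order bookkeeping: when a keyword gets a parent, it asks the
# parent for `_child_sort_key`, a counter the parent increments for every
# child keyword or message it receives. The `children` property later merges
# keywords and messages by this key.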
@setter
def parent(self, parent):
if parent and parent is not self.parent:
self._sort_key = getattr(parent, '_child_sort_key', -1)
return parent
@property
def _child_sort_key(self):
self._next_child_sort_key += 1
return self._next_child_sort_key
@setter
def tags(self, tags):
return Tags(tags)
@setter
def keywords(self, keywords):
return Keywords(self.keyword_class or self.__class__, self, keywords)
@setter
def messages(self, messages):
return Messages(self.message_class, self, messages)
@property
def children(self):
"""Child keywords and messages in creation order."""
# It would be cleaner to store keywords/messages in same `children`
# list and turn `keywords` and `messages` to properties that pick items
# from it. That would require bigger changes to the model, though.
return sorted(chain(self.keywords, self.messages),
key=attrgetter('_sort_key'))
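# Keyword ids append the 1-based position among the parent's keywords to the
# parent id (for example 's1-t1-k2'); a keyword without a parent falls back
# to the root id 'k1'.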
@property
def id(self):
if not self.parent:
return 'k1'
return '%s-k%d' % (self.parent.id, self.parent.keywords.index(self)+1)
def visit(self, visitor):
visitor.visit_keyword(self)
class Keywords(ItemList):
__slots__ = []
def __init__(self, keyword_class=Keyword, parent=None, keywords=None):
ItemList.__init__(self, keyword_class, {'parent': parent}, keywords)
@property
def setup(self):
return self[0] if (self and self[0].type == 'setup') else None
@property
def teardown(self):
return self[-1] if (self and self[-1].type == 'teardown') else None
@property
def all(self):
return self
@property
def normal(self):
kws = [kw for kw in self if kw.type not in ('setup', 'teardown')]
return Keywords(self._item_class, self._common_attrs['parent'], kws)
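# Replacing a keyword in place keeps the old item's sort key so that the
# creation-order view exposed by Keyword.children stays stable.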
def __setitem__(self, index, item):
old = self[index]
ItemList.__setitem__(self, index, item)
self[index]._sort_key = old._sort_key
|
ageron/tensorflow
|
refs/heads/master
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma_test.py
|
25
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VARMA.
Tests VARMA model building and utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import varma
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MakeModelTest(test.TestCase):
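# Each test below builds a small VARMA model, defines a training loss over a
# two-step toy series, and evaluates the loss once to verify that the graph
# can be constructed and run without errors.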
def test_ar_smaller(self):
model = varma.VARMA(
autoregressive_order=0,
moving_average_order=3)
model.initialize_graph()
outputs = model.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_ma_smaller(self):
model = varma.VARMA(
autoregressive_order=6,
moving_average_order=3,
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=7))
model.initialize_graph()
outputs = model.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant(
[[[1.] * 7, [2.] * 7]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_make_ensemble_no_errors(self):
with variable_scope.variable_scope("model_one"):
model_one = varma.VARMA(10, 5)
with variable_scope.variable_scope("model_two"):
model_two = varma.VARMA(0, 3)
configuration = state_space_model.StateSpaceModelConfiguration()
ensemble = state_space_model.StateSpaceIndependentEnsemble(
ensemble_members=[model_one, model_two],
configuration=configuration)
ensemble.initialize_graph()
outputs = ensemble.define_loss(
features={
TrainEvalFeatures.TIMES: constant_op.constant([[1, 2]]),
TrainEvalFeatures.VALUES: constant_op.constant([[[1.], [2.]]])},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run([initializer])
outputs.loss.eval()
if __name__ == "__main__":
test.main()
|
korealerts1/sentry
|
refs/heads/master
|
tests/sentry/metrics/test_statsd.py
|
27
|
from __future__ import absolute_import
from mock import patch
from sentry.metrics.statsd import StatsdMetricsBackend
from sentry.testutils import TestCase
class StatsdMetricsBackendTest(TestCase):
def setUp(self):
self.backend = StatsdMetricsBackend(prefix='sentrytest.')
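# Note: the assertions below assume the statsd client API where incr/timing
# take (stat, value, sample_rate); the backend is expected to prepend the
# configured 'sentrytest.' prefix to every stat name.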
@patch('statsd.StatsClient.incr')
def test_incr(self, mock_incr):
self.backend.incr('foo')
mock_incr.assert_called_once_with('sentrytest.foo', 1, 1)
@patch('statsd.StatsClient.timing')
def test_timing(self, mock_timing):
self.backend.timing('foo', 30)
mock_timing.assert_called_once_with('sentrytest.foo', 30, 1)
|
PeterWangPo/Django-facebook
|
refs/heads/master
|
docs/docs_env/Lib/encodings/iso8859_5.py
|
593
|
""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
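# codecs.charmap_build inverts the 256-entry decoding table above into a
# character-to-byte mapping used by charmap_encode.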
encoding_table=codecs.charmap_build(decoding_table)
|
demon-ru/iml-crm
|
refs/heads/master
|
addons/stock/wizard/make_procurement_product.py
|
95
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class make_procurement(osv.osv_memory):
_name = 'make.procurement'
_description = 'Make Procurements'
def onchange_product_id(self, cr, uid, ids, prod_id):
""" On Change of Product ID getting the value of related UoM.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param prod_id: Changed ID of Product
@return: A dictionary which gives the UoM of the changed Product
"""
product = self.pool.get('product.product').browse(cr, uid, prod_id)
return {'value': {'uom_id': product.uom_id.id}}
_columns = {
'qty': fields.float('Quantity', digits=(16,2), required=True),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=1),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'date_planned': fields.date('Planned Date', required=True),
}
_defaults = {
'date_planned': fields.date.context_today,
'qty': lambda *args: 1.0,
}
def make_procurement(self, cr, uid, ids, context=None):
""" Creates procurement order for selected product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary which loads Procurement form view.
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context).login
wh_obj = self.pool.get('stock.warehouse')
procurement_obj = self.pool.get('procurement.order')
data_obj = self.pool.get('ir.model.data')
for proc in self.browse(cr, uid, ids, context=context):
wh = wh_obj.browse(cr, uid, proc.warehouse_id.id, context=context)
procure_id = procurement_obj.create(cr, uid, {
'name':'INT: '+str(user),
'date_planned': proc.date_planned,
'product_id': proc.product_id.id,
'product_qty': proc.qty,
'product_uom': proc.uom_id.id,
'location_id': wh.lot_stock_id.id,
'company_id': wh.company_id.id,
})
procurement_obj.signal_workflow(cr, uid, [procure_id], 'button_confirm')
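# Resolve the procurement tree and form views by their XML ids so that the
# returned window action opens the newly created procurement order.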
id2 = data_obj._get_id(cr, uid, 'procurement', 'procurement_tree_view')
id3 = data_obj._get_id(cr, uid, 'procurement', 'procurement_form_view')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
if id3:
id3 = data_obj.browse(cr, uid, id3, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'procurement.order',
'res_id' : procure_id,
'views': [(id3,'form'),(id2,'tree')],
'type': 'ir.actions.act_window',
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
if context is None:
context = {}
record_id = context.get('active_id')
if context.get('active_model') == 'product.template':
product_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', '=', context.get('active_id'))], context=context)
if len(product_ids) == 1:
record_id = product_ids[0]
else:
raise orm.except_orm(_('Warning'), _('Please use the Product Variant view to request a procurement.'))
res = super(make_procurement, self).default_get(cr, uid, fields, context=context)
if record_id and 'product_id' in fields:
proxy = self.pool.get('product.product')
product_ids = proxy.search(cr, uid, [('id', '=', record_id)], context=context, limit=1)
if product_ids:
product_id = product_ids[0]
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['product_id'] = product.id
res['uom_id'] = product.uom_id.id
if 'warehouse_id' in fields:
warehouse_id = self.pool.get('stock.warehouse').search(cr, uid, [], context=context)
res['warehouse_id'] = warehouse_id[0] if warehouse_id else False
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
marcoserafini/h-store
|
refs/heads/master
|
tools/traces/hstoretraces/__init__.py
|
9
|
# -*- coding: utf-8 -*-
from common import *
from storedprocedure import StoredProcedure
from traceelements import TransactionTrace, QueryTrace
|
bank-netforce/netforce
|
refs/heads/stable-3.1
|
netforce_purchase/netforce_purchase/models/purchase_order.py
|
2
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
import time
from netforce.access import get_active_company, get_active_user, set_active_user
from . import utils
from decimal import Decimal
class PurchaseOrder(Model):
_name = "purchase.order"
_string = "Purchase Order"
_audit_log = True
_name_field = "number"
_multi_company = True
_key = ["company_id", "number"]
_fields = {
"number": fields.Char("Number", required=True, search=True),
"ref": fields.Char("Ref", search=True),
"contact_id": fields.Many2One("contact", "Supplier", required=True, search=True),
"customer_id": fields.Many2One("contact", "Customer", search=True),
"date": fields.Date("Date", required=True, search=True),
"date_required": fields.Date("Required Date"),
"state": fields.Selection([("draft", "Draft"), ("confirmed", "Confirmed"), ("done", "Completed"), ("voided", "Voided")], "Status", required=True),
"lines": fields.One2Many("purchase.order.line", "order_id", "Lines"),
"amount_subtotal": fields.Decimal("Subtotal", function="get_amount", function_multi=True, store=True),
"amount_tax": fields.Decimal("Tax Amount", function="get_amount", function_multi=True, store=True),
"amount_total": fields.Decimal("Total", function="get_amount", function_multi=True, store=True),
"amount_total_cur": fields.Decimal("Total", function="get_amount", function_multi=True, store=True),
"amount_total_words": fields.Char("Total Words", function="get_amount_total_words"),
"qty_total": fields.Decimal("Total Quantity", function="get_qty_total"),
"currency_id": fields.Many2One("currency", "Currency", required=True),
"tax_type": fields.Selection([["tax_ex", "Tax Exclusive"], ["tax_in", "Tax Inclusive"], ["no_tax", "No Tax"]], "Tax Type", required=True),
"invoice_lines": fields.One2Many("account.invoice.line", "purch_id", "Invoice Lines"),
#"stock_moves": fields.One2Many("stock.move","purch_id","Stock Moves"),
"invoices": fields.One2Many("account.invoice", "related_id", "Invoices"),
"pickings": fields.Many2Many("stock.picking", "Stock Pickings", function="get_pickings"),
"is_delivered": fields.Boolean("Delivered", function="get_delivered"),
"is_paid": fields.Boolean("Paid", function="get_paid"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"location_id": fields.Many2One("stock.location", "Warehouse"), # XXX: deprecated
"delivery_date": fields.Date("Delivery Date"),
"ship_method_id": fields.Many2One("ship.method", "Shipping Method"), # XXX: deprecated
"payment_terms": fields.Text("Payment Terms"),
"ship_term_id": fields.Many2One("ship.term", "Shipping Terms"),
"price_list_id": fields.Many2One("price.list", "Price List", condition=[["type", "=", "purchase"]]),
"documents": fields.One2Many("document", "related_id", "Documents"),
"company_id": fields.Many2One("company", "Company"),
"purchase_type_id": fields.Many2One("purchase.type", "Purchase Type"),
"other_info": fields.Text("Other Info"),
"bill_address_id": fields.Many2One("address", "Billing Address"),
"ship_address_id": fields.Many2One("address", "Shipping Address"),
"sequence_id": fields.Many2One("sequence", "Number Sequence"),
"stock_moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
"agg_amount_total": fields.Decimal("Total Amount", agg_function=["sum", "amount_total"]),
"year": fields.Char("Year", sql_function=["year", "date"]),
"quarter": fields.Char("Quarter", sql_function=["quarter", "date"]),
"month": fields.Char("Month", sql_function=["month", "date"]),
"week": fields.Char("Week", sql_function=["week", "date"]),
"agg_amount_subtotal": fields.Decimal("Total Amount w/o Tax", agg_function=["sum", "amount_subtotal"]),
"user_id": fields.Many2One("base.user", "Owner", search=True),
"emails": fields.One2Many("email.message", "related_id", "Emails"),
}
_order = "date desc,number desc"
_sql_constraints = [
("key_uniq", "unique (company_id, number)", "The number of each company must be unique!")
]
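# Number generation: draw candidates from the 'purchase_order' sequence until
# one is unused; the search runs as user 1 (presumably the admin) so access
# rules cannot hide an existing order with the same number.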
def _get_number(self, context={}):
seq_id = get_model("sequence").find_sequence(type="purchase_order")
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id,context=context)
user_id = get_active_user()
set_active_user(1)
res = self.search([["number", "=", num]])
set_active_user(user_id)
if not res:
return num
get_model("sequence").increment_number(seq_id,context=context)
def _get_currency(self, context={}):
settings = get_model("settings").browse(1)
return settings.currency_id.id
_defaults = {
"state": "draft",
"date": lambda *a: time.strftime("%Y-%m-%d"),
"number": _get_number,
"currency_id": _get_currency,
"tax_type": "tax_ex",
"company_id": lambda *a: get_active_company(),
"user_id": lambda *a: get_active_user(),
}
def create(self, vals, **kw):
id = super(PurchaseOrder, self).create(vals, **kw)
self.function_store([id])
return id
def write(self, ids, vals, **kw):
super(PurchaseOrder, self).write(ids, vals, **kw)
self.function_store(ids)
def confirm(self, ids, context={}):
settings = get_model("settings").browse(1)
for obj in self.browse(ids):
if obj.state != "draft":
raise Exception("Invalid state")
for line in obj.lines:
prod = line.product_id
if prod and prod.type in ("stock", "consumable", "bundle") and not line.location_id:
raise Exception("Missing location for product %s" % prod.code)
obj.write({"state": "confirmed"})
if settings.purchase_copy_picking:
res=obj.copy_to_picking()
picking_id=res["picking_id"]
get_model("stock.picking").pending([picking_id])
if settings.purchase_copy_invoice:
obj.copy_to_invoice()
obj.trigger("confirm")
def done(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "confirmed":
raise Exception("Invalid state")
obj.write({"state": "done"})
def reopen(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "done":
raise Exception("Invalid state")
obj.write({"state": "confirmed"})
def to_draft(self, ids, context={}):
for obj in self.browse(ids):
obj.write({"state": "draft"})
def get_amount(self, ids, context={}):
settings = get_model("settings").browse(1)
res = {}
for obj in self.browse(ids):
vals = {}
subtotal = 0
tax = 0
for line in obj.lines:
if line.tax_id:
line_tax = get_model("account.tax.rate").compute_tax(
line.tax_id.id, line.amount, tax_type=obj.tax_type)
else:
line_tax = 0
tax += line_tax
if obj.tax_type == "tax_in":
subtotal += line.amount - line_tax
else:
subtotal += line.amount
vals["amount_subtotal"] = subtotal
vals["amount_tax"] = tax
vals["amount_total"] = subtotal + tax
vals["amount_total_cur"] = get_model("currency").convert(
vals["amount_total"], obj.currency_id.id, settings.currency_id.id)
res[obj.id] = vals
return res
def get_qty_total(self, ids, context={}):
res = {}
for obj in self.browse(ids):
qty = sum([line.qty for line in obj.lines])
res[obj.id] = qty or 0
return res
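# Client-side recomputation used by the onchange handlers below: re-derive
# each line amount from qty * unit_price minus fixed and percentage
# discounts, then rebuild the subtotal/tax/total figures.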
def update_amounts(self, context):
settings=get_model("settings").browse(1)
data = context["data"]
data["amount_subtotal"] = 0
data["amount_tax"] = 0
tax_type = data["tax_type"]
for line in data["lines"]:
if not line:
continue
amt = Decimal(((line.get("qty") or 0) * (line.get("unit_price") or 0)) - (line.get("discount_amount") or 0))
if line.get("discount_percent"):
disc = amt * line["discount_percent"] / Decimal(100)
amt -= disc
line["amount"] = amt
new_cur=get_model("currency").convert(amt, int(data.get("currency_id")), settings.currency_id.id)
line['amount_cur']=new_cur and new_cur or None
tax_id = line.get("tax_id")
if tax_id:
tax = get_model("account.tax.rate").compute_tax(tax_id, amt, tax_type=tax_type)
data["amount_tax"] += tax
else:
tax = 0
if tax_type == "tax_in":
data["amount_subtotal"] += amt - tax
else:
data["amount_subtotal"] += amt
data["amount_total"] = data["amount_subtotal"] + data["amount_tax"]
return data
def onchange_product(self, context):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
line["description"] = prod.description
line["qty"] = 1
if prod.uom_id is not None:
line["uom_id"] = prod.uom_id.id
pricelist_id = data["price_list_id"]
price = None
if pricelist_id:
price = get_model("price.list").get_price(pricelist_id, prod.id, 1)
price_list = get_model("price.list").browse(pricelist_id)
price_currency_id = price_list.currency_id.id
if price is None:
price = prod.purchase_price
settings = get_model("settings").browse(1)
price_currency_id = settings.currency_id.id
if price is not None:
currency_id = data["currency_id"]
price_cur = get_model("currency").convert(price, price_currency_id, currency_id)
line["unit_price"] = price_cur
if prod.purchase_tax_id is not None:
line["tax_id"] = prod.purchase_tax_id.id
if prod.location_id:
line["location_id"] = prod.location_id.id
elif prod.locations:
line["location_id"] = prod.locations[0].location_id.id
#TODO
data = self.update_amounts(context)
return data
def onchange_qty(self, context):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
pricelist_id = data["price_list_id"]
qty = line["qty"]
price = None
if pricelist_id:
price = get_model("price.list").get_price(pricelist_id, prod.id, qty)
price_list = get_model("price.list").browse(pricelist_id)
price_currency_id = price_list.currency_id.id
if price is None:
price = prod.purchase_price
settings = get_model("settings").browse(1)
price_currency_id = settings.currency_id.id
if price is not None:
currency_id = data["currency_id"]
price_cur = get_model("currency").convert(price, price_currency_id, currency_id)
line["unit_price"] = price_cur
data = self.update_amounts(context)
return data
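# Goods-receipt copy: only stockable/consumable lines with a remaining
# (ordered minus received) quantity are transferred; unit costs are stripped
# of included tax where applicable and converted to the company currency.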
def copy_to_picking(self, ids, context={}):
settings=get_model("settings").browse(1)
obj = self.browse(ids[0])
contact = obj.contact_id
pick_vals = {
"type": "in",
"ref": obj.number,
"related_id": "purchase.order,%s" % obj.id,
"contact_id": contact.id,
"currency_id": obj.currency_id.id,
"ship_method_id": obj.ship_method_id.id,
"lines": [],
}
if obj.delivery_date:
pick_vals["date"]=obj.delivery_date
if contact and contact.pick_in_journal_id:
pick_vals["journal_id"] = contact.pick_in_journal_id.id
res = get_model("stock.location").search([["type", "=", "supplier"]],order="id")
if not res:
raise Exception("Supplier location not found")
supp_loc_id = res[0]
res = get_model("stock.location").search([["type", "=", "internal"]])
if not res:
raise Exception("Warehouse not found")
wh_loc_id = res[0]
if not settings.currency_id:
raise Exception("Missing currency in financial settings")
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
remain_qty = (line.qty_stock or line.qty) - line.qty_received
if remain_qty <= 0:
continue
unit_price=line.amount/line.qty if line.qty else 0
if obj.tax_type=="tax_in":
if line.tax_id:
tax_amt = get_model("account.tax.rate").compute_tax(
line.tax_id.id, unit_price, tax_type=obj.tax_type)
else:
tax_amt = 0
cost_price_cur=round(unit_price-tax_amt,2)
else:
cost_price_cur=unit_price
if line.qty_stock:
purch_uom=prod.uom_id
if not prod.purchase_to_stock_uom_factor:
raise Exception("Missing purchase order to stock UoM factor for product %s"%prod.code)
cost_price_cur/=prod.purchase_to_stock_uom_factor
else:
purch_uom=line.uom_id
cost_price=get_model("currency").convert(cost_price_cur,obj.currency_id.id,settings.currency_id.id,date=pick_vals.get("date"))
cost_amount=cost_price*remain_qty
line_vals = {
"product_id": prod.id,
"qty": remain_qty,
"uom_id": purch_uom.id,
"cost_price_cur": cost_price_cur,
"cost_price": cost_price,
"cost_amount": cost_amount,
"location_from_id": supp_loc_id,
"location_to_id": line.location_id.id or wh_loc_id,
"related_id": "purchase.order,%s" % obj.id,
}
pick_vals["lines"].append(("create", line_vals))
if not pick_vals["lines"]:
raise Exception("Nothing left to receive")
pick_id = get_model("stock.picking").create(pick_vals, {"pick_type": "in"})
pick = get_model("stock.picking").browse(pick_id)
pick.set_currency_rate()
return {
"next": {
"name": "pick_in",
"mode": "form",
"active_id": pick_id,
},
"flash": "Goods receipt %s created from purchase order %s" % (pick.number, obj.number),
"picking_id": pick_id,
}
def copy_to_invoice(self, ids, context={}):
id = ids[0]
obj = self.browse(id)
contact = obj.contact_id
inv_vals = {
"type": "in",
"inv_type": "invoice",
"ref": obj.number,
"related_id": "purchase.order,%s" % obj.id,
"contact_id": obj.contact_id.id,
"currency_id": obj.currency_id.id,
"lines": [],
"tax_type": obj.tax_type,
}
if contact.purchase_journal_id:
inv_vals["journal_id"] = contact.purchase_journal_id.id
if contact.purchase_journal_id.sequence_id:
inv_vals["sequence_id"] = contact.purchase_journal_id.sequence_id.id
for line in obj.lines:
prod = line.product_id
remain_qty = line.qty - line.qty_invoiced
if remain_qty <= 0:
continue
# get account for purchase invoice
purch_acc_id=None
if prod:
# 1. get from product
purch_acc_id=prod.purchase_account_id and prod.purchase_account_id.id or None
# 2. if not get from master / parent product
if not purch_acc_id and prod.parent_id:
purch_acc_id=prod.parent_id.purchase_account_id.id
# 3. if not get from product category
categ=prod.categ_id
if categ and not purch_acc_id:
purch_acc_id= categ.purchase_account_id and categ.purchase_account_id.id or None
#if not purch_acc_id:
#raise Exception("Missing purchase account configure for product [%s]" % prod.name)
line_vals = {
"product_id": prod.id,
"description": line.description,
"qty": remain_qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
"account_id": purch_acc_id,
"tax_id": line.tax_id.id,
"amount": line.amount,
}
inv_vals["lines"].append(("create", line_vals))
if not inv_vals["lines"]:
raise Exception("Nothing left to invoice")
inv_id = get_model("account.invoice").create(inv_vals, {"type": "in", "inv_type": "invoice"})
inv = get_model("account.invoice").browse(inv_id)
return {
"next": {
"name": "view_invoice",
"active_id": inv_id,
},
"flash": "Invoice %s created from purchase order %s" % (inv.number, obj.number),
}
def get_delivered(self, ids, context={}):
vals = {}
#import pdb; pdb.set_trace()
for obj in self.browse(ids):
is_delivered = True
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
remain_qty = line.qty - line.qty_received
if remain_qty > 0:
is_delivered = False
break
vals[obj.id] = is_delivered
return vals
def get_paid(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amt_paid = 0
for inv in obj.invoices:
if inv.state != "paid":
continue
amt_paid += inv.amount_total
is_paid = amt_paid >= obj.amount_total
vals[obj.id] = is_paid
return vals
def void(self, ids, context={}):
obj = self.browse(ids)[0]
for pick in obj.pickings:
if pick.state != "voided":
raise Exception("There are still goods receipts for this purchase order")
for inv in obj.invoices:
if inv.state != "voided":
raise Exception("There are still invoices for this purchase order")
obj.write({"state": "voided"})
def copy(self, ids, context):
obj = self.browse(ids)[0]
vals = {
"contact_id": obj.contact_id.id,
"date": obj.date,
"ref": obj.ref,
"currency_id": obj.currency_id.id,
"tax_type": obj.tax_type,
"lines": [],
}
for line in obj.lines:
line_vals = {
"product_id": line.product_id.id,
"description": line.description,
"qty": line.qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
"tax_id": line.tax_id.id,
}
vals["lines"].append(("create", line_vals))
new_id = self.create(vals)
new_obj = self.browse(new_id)
return {
"next": {
"name": "purchase",
"mode": "form",
"active_id": new_id,
},
"flash": "Purchase order %s copied to %s" % (obj.number, new_obj.number),
}
def get_invoices(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
inv_ids = []
for inv_line in obj.invoice_lines:
inv_id = inv_line.invoice_id.id
if inv_id not in inv_ids:
inv_ids.append(inv_id)
vals[obj.id] = inv_ids
return vals
def get_pickings(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
pick_ids = []
for move in obj.stock_moves:
pick_id = move.picking_id.id
if pick_id not in pick_ids:
pick_ids.append(pick_id)
vals[obj.id] = pick_ids
return vals
def onchange_contact(self, context):
data = context["data"]
contact_id = data.get("contact_id")
if not contact_id:
return {}
contact = get_model("contact").browse(contact_id)
data["payment_terms"] = contact.payment_terms
data["price_list_id"] = contact.purchase_price_list_id.id
if contact.currency_id:
data["currency_id"] = contact.currency_id.id
else:
settings = get_model("settings").browse(1)
data["currency_id"] = settings.currency_id.id
return data
def check_received_qtys(self, ids, context={}):
obj = self.browse(ids)[0]
for line in obj.lines:
if line.qty_received > (line.qty_stock or line.qty):
raise Exception("Can not receive excess quantity for purchase order %s and product %s (order qty: %s, received qty: %s)" % (
obj.number, line.product_id.code, line.qty_stock or line.qty, line.qty_received))
def get_purchase_form_template(self, ids, context={}):
obj = self.browse(ids)[0]
if obj.state == "draft":
return "rfq_form"
else:
return "purchase_form"
def get_amount_total_words(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amount_total_words = utils.num2word(obj.amount_total)
vals[obj.id] = amount_total_words
return vals
def onchange_sequence(self, context={}):
data = context["data"]
context['date'] = data['date']
seq_id = data["sequence_id"]
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id, context=context)
res = self.search([["number", "=", num]])
if not res:
break
get_model("sequence").increment_number(seq_id, context=context)
data["number"] = num
return data
def delete(self, ids, **kw):
for obj in self.browse(ids):
if obj.state in ("confirmed", "done"):
raise Exception("Can not delete purchase order in this status")
super().delete(ids, **kw)
def onchange_currency(self,context={}):
settings=get_model("settings").browse(1)
data = context["data"]
for line in data["lines"]:
if not line:
continue
amt = (line.get("qty") or 0) * (line.get("unit_price") or 0)
new_cur=get_model("currency").convert(amt, int(data.get("currency_id")), settings.currency_id.id)
line['amount_cur']=new_cur and new_cur or None
return data
def view_purchase(self, ids, context={}):
obj=get_model("purchase.order.line").browse(ids)[0]
return {
'next': {
'name': 'purchase',
'active_id': obj.order_id.id,
'mode': 'form',
},
}
def copy_to_purchase_return(self,ids,context={}):
seq_id = get_model("sequence").find_sequence(type="purchase_return")
if not seq_id:
raise Exception("Missing Sequence purchase return")
for obj in self.browse(ids):
order_vals = {}
order_vals = {
"contact_id":obj.contact_id.id,
"date":obj.date,
"ref":obj.number,
"currency_id":obj.currency_id.id,
"tax_type":obj.tax_type,
"bill_address_id":obj.bill_address_id.id,
"ship_address_id":obj.ship_address_id.id,
"price_list_id": obj.price_list_id.id,
"lines":[],
}
for line in obj.lines:
line_vals = {
"product_id":line.product_id.id,
"description":line.description,
"qty":line.qty,
"uom_id":line.uom_id.id,
"unit_price":line.unit_price,
"tax_id":line.tax_id.id,
"amount":line.amount,
"location_id":line.location_id.id,
}
order_vals["lines"].append(("create", line_vals))
purchase_id = get_model("purchase.return").create(order_vals)
purchase = get_model("purchase.return").browse(purchase_id)
return {
"next": {
"name": "purchase_return",
"mode": "form",
"active_id": purchase_id,
},
"flash": "Purchase Return %s created from purchases order %s" % (purchase.number, obj.number),
}
PurchaseOrder.register()
|
mgagne/nova
|
refs/heads/master
|
nova/tests/functional/api/__init__.py
|
178
|
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`api` -- OpenStack API client, for testing rather than production
======================================================================
"""
|
repotvsupertuga/repo
|
refs/heads/master
|
plugin.video.pancas/resources/lib/libraries/f4mproxy/flvlib/scripts/retimestamp_flv.py
|
98
|
import os
import sys
import shutil
import logging
import tempfile
from optparse import OptionParser
from flvlib import __versionstr__
from flvlib.constants import TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT
from flvlib.constants import AAC_PACKET_TYPE_SEQUENCE_HEADER
from flvlib.constants import H264_PACKET_TYPE_SEQUENCE_HEADER
from flvlib.primitives import make_ui8, make_ui24, make_si32_extended
from flvlib.astypes import MalformedFLV
from flvlib.tags import FLV, EndOfFile, AudioTag, VideoTag, ScriptTag
from flvlib.helpers import force_remove
log = logging.getLogger('flvlib.retimestamp-flv')
class_to_tag = {
AudioTag: TAG_TYPE_AUDIO,
VideoTag: TAG_TYPE_VIDEO,
ScriptTag: TAG_TYPE_SCRIPT
}
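# Only "real" media frames are offset: script tags and AAC/H.264 sequence
# headers keep their original timestamps, presumably because players expect
# the headers at timestamp 0.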
def is_nonheader_media(tag):
if isinstance(tag, ScriptTag):
return False
if isinstance(tag, AudioTag):
return tag.aac_packet_type != AAC_PACKET_TYPE_SEQUENCE_HEADER
if isinstance(tag, VideoTag):
return tag.h264_packet_type != H264_PACKET_TYPE_SEQUENCE_HEADER
def output_offset_tag(fi, fo, tag, offset):
new_timestamp = tag.timestamp - offset
# do not offset non-media and media header
if not is_nonheader_media(tag):
new_timestamp = tag.timestamp
# write the FLV tag value
fo.write(make_ui8(class_to_tag[tag.__class__]))
# the tag size remains unchanged
fo.write(make_ui24(tag.size))
# write the new timestamp
fo.write(make_si32_extended(new_timestamp))
# seek inside the input file
# seek position: tag offset + tag (1) + size (3) + timestamp (4)
fi.seek(tag.offset + 8, os.SEEK_SET)
# copy the tag content to the output file
# content size: tag size + stream ID (3) + previous tag size (4)
fo.write(fi.read(tag.size + 7))
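# In-place pass: the timestamp of the first non-header media tag becomes the
# offset, and every subsequent tag's 4-byte extended timestamp field (located
# 4 bytes into the tag) is rewritten directly in the file.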
def retimestamp_tags_inplace(f, fu):
flv = FLV(f)
offset = None
for tag in flv.iter_tags():
if offset is None and is_nonheader_media(tag):
offset = tag.timestamp
log.debug("Determined the offset to be %d", offset)
# optimise for offset == 0, which in case of inplace updating is a noop
if offset is not None and offset != 0:
fu.seek(tag.offset + 4, os.SEEK_SET)
fu.write(make_si32_extended(tag.timestamp - offset))
def retimestamp_file_inplace(inpath):
try:
f = open(inpath, 'rb')
fu = open(inpath, 'rb+')
except IOError, (errno, strerror):
log.error("Failed to open `%s': %s", inpath, strerror)
return False
try:
retimestamp_tags_inplace(f, fu)
except IOError, (errno, strerror):
log.error("Failed to create the retimestamped file: %s", strerror)
return False
except MalformedFLV, e:
message = e[0] % e[1:]
log.error("The file `%s' is not a valid FLV file: %s", inpath, message)
return False
except EndOfFile:
log.error("Unexpected end of file on file `%s'", inpath)
return False
f.close()
fu.close()
return True
def retimestamp_file_atomically(inpath, outpath):
try:
f = open(inpath, 'rb')
except IOError, (errno, strerror):
log.error("Failed to open `%s': %s", inpath, strerror)
return False
if outpath:
try:
fo = open(outpath, 'w+b')
except IOError, (errno, strerror):
log.error("Failed to open `%s': %s", outpath, strerror)
return False
else:
try:
fd, temppath = tempfile.mkstemp()
# preserve the permission bits
shutil.copymode(inpath, temppath)
fo = os.fdopen(fd, 'wb')
except EnvironmentError, (errno, strerror):
log.error("Failed to create temporary file: %s", strerror)
return False
try:
shutil.copyfileobj(f, fo)
except EnvironmentError, (errno, strerror):
log.error("Failed to create temporary copy: %s", strerror)
force_remove(temppath)
return False
f.seek(0)
fo.seek(0)
try:
retimestamp_tags_inplace(f, fo)
except IOError, (errno, strerror):
log.error("Failed to create the retimestamped file: %s", strerror)
if not outpath:
force_remove(temppath)
return False
except MalformedFLV, e:
message = e[0] % e[1:]
log.error("The file `%s' is not a valid FLV file: %s", inpath, message)
if not outpath:
force_remove(temppath)
return False
except EndOfFile:
log.error("Unexpected end of file on file `%s'", inpath)
if not outpath:
force_remove(temppath)
return False
f.close()
fo.close()
if not outpath:
# If we were not writing directly to the output file
# we need to overwrite the original
try:
shutil.move(temppath, inpath)
except EnvironmentError, (errno, strerror):
log.error("Failed to overwrite the original file "
"with the indexed version: %s", strerror)
return False
return True
def retimestamp_file(inpath, outpath=None, inplace=False):
out_text = (outpath and ("into file `%s'" % outpath)) or "and overwriting"
log.debug("Retimestamping file `%s' %s", inpath, out_text)
if inplace:
log.debug("Operating in inplace mode")
return retimestamp_file_inplace(inpath)
else:
log.debug("Not operating in inplace mode, using temporary files")
return retimestamp_file_atomically(inpath, outpath)
def process_options():
usage = "%prog [-i] [-U] file [outfile|file2 file3 ...]"
description = (
"""Rewrites timestamps in FLV files making by the first media tag timestamped
with 0. The rest of the tags is retimestamped relatively. With the -i
(inplace) option modifies the files without creating temporary copies. With
the -U (update) option operates on all parameters, updating the files in
place. Without the -U option accepts one input and one output file path.
""")
version = "%%prog flvlib %s" % __versionstr__
parser = OptionParser(usage=usage, description=description,
version=version)
parser.add_option("-i", "--inplace", action="store_true",
help=("inplace mode, does not create temporary files, but "
"risks corruption in case of errors"))
parser.add_option("-U", "--update", action="store_true",
help=("update mode, overwrites the given files "
"instead of writing to outfile"))
parser.add_option("-v", "--verbose", action="count",
default=0, dest="verbosity",
help="be more verbose, each -v increases verbosity")
options, args = parser.parse_args(sys.argv)
if len(args) < 2:
parser.error("You have to provide at least one file path")
if not options.update and options.inplace:
parser.error("You need to use the update mode if you are updating "
"files in place")
if not options.update and len(args) != 3:
parser.error("You need to provide one infile and one outfile "
"when not using the update mode")
if options.verbosity > 3:
options.verbosity = 3
log.setLevel({0: logging.ERROR, 1: logging.WARNING,
2: logging.INFO, 3: logging.DEBUG}[options.verbosity])
return options, args
def retimestamp_files():
options, args = process_options()
clean_run = True
if not options.update:
clean_run = retimestamp_file(args[1], args[2])
else:
for filename in args[1:]:
if not retimestamp_file(filename, inplace=options.inplace):
clean_run = False
return clean_run
def main():
try:
outcome = retimestamp_files()
except KeyboardInterrupt:
# give the right exit status, 128 + signal number
# signal.SIGINT = 2
sys.exit(128 + 2)
except EnvironmentError, (errno, strerror):
try:
print >>sys.stderr, strerror
except StandardError:
pass
sys.exit(2)
if outcome:
sys.exit(0)
else:
sys.exit(1)
|
cntnboys/410Lab6
|
refs/heads/master
|
build/django/django/core/validators.py
|
49
|
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.utils.encoding import force_text
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
# Compile the regex if it was not passed pre-compiled.
if isinstance(self.regex, six.string_types):
self.regex = re.compile(self.regex, self.flags)
def __call__(self, value):
"""
Validates that the input matches the regular expression
if inverse_match is False, otherwise raises ValidationError.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
@deconstructible
class URLValidator(RegexValidator):
regex = re.compile(
r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
scheme, netloc, path, query, fragment = urlsplit(value)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError):
raise ValidationError(_('Enter a valid integer.'), code='invalid')
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"$)', # quoted-string
re.IGNORECASE)
domain_regex = re.compile(
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))$',
re.IGNORECASE)
literal_regex = re.compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]$',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return isinstance(other, EmailValidator) and (self.domain_whitelist == other.domain_whitelist) and (self.message == other.message) and (self.code == other.code)
validate_email = EmailValidator()
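# A hedged sketch (the sample addresses are illustrative): validate_email
# accepts a dot-atom or quoted-string local part plus a regular domain, a
# whitelisted name such as 'localhost', or a bracketed IP literal.
def _email_validator_example():
    validate_email('user@example.com')    # passes via domain_regex
    validate_email('user@localhost')      # passes via domain_whitelist
    validate_email('user@[127.0.0.1]')    # passes via the literal_regex branch
    try:
        validate_email('not-an-address')  # no '@': raises ValidationError
    except ValidationError:
        pass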
slug_re = re.compile(r'^[-a-zA-Z0-9_]+$')
validate_slug = RegexValidator(slug_re, _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
    Depending on the given parameters, return the appropriate validators for
    the GenericIPAddressField.
    This code lives here because it is exactly the same for the model field
    and the form field.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
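# A hedged sketch (protocol values as documented above): the helper returns a
# ([validators], error_message) pair that a field can unpack directly.
def _ip_address_validators_example():
    validators, message = ip_address_validators('both', unpack_ipv4=False)
    validators[0]('127.0.0.1')   # validate_ipv46_address accepts IPv4...
    validators[0]('::1')         # ...and IPv6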
comma_separated_int_list_re = re.compile(r'^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _('Enter only digits separated by commas.'), 'invalid')
@deconstructible
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return isinstance(other, self.__class__) and (self.limit_value == other.limit_value) and (self.message == other.message) and (self.code == other.code)
@deconstructible
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
@deconstructible
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
@deconstructible
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
@deconstructible
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
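# A hedged sketch of the BaseValidator contract defined above: 'clean'
# preprocesses the value and 'compare' returns True when validation must fail.
def _base_validator_example():
    MaxValueValidator(10)(5)          # 5 > 10 is False, so it passes
    MinLengthValidator(3)('abcd')     # len('abcd') < 3 is False, so it passes
    try:
        MaxLengthValidator(2)('abc')  # len('abc') > 2: raises ValidationError
    except ValidationError:
        pass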
|
hongbin/magnum
|
refs/heads/master
|
magnum/common/pythonk8sclient/client/models/V1beta3_EndpointPort.py
|
15
|
#!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_EndpointPort(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'name': 'str',
'port': 'int',
'protocol': 'str'
}
self.attributeMap = {
'name': 'name',
'port': 'port',
'protocol': 'protocol'
}
#name of this port
self.name = None # str
#port number of the endpoint
self.port = None # int
#protocol for this port; must be UDP or TCP; TCP if unspecified
self.protocol = None # str
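# A hedged usage sketch (not generated code; the field values are
# illustrative): swagger models are plain attribute holders, so callers assign
# the documented fields after construction.
def _endpoint_port_example():
    endpoint_port = V1beta3_EndpointPort()
    endpoint_port.name = 'http'
    endpoint_port.port = 80
    endpoint_port.protocol = 'TCP'   # must be UDP or TCP; TCP if unspecified
    return endpoint_port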
|
cevaris/pants
|
refs/heads/master
|
tests/python/pants_test/backend/graph_info/tasks/test_paths.py
|
16
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.graph_info.tasks.paths import Path, Paths
from pants.base.exceptions import TaskError
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class PathsTest(ConsoleTaskTestBase):
@classmethod
def task_type(cls):
return Paths
def test_only_one_target(self):
target_a = self.make_target('a')
with self.assertRaises(TaskError) as cm:
self.execute_console_task(targets=[target_a])
self.assertIn('Specify two targets please', str(cm.exception))
self.assertIn('found 1', str(cm.exception))
def test_three_targets(self):
target_a = self.make_target('a')
target_b = self.make_target('b')
target_c = self.make_target('c')
with self.assertRaises(TaskError) as cm:
self.execute_console_task(targets=[target_a, target_b, target_c])
self.assertIn('Specify two targets please', str(cm.exception))
self.assertIn('found 3', str(cm.exception))
def test_path_dependency_first_finds_no_paths(self):
    # Not sure if I like this behavior, but adding a test to document it
target_b = self.make_target('b')
target_a = self.make_target('a', dependencies=[target_b])
self.assert_console_output('Found 0 paths', targets=[target_b, target_a])
def test_single_edge_path(self):
target_b = self.make_target('b')
target_a = self.make_target('a', dependencies=[target_b])
self.assert_console_output('Found 1 path',
'',
'\t[a, b]',
targets=[target_a, target_b])
def test_same_target_path(self):
target_b = self.make_target('b')
self.assert_console_output('Found 1 path',
'',
'\t[b]',
targets=[target_b, target_b])
def test_two_paths(self):
target_b = self.make_target('b')
target_inner_1 = self.make_target('inner1', dependencies=[target_b])
target_inner_2 = self.make_target('inner2', dependencies=[target_b])
target_a = self.make_target('a', dependencies=[target_inner_1, target_inner_2])
self.assert_console_output('Found 2 paths',
'',
'\t[a, inner1, b]',
'\t[a, inner2, b]',
targets=[target_a, target_b])
def test_cycle_no_path(self):
target_b = self.make_target('b')
target_inner_1 = self.make_target('inner1')
target_inner_2 = self.make_target('inner2', dependencies=[target_inner_1])
target_a = self.make_target('a', dependencies=[target_inner_1])
target_inner_1.inject_dependency(target_inner_2.address)
self.assert_console_output('Found 0 paths',
targets=[target_a, target_b])
def test_cycle_path(self):
target_b = self.make_target('b')
target_inner_1 = self.make_target('inner1', dependencies=[target_b])
target_inner_2 = self.make_target('inner2', dependencies=[target_inner_1, target_b])
target_inner_1.inject_dependency(target_inner_2.address)
target_a = self.make_target('a', dependencies=[target_inner_1])
self.assert_console_output('Found 3 paths',
'',
'\t[a, inner1, b]',
'\t[a, inner1, inner2, b]',
'\t[a, inner1, inner2, inner1, b]',
targets=[target_a, target_b])
def test_overlapping_paths(self):
target_b = self.make_target('b')
target_inner_1 = self.make_target('inner1', dependencies=[target_b])
target_inner_2 = self.make_target('inner2', dependencies=[target_inner_1])
target_a = self.make_target('a', dependencies=[target_inner_1, target_inner_2])
self.assert_console_output('Found 2 paths',
'',
'\t[a, inner1, b]',
'\t[a, inner2, inner1, b]',
targets=[target_a, target_b])
class PathTest(ConsoleTaskTestBase):
@classmethod
def task_type(cls):
return Path
def test_only_returns_first_path(self):
target_b = self.make_target('b')
target_inner_1 = self.make_target('inner1', dependencies=[target_b])
target_inner_2 = self.make_target('inner2', dependencies=[target_inner_1])
target_a = self.make_target('a', dependencies=[target_inner_1, target_inner_2])
self.assert_console_output('[a, inner1, b]',
targets=[target_a, target_b])
def test_when_no_path(self):
target_b = self.make_target('b')
target_a = self.make_target('a')
self.assert_console_output('No path found from a to b!',
targets=[target_a, target_b])
|
dkodnik/Ant
|
refs/heads/master
|
openerp/tools/func.py
|
63
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ['synchronized']
from functools import wraps
from inspect import getsourcefile
def synchronized(lock_attr='_lock'):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_attr)
try:
lock.acquire()
return func(self, *args, **kwargs)
finally:
lock.release()
return wrapper
return decorator
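# A hedged usage sketch (the Counter class is illustrative): the decorated
# method acquires the lock stored on the attribute named by lock_attr, so
# concurrent calls on one instance are serialized.
def _synchronized_example():
    import threading

    class Counter(object):
        def __init__(self):
            self._lock = threading.Lock()
            self.value = 0

        @synchronized()              # uses the default '_lock' attribute
        def increment(self):
            self.value += 1

    counter = Counter()
    counter.increment()
    return counter.value             # -> 1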
def frame_codeinfo(fframe, back=0):
    """ Return a (filename, line) pair for a previous frame.
    @return (filename, lineno) where lineno is either an int or the string ''
    """
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)
except TypeError:
fname = '<builtin>'
lineno = fframe.f_lineno or ''
return fname, lineno
except Exception:
return "<unknown>", ''
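# A hedged sketch (CPython-specific, since sys._getframe is used): pass a
# frame and the number of levels to walk back; a falsy frame yields
# ("<unknown>", '').
def _frame_codeinfo_example():
    import sys
    fname, lineno = frame_codeinfo(sys._getframe(), back=0)
    return fname, lineno             # this file and the calling line number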
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vetalypp/e2openplugin-CrossEPG
|
refs/heads/master
|
src/enigma2/python/crossepg_setup.py
|
1
|
from enigma import getDesktop
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_0, ConfigYesNo, ConfigSelection, ConfigClock, config, configfile
from Components.ConfigList import ConfigList
from Components.Button import Button
from Components.Label import Label
from Components.Harddisk import harddiskmanager
from Components.PluginComponent import plugins
from Components.ActionMap import NumberActionMap
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Plugins.Plugin import PluginDescriptor
from crossepglib import *
from crossepg_locale import _
from crossepg_auto import crossepg_auto
from time import *
import os
import sys
class CrossEPG_Setup(Screen):
def __init__(self, session):
if (getDesktop(0).size().width() < 800):
skin = "%s/skins/setup_sd.xml" % (os.path.dirname(sys.modules[__name__].__file__))
else:
skin = "%s/skins/setup_hd.xml" % (os.path.dirname(sys.modules[__name__].__file__))
f = open(skin, "r")
self.skin = f.read()
f.close()
Screen.__init__(self, session)
patchtype = getEPGPatchType()
if patchtype == 0 or patchtype == 1 or patchtype == 3:
self.fastpatch = True
else:
self.fastpatch = False
self.session = session
self.config = CrossEPG_Config()
self.config.load()
self.lamedbs = self.config.getAllLamedbs()
self.lamedbs_desc = []
self.mountpoint = []
self.mountdescription = []
self.automatictype = []
self.show_extension = self.config.show_extension
self.show_plugin = self.config.show_plugin
self.show_force_reload_as_plugin = self.config.show_force_reload_as_plugin
# make devices entries
if self.config.isQBOXHD():
self.mountdescription.append(_("Internal flash"))
self.mountpoint.append("/var/crossepg/data")
for partition in harddiskmanager.getMountedPartitions():
if (partition.mountpoint != '/') and (partition.mountpoint != '') and self.isMountedInRW(partition.mountpoint):
self.mountpoint.append(partition.mountpoint + "/crossepg")
if partition.description != '':
self.mountdescription.append(partition.description)
else:
self.mountdescription.append(partition.mountpoint)
		if not self.config.isQBOXHD(): # for other decoders we add internal flash as the last entry (not recommended)
self.mountdescription.append(_("Internal flash (unsuggested)"))
self.mountpoint.append(self.config.home_directory + "/data")
# make lamedb entries
for lamedb in self.lamedbs:
if lamedb == "lamedb":
self.lamedbs_desc.append(_("main lamedb"))
else:
self.lamedbs_desc.append(lamedb.replace("lamedb.", "").replace(".", " "))
# make automatic type entries
self.automatictype.append(_("disabled"))
self.automatictype.append(_("once a day"))
self.automatictype.append(_("every hour (only in standby)"))
self.list = []
self["config"] = ConfigList(self.list, session = self.session)
self["config"].onSelectionChanged.append(self.setInfo)
self["information"] = Label("")
self["key_red"] = Button(_("Back"))
self["key_green"] = Button()
self["key_yellow"] = Button()
self["key_blue"] = Button("")
self["config_actions"] = NumberActionMap(["SetupActions", "InputAsciiActions", "KeyboardInputActions", "ColorActions"],
{
"red": self.quit,
"cancel": self.quit,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1) # to prevent left/right overriding the listbox
self.makeList()
# Modded by IAmATeaf 13/04/2012
# def isMountedInRW(self, path):
# testfile = path + "/tmp-rw-test"
# os.system("touch " + testfile)
# if os.path.exists(testfile):
# os.system("rm -f " + testfile)
# return True
# return False
def isMountedInRW(self, path):
testfile = os.path.join(path, "tmp-rw-test")
try:
open(testfile, "wb").close()
os.unlink(testfile)
except:
return False
return True
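	# A hedged sketch (the path below is illustrative): the check above proves
	# writability by creating and deleting a temporary file, so a read-only or
	# broken mount simply returns False.
	def _isMountedInRW_example(self):
		return self.isMountedInRW("/media/usb")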
def showWarning(self):
		self.session.open(MessageBox, _("PLEASE READ!\nNo disk found. A hard drive or a USB pen is STRONGLY RECOMMENDED. If you still want to use your internal flash, pay attention:\n(1) If you don't have enough free space your box may block completely and you will need to flash it again\n(2) Many write operations on your internal flash may damage the flash memory"), type = MessageBox.TYPE_ERROR)
def keyLeft(self):
self["config"].handleKey(KEY_LEFT)
self.update()
#self.setInfo()
def keyRight(self):
self["config"].handleKey(KEY_RIGHT)
self.update()
#self.setInfo()
def keyHome(self):
self["config"].handleKey(KEY_HOME)
self.update()
#self.setInfo()
def keyEnd(self):
self["config"].handleKey(KEY_END)
self.update()
#self.setInfo()
def keyNumberGlobal(self, number):
self["config"].handleKey(KEY_0 + number)
self.update()
#self.setInfo()
def makeList(self):
self.list = []
device_default = None
i = 0
for mountpoint in self.mountpoint:
if mountpoint == self.config.db_root:
device_default = self.mountdescription[i]
i += 1
		# the default device is really important... if the default is missing we force it to the first entry and update the main config now
		if device_default is None:
self.config.db_root = self.mountpoint[0]
device_default = self.mountdescription[0]
lamedb_default = _("main lamedb")
if self.config.lamedb != "lamedb":
lamedb_default = self.config.lamedb.replace("lamedb.", "").replace(".", " ")
scheduled_default = None
if self.config.download_standby_enabled:
scheduled_default = _("every hour (only in standby)")
elif self.config.download_daily_enabled:
scheduled_default = _("once a day")
else:
scheduled_default = _("disabled")
self.list.append((_("Storage device"), ConfigSelection(self.mountdescription, device_default)))
if len(self.lamedbs_desc) > 1:
self.list.append((_("Preferred lamedb"), ConfigSelection(self.lamedbs_desc, lamedb_default)))
self.list.append((_("Enable csv import"), ConfigYesNo(self.config.csv_import_enabled > 0)))
self.list.append((_("Force epg reload on boot"), ConfigYesNo(self.config.force_load_on_boot > 0)))
self.list.append((_("Download on tune"), ConfigYesNo(self.config.download_tune_enabled > 0)))
self.list.append((_("Scheduled download"), ConfigSelection(self.automatictype, scheduled_default)))
if self.config.download_daily_enabled:
ttime = localtime()
ltime = (ttime[0], ttime[1], ttime[2], self.config.download_daily_hours, self.config.download_daily_minutes, ttime[5], ttime[6], ttime[7], ttime[8])
self.list.append((_("Scheduled download at"), ConfigClock(mktime(ltime))))
if not self.fastpatch:
self.list.append((_("Reboot after a scheduled download"), ConfigYesNo(self.config.download_daily_reboot > 0)))
self.list.append((_("Reboot after a manual download"), ConfigYesNo(self.config.download_manual_reboot > 0)))
self.list.append((_("Show as plugin"), ConfigYesNo(self.config.show_plugin > 0)))
self.list.append((_("Show as extension"), ConfigYesNo(self.config.show_extension > 0)))
self.list.append((_("Show 'Force reload' as plugin"), ConfigYesNo(self.config.show_force_reload_as_plugin > 0)))
self["config"].setList(self.list)
self.setInfo()
def update(self):
redraw = False
self.config.db_root = self.mountpoint[self.list[0][1].getIndex()]
i = 1
if len(self.lamedbs_desc) > 1:
self.config.lamedb = self.lamedbs[self.list[i][1].getIndex()]
i += 1
self.config.csv_import_enabled = int(self.list[i][1].getValue())
self.config.force_load_on_boot = int(self.list[i+1][1].getValue())
self.config.download_tune_enabled = int(self.list[i+2][1].getValue())
dailycache = self.config.download_daily_enabled
standbycache = self.config.download_standby_enabled
if self.list[i+3][1].getIndex() == 0:
self.config.download_daily_enabled = 0
self.config.download_standby_enabled = 0
elif self.list[i+3][1].getIndex() == 1:
self.config.download_daily_enabled = 1
self.config.download_standby_enabled = 0
else:
self.config.download_daily_enabled = 0
self.config.download_standby_enabled = 1
if dailycache != self.config.download_daily_enabled or standbycache != self.config.download_standby_enabled:
redraw = True
i += 4
if dailycache:
self.config.download_daily_hours = self.list[i][1].getValue()[0]
self.config.download_daily_minutes = self.list[i][1].getValue()[1]
i += 1
if not self.fastpatch:
self.config.download_daily_reboot = int(self.list[i][1].getValue())
self.config.download_manual_reboot = int(self.list[i+1][1].getValue())
i += 2
self.config.show_plugin = int(self.list[i][1].getValue())
self.config.show_extension = int(self.list[i+1][1].getValue())
self.config.show_force_reload_as_plugin = int(self.list[i+2][1].getValue())
if redraw:
self.makeList()
def setInfo(self):
index = self["config"].getCurrentIndex()
if len(self.lamedbs_desc) <= 1 and index > 0:
index += 1
if self.config.download_daily_enabled == 0 and index > 5:
index += 1
if self.fastpatch and index > 6:
index += 2
if index == 0:
			self["information"].setText(_("Drive where you save data.\nThe drive MUST be mounted read-write. If you can't see your device here, it is probably mounted read-only or autofs handles it only in read-only mode. In that case, mount it manually and try again"))
elif index == 1:
self["information"].setText(_("Lamedb used for epg.dat conversion.\nThis option doesn't work with crossepg patch v2"))
elif index == 2:
self["information"].setText(_("Import *.csv and *.bin from %s/import or %s/import\n(*.bin are binaries with a csv as stdout)") % (self.config.db_root, self.config.home_directory))
elif index == 3:
			self["information"].setText(_("Reload epg at every boot.\nNormally it's not necessary, but it recovers the epg after an enigma2 crash"))
elif index == 4:
			self["information"].setText(_("Only for opentv providers.\nIf you zap to a channel used by a provider, it downloads the epg in the background"))
elif index == 5:
if self.config.download_standby_enabled:
				self["information"].setText(_("When the decoder is in standby, opentv providers will be downloaded automatically every hour.\nXMLTV providers will always be downloaded only once a day"))
elif self.config.download_daily_enabled:
self["information"].setText(_("Download epg once a day"))
else:
self["information"].setText(_("Scheduled download disabled"))
elif index == 6:
self["information"].setText(_("Time for scheduled daily download"))
elif index == 7:
self["information"].setText(_("Automatically reboot the decoder after a scheduled download"))
elif index == 8:
self["information"].setText(_("Automatically reboot the decoder after a manual download"))
elif index == 9:
self["information"].setText(_("Show crossepg in plugin menu"))
elif index == 10:
self["information"].setText(_("Show crossepg in extensions menu"))
def quit(self):
self.config.last_full_download_timestamp = 0
self.config.last_partial_download_timestamp = 0
self.config.configured = 1
self.config.save()
try:
if self.config.db_root[-8:] == "crossepg":
config.misc.epgcache_filename.setValue(self.config.db_root[:-9] + "/epg.dat")
else:
config.misc.epgcache_filename.setValue(self.config.db_root + "/epg.dat")
config.misc.epgcache_filename.callNotifiersOnSaveAndCancel = True
config.misc.epgcache_filename.save()
configfile.save()
except Exception, e:
print "custom epgcache filename not supported by current enigma2 version"
if getEPGPatchType() == -1:
# exec crossepg_prepare_pre_start for unpatched images
os.system(self.config.home_directory + "/crossepg_prepare_pre_start.sh")
if self.show_extension != self.config.show_extension or self.show_plugin != self.config.show_plugin:
for plugin in plugins.getPlugins(PluginDescriptor.WHERE_PLUGINMENU):
if plugin.name == "CrossEPG Downloader":
plugins.removePlugin(plugin)
for plugin in plugins.getPlugins(PluginDescriptor.WHERE_EXTENSIONSMENU):
if plugin.name == "CrossEPG Downloader":
plugins.removePlugin(plugin)
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
crossepg_auto.forcePoll()
if self.config.db_root == self.config.home_directory + "/data" and not self.config.isQBOXHD():
self.showWarning()
self.close()
|
pasqualguerrero/django
|
refs/heads/master
|
tests/migrations/test_migrations_conflict/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
morelab/weblabdeusto
|
refs/heads/master
|
server/src/test/unit/weblab/core/coordinator/test_config_parser.py
|
3
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import unittest
import test.unit.configuration as configuration_module
import voodoo.configuration as ConfigurationManager
import weblab.core.exc as coreExc
from weblab.data.experiments import ExperimentInstanceId
from weblab.core.coordinator.resource import Resource
import weblab.core.coordinator.config_parser as CoordinationConfigurationParser
class CoordinationConfigurationParserTestCase(unittest.TestCase):
def setUp(self):
self.cfg_manager = ConfigurationManager.ConfigurationManager()
self.cfg_manager.append_module(configuration_module)
self.coordination_configuration_parser = CoordinationConfigurationParser.CoordinationConfigurationParser(self.cfg_manager)
def test_coordination_configuration_parser(self):
self.cfg_manager._set_value(CoordinationConfigurationParser.COORDINATOR_LABORATORY_SERVERS, {
'laboratory1:WL_SERVER1@WL_MACHINE1' : {
'exp1|ud-fpga|FPGA experiments' : 'fpga1@fpga boards',
'exp1|ud-pld|PLD experiments' : 'pld1@pld boards',
},
})
configuration = self.coordination_configuration_parser.parse_configuration()
self.assertEquals(1, len(configuration))
lab_config = configuration['laboratory1:WL_SERVER1@WL_MACHINE1']
self.assertEquals(2, len(lab_config))
exp_fpga = ExperimentInstanceId("exp1","ud-fpga","FPGA experiments")
exp_pld = ExperimentInstanceId("exp1","ud-pld","PLD experiments")
fpga_resource = lab_config[exp_fpga]
self.assertEquals(Resource("fpga boards", "fpga1"), fpga_resource)
pld_resource = lab_config[exp_pld]
self.assertEquals(Resource("pld boards", "pld1"), pld_resource)
def test_coordination_parse_resources_for_experiment_ids(self):
self.cfg_manager._set_value(CoordinationConfigurationParser.COORDINATOR_LABORATORY_SERVERS, {
'laboratory1:WL_SERVER1@WL_MACHINE1' : {
'exp1|ud-fpga|FPGA experiments' : 'fpga1@fpga boards',
'exp1|ud-pld|PLD experiments' : 'pld1@pld boards',
'exp1|ud-logic|PIC experiments' : 'pld1@pld boards'
},
'laboratory2:WL_SERVER1@WL_MACHINE1' : {
'exp2|ud-fpga|FPGA experiments' : 'fpga1@fpga boards',
'exp2|ud-pld|PLD experiments' : 'pld1@pld boards',
'exp2|ud-logic|PIC experiments' : 'fpga1@fpga boards'
},
})
self.cfg_manager._set_value(CoordinationConfigurationParser.COORDINATOR_EXTERNAL_SERVERS, {
'ud-pld@PLD experiments' : ['weblab_university1', 'weblab_university2'],
'visir@VISIR experiments' : ['weblab_university3']
})
configuration = self.coordination_configuration_parser.parse_resources_for_experiment_ids()
self.assertTrue('ud-pld@PLD experiments' in configuration)
self.assertTrue('ud-fpga@FPGA experiments' in configuration)
self.assertTrue('ud-logic@PIC experiments' in configuration)
self.assertEquals([u'weblab_university1', u'weblab_university2', u'pld boards'], configuration['ud-pld@PLD experiments'])
self.assertEquals([u'weblab_university3'], configuration['visir@VISIR experiments'])
self.assertEquals(['fpga boards'], configuration['ud-fpga@FPGA experiments'])
self.assertEquals(['pld boards', 'fpga boards'], configuration['ud-logic@PIC experiments'])
def test_coordination_configuration_parser_fail1(self):
self.cfg_manager._set_value(CoordinationConfigurationParser.COORDINATOR_LABORATORY_SERVERS, {
'laboratory1:WL_SERVER1@WL_MACHINE1' : {
'not.a.valid.experiment.instance.id' : 'fpga1@fpga boards'
},
})
self.assertRaises(
coreExc.CoordinationConfigurationParsingError,
self.coordination_configuration_parser.parse_configuration
)
def test_coordination_configuration_parser_fail2(self):
self.cfg_manager._set_value(CoordinationConfigurationParser.COORDINATOR_LABORATORY_SERVERS, {
'laboratory1:WL_SERVER1@WL_MACHINE1' : {
'exp1|ud-fpga|FPGA experiments' : 'not.a.valid.resource.instance',
},
})
self.assertRaises(
coreExc.CoordinationConfigurationParsingError,
self.coordination_configuration_parser.parse_configuration
)
def suite():
return unittest.makeSuite(CoordinationConfigurationParserTestCase)
if __name__ == '__main__':
unittest.main()
|
j00bar/ansible
|
refs/heads/devel
|
lib/ansible/playbook/conditional.py
|
19
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_native
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.template.safe_eval import safe_eval
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:
'''
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
'''
_when = FieldAttribute(isa='list', default=[])
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super(Conditional, self).__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [ value ])
def _get_attr_when(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
when = self._attributes['when']
if when is None:
when = []
if hasattr(self, '_get_parent_attribute'):
when = self._get_parent_attribute('when', extend=True, prepend=True)
return when
def extract_defined_undefined(self, conditional):
results = []
cond = conditional
m = DEFINED_REGEX.search(cond)
while m:
results.append(m.groups())
cond = cond[m.end():]
m = DEFINED_REGEX.search(cond)
return results
def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
# since this is a mix-in, it may not have an underlying datastructure
# associated with it, so we pull it out now in case we need it for
# error reporting below
ds = None
if hasattr(self, '_ds'):
ds = getattr(self, '_ds')
try:
# this allows for direct boolean assignments to conditionals "when: False"
if isinstance(self.when, bool):
return self.when
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
except Exception as e:
raise AnsibleError(
"The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
)
return True
def _check_conditional(self, conditional, templar, all_vars):
'''
This method does the low-level evaluation of each conditional
set on this object, using jinja2 to wrap the conditionals for
evaluation.
'''
original = conditional
if conditional is None or conditional == '':
return True
# pull the "bare" var out, which allows for nested conditionals
# and things like:
# - assert:
# that:
# - item
# with_items:
# - 1 == 1
if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
conditional = all_vars[conditional]
if templar._clean_data(conditional) != conditional:
display.warning('when statements should not include jinja2 '
'templating delimiters such as {{ }} or {%% %%}. '
'Found: %s' % conditional)
# make sure the templar is using the variables specified with this method
templar.set_available_variables(variables=all_vars)
try:
# if the conditional is "unsafe", disable lookups
disable_lookups = hasattr(conditional, '__UNSAFE__')
conditional = templar.template(conditional, disable_lookups=disable_lookups)
if not isinstance(conditional, text_type) or conditional == "":
return conditional
# update the lookups flag, as the string returned above may now be unsafe
# and we don't want future templating calls to do unsafe things
disable_lookups |= hasattr(conditional, '__UNSAFE__')
# First, we do some low-level jinja2 parsing involving the AST format of the
# statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False, inside_yield=False):
if isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Yield):
inside_yield = True
elif isinstance(node, ast.Str):
if disable_lookups:
if inside_call and node.s.startswith("__"):
# calling things with a dunder is generally bad at this point...
raise AnsibleError(
"Invalid access found in the conditional: '%s'" % conditional
)
elif inside_yield:
# we're inside a yield, so recursively parse and traverse the AST
# of the result to catch forbidden syntax from executing
parsed = ast.parse(node.s, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(
child_node,
inside_call=inside_call,
inside_yield=inside_yield
)
try:
e = templar.environment.overlay()
e.filters.update(templar._get_filters())
e.tests.update(templar._get_tests())
res = e._parse(conditional, None, None)
res = generate(res, e, None, None)
parsed = ast.parse(res, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
except Exception as e:
raise AnsibleError("Invalid conditional detected: %s" % to_native(e))
# and finally we generate and template the presented string and look at the resulting string
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
val = templar.template(presented, disable_lookups=disable_lookups).strip()
if val == "True":
return True
elif val == "False":
return False
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
except (AnsibleUndefinedVariable, UndefinedError) as e:
# the templating failed, meaning most likely a variable was undefined. If we happened
# to be looking for an undefined variable, return True, otherwise fail
try:
# first we extract the variable name from the error message
var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
# next we extract all defined/undefined tests from the conditional string
def_undef = self.extract_defined_undefined(conditional)
# then we loop through these, comparing the error variable name against
# each def/undef test we found above. If there is a match, we determine
# whether the logic/state mean the variable should exist or not and return
# the corresponding True/False
for (du_var, logic, state) in def_undef:
# when we compare the var names, normalize quotes because something
# like hostvars['foo'] may be tested against hostvars["foo"]
if var_name.replace("'", '"') == du_var.replace("'", '"'):
# the should exist is a xor test between a negation in the logic portion
# against the state (defined or undefined)
should_exist = ('not' in logic) != (state == 'defined')
if should_exist:
return False
else:
return True
# as nothing above matched the failed var name, re-raise here to
# trigger the AnsibleUndefinedVariable exception again below
raise
except Exception as new_e:
raise AnsibleUndefinedVariable(
"error while evaluating conditional (%s): %s" % (original, e)
)
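# A hedged sketch (the conditional string is illustrative): DEFINED_REGEX,
# which extract_defined_undefined() above applies repeatedly, captures a
# (variable, logic, state) triple for each defined/undefined test.
def _defined_regex_sketch():
    match = DEFINED_REGEX.search("bar is not defined")
    return match.groups()            # -> ('bar', 'is not', 'defined')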
|
JinXinDeep/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/shape_ops_test.py
|
5
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class ShapeOpsTest(tf.test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.shape(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = tf.shape_n([x, x, x])
result = sess.run(tf_ans)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.rank(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.size(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.zeros([2]))
self._testAll(np.zeros([2, 3]))
self._testAll(np.zeros([2, 3, 5]))
self._testAll(np.zeros([2, 3, 5, 7]))
self._testAll(np.zeros([2, 3, 5, 7, 11]))
self._testAll(np.zeros([2, 3, 5, 7, 11, 13]))
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.test_session(use_gpu=use_gpu):
tensor = tf.expand_dims(x, dim)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsErrors(self):
with self.test_session():
self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4)
def testExpandDimsGradient(self):
with self.test_session():
inp = tf.constant(np.random.rand(4, 2).astype("f"),
dtype=tf.float32)
squeezed = tf.expand_dims(inp, 1)
err = tf.test.compute_gradient_error(inp, [4, 2], squeezed, [4, 1, 2])
self.assertLess(err, 1e-3)
def testExpandDimsScalar(self):
with self.test_session():
inp = tf.constant(7)
self.assertAllEqual([7], tf.expand_dims(inp, 0).eval())
self.assertAllEqual([7], tf.expand_dims(inp, -1).eval())
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.test_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = tf.squeeze(x, squeeze_dims)
tf_ans = tensor.eval()
else:
np_ans = np.squeeze(x)
tensor = tf.squeeze(x)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
    # Numpy squeezes a one-element tensor into a zero-dimensional tensor.
    # Verify that we do the same.
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
tensor = tf.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = tensor.eval()
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, tf.squeeze, input_1x1x3, [2])
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [2, 3])
def testSqueezeGradient(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = tf.reshape(inp, [4, 1, 2])
squeezed = tf.squeeze(a, [])
err = tf.test.compute_gradient_error(a, [4, 1, 2], squeezed, [4, 2])
self.assertLess(err, 1e-3)
def testSqueezeGradientWithSqueezeDims(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = tf.reshape(inp, [4, 1, 2, 1])
squeezed = tf.squeeze(a, [1])
err = tf.test.compute_gradient_error(a, [4, 1, 2, 1], squeezed, [4, 2, 1])
self.assertLess(err, 1e-3)
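# A hedged numpy-only sketch of the convention verified in testSqueezeAllOnes
# above: squeezing every unit dimension of a one-element array yields a
# zero-dimensional result, and tf.squeeze is expected to match that.
def _squeeze_semantics_sketch():
  return np.squeeze(np.zeros([1, 1, 1])).shape   # -> ()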
class TileTest(tf.test.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
a = tf.constant(7, shape=[], dtype=tf.float32)
tiled = tf.tile(a, [])
result = tiled.eval()
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
with self.test_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = tf.constant(inp)
tiled = tf.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testEmpty(self):
with self.test_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = tf.constant(inp)
tiled = tf.tile(a, [5, 0])
result = tiled.eval()
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
def testTypes(self):
types_to_test = {
"bool": (tf.bool, bool),
"float32": (tf.float32, float),
"float64": (tf.float64, float),
"uint8": (tf.uint8, int),
"int32": (tf.int32, int),
"int64": (tf.int64, int),
bytes: (tf.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.test_session():
inp = np.random.rand(4, 1).astype(dtype_np)
a = tf.constant([cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = tf.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
def testInvalidDim(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
a = tf.constant([float(x) for x in inp.ravel(order="C")],
shape=[4, 1], dtype=tf.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
tf.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
tf.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Random dims of rank 5
input_shape = np.random.randint(1, 4, size=5)
inp = np.random.rand(*input_shape).astype("f")
a = tf.constant([float(x) for x in inp.ravel(order="C")],
shape=input_shape, dtype=tf.float32)
multiples = np.random.randint(1, 4, size=5).astype(np.int32)
tiled = tf.tile(a, multiples)
result = tiled.eval()
self.assertTrue((np.array(multiples) * np.array(inp.shape) ==
np.array(result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
for _ in range(5):
self._RunAndVerifyResult(use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(use_gpu=True)
def testGradientSimpleReduction(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
a = tf.constant([float(x) for x in inp.flatten()],
shape=[4, 1], dtype=tf.float32)
tiled = tf.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=grad_shape)
grad = tf.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReduction(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = tf.constant([float(x) for x in inp.flatten()],
shape=[4, 2], dtype=tf.float32)
tiled = tf.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=grad_shape)
grad = tf.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
def testGradientSimpleReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
a = tf.constant([float(x) for x in inp.flatten()],
shape=[4, 1], dtype=tf.float32)
tiled = tf.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=grad_shape)
grad = tf.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
a = tf.constant([float(x) for x in inp.flatten()],
shape=[4, 2], dtype=tf.float32)
tiled = tf.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=grad_shape)
grad = tf.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = tf.constant(inp, dtype=tf.float64)
tiled = tf.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = tf.test.compute_gradient_error(a,
list(input_shape),
tiled,
grad_shape,
x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
def testGradientStridedReductionGC(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = tf.constant([float(x) for x in inp.flatten()],
shape=[4, 2], dtype=tf.float32)
tiled = tf.tile(a, [1, 2])
err = tf.test.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = tf.constant(0.0, shape=[4, 4, 4, 4])
tiled = tf.tile(inp, tf.placeholder(tf.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = tf.placeholder(tf.float32)
tiled = tf.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = tf.placeholder(tf.float32)
tiled = tf.tile(inp, tf.placeholder(tf.int32))
self.assertIs(None, tiled.get_shape().ndims)
if __name__ == "__main__":
tf.test.main()
|
jakevdp/scipy
|
refs/heads/master
|
scipy/special/tests/test_ellip_harm.py
|
99
|
#
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, run_module_suite)
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
def change_coefficient(lambda1, mu, nu, h2, k2):
x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
return x, y, z
def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
* ellip_harm(h2, k2, n, p, nu))
def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm_2(h2, k2, n, p, lambda1)
* ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
tol = 1e-8
sum1 = 0
for n in range(20):
xsum = 0
for p in range(1, 2*n+2):
xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
* solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
(ellip_normal(h2, k2, n, p)*(2*n + 1)))
if abs(xsum) < 0.1*tol*abs(sum1):
break
sum1 += xsum
return sum1, xsum
def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
return 1/res
pts = [
(120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
(120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=IntegrationWarning)
for p in pts:
err_msg = repr(p)
exact = potential(*p)
result, last_term = summation(*p)
assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
assert_(abs(result - exact) < 10*abs(last_term), err_msg)
def test_ellip_norm():
def G01(h2, k2):
return 4*pi
def G11(h2, k2):
return 4*pi*h2*k2/3
def G12(h2, k2):
return 4*pi*h2*(k2 - h2)/3
def G13(h2, k2):
return 4*pi*k2*(k2 - h2)/3
def G22(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G21(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
+ sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G23(h2, k2):
return 4*pi*h2**2*k2*(k2 - h2)/15
def G24(h2, k2):
return 4*pi*h2*k2**2*(k2 - h2)/15
def G25(h2, k2):
return 4*pi*h2*k2*(k2 - h2)**2/15
def G32(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
11*h2*k2*(h2 + k2)))
return 16*pi/13125*k2*h2*res
def G31(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
11*h2*k2*(h2 + k2)))
return 16*pi/13125*h2*k2*res
def G34(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G33(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G36(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G35(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G37(h2, k2):
return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
(2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
(2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
(3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
def _ellip_norm(n, p, h2, k2):
func = known_funcs[n, p]
return func(h2, k2)
_ellip_norm = np.vectorize(_ellip_norm)
def ellip_normal_known(h2, k2, n, p):
return _ellip_norm(n, p, h2, k2)
# generate both large and small h2 < k2 pairs
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=1)
k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
points = []
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2, k2, n*np.ones(h2.size), p*np.ones(h2.size)))
points = np.array(points)
with warnings.catch_warnings(record=True): # occurrence of roundoff ...
assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
def test_ellip_harm_2():
def I1(h2, k2, s):
res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
+ ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
return res
with warnings.catch_warnings(record=True): # occurrence of roundoff ...
assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
# Values produced by code from arXiv:1204.0267
assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
def test_ellip_harm():
def E01(h2, k2, s):
return 1
def E11(h2, k2, s):
return s
def E12(h2, k2, s):
return sqrt(abs(s*s - h2))
def E13(h2, k2, s):
return sqrt(abs(s*s - k2))
def E21(h2, k2, s):
return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E22(h2, k2, s):
return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E23(h2, k2, s):
return s * sqrt(abs(s*s - h2))
def E24(h2, k2, s):
return s * sqrt(abs(s*s - k2))
def E25(h2, k2, s):
return sqrt(abs((s*s - h2)*(s*s - k2)))
def E31(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E32(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E33(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E34(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E35(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E36(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E37(h2, k2, s):
return s * sqrt(abs((s*s - h2)*(s*s - k2)))
assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
ellip_harm(5, 8, 1, 2, 2.5))
known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
(2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
(2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
(3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
point_ref = []
def ellip_harm_known(h2, k2, n, p, s):
for i in range(h2.size):
func = known_funcs[(int(n[i]), int(p[i]))]
point_ref.append(func(h2[i], k2[i], s[i]))
return point_ref
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=30)
k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
s = np.random.pareto(0.5, size=h2.size)
points = []
for i in range(h2.size):
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2[i], k2[i], n, p, s[i]))
points = np.array(points)
assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
if __name__ == "__main__":
run_module_suite()
|
StackOps/python-automationclient
|
refs/heads/master
|
automationclient/tests/v1_1/test_devices.py
|
1
|
# Copyright 2012-2013 STACKOPS TECHNOLOGIES S.L.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from automationclient.tests import utils
from automationclient.tests.v1_1 import fakes
from automationclient.v1_1.devices import Device
cs = fakes.FakeClient()
class DevicesTest(utils.TestCase):
def test_device_list(self):
devices = cs.devices.list()
cs.assert_called('GET', '/pool/devices')
self.assertEqual(len(devices), 2)
        for dev in devices:
            self.assertIsInstance(dev, Device)
def test_device_show(self):
device = cs.devices.get(1234)
cs.assert_called('GET', '/pool/devices/1234')
self.assertIsInstance(device, Device)
def test_device_update(self):
device = cs.devices.get(1234)
cs.assert_called('GET', '/pool/devices/1234')
options = {"management_network_dns": "8.8.8.8",
"management_network_netmask": "255.255.255.0",
"management_network_ip": "180.10.10.119",
"lom_mac": "00:00:00:00",
"lom_ip": "0.0.0.0",
"management_network_gateway": "180.10.10.1"}
device = cs.devices.update(device, **options)
cs.assert_called('PUT', '/pool/devices/1234', body=options)
self.assertIsInstance(device, dict)
def test_device_delete(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
options = {"action": "nothing",
"lom_password": "stackops",
"lom_user": "stackops"}
cs.devices.delete(device, **options)
cs.assert_called('POST', '/pool/devices/%s/delete' % device.mac)
def test_device_power_on(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
options = {"lom_password": "stackops", "lom_user": "stackops"}
cs.devices.power_on(device, **options)
cs.assert_called('POST', '/pool/devices/1234/poweron', body=options)
def test_device_power_off(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
options = {"lom_password": "stackops", "lom_user": "stackops"}
cs.devices.power_off(device, **options)
cs.assert_called('POST', '/pool/devices/1234/poweroff', body=options)
def test_device_reboot(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
options = {"lom_password": "stackops", "lom_user": "stackops"}
cs.devices.reboot(device, **options)
cs.assert_called('POST', '/pool/devices/1234/reboot', body=options)
def test_device_shutdown(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
cs.devices.shutdown(device)
cs.assert_called('POST', '/pool/devices/1234/shutdown')
def test_device_soft_reboot(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
cs.devices.soft_reboot(device)
cs.assert_called('POST', '/pool/devices/1234/soft_reboot')
def test_device_activate(self):
device = cs.devices.list()[0]
cs.assert_called('GET', '/pool/devices')
options = {"lom_password": "stackops",
"lom_user": "stackops",
"zone_id": 1}
cs.devices.activate(device, **options)
cs.assert_called('POST', '/pool/devices/1234/activate', body=options)
|
aviau/joulupukki-worker
|
refs/heads/master
|
config.py
|
2
|
# Server Specific Configurations
server = {
'port': '8082',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'joulupukki.worker.controllers.root.RootController',
'modules': ['joulupukki.worker'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/joulupukki/templates',
'debug': True,
'errors': {
404: '/error/404',
'__force_dict__': True
}
}
logging = {
# 'root': {'level': 'INFO', 'handlers': ['console']},
'root': {'level': 'DEBUG', 'handlers': ['console']},
'loggers': {
'joulupukki': {'level': 'DEBUG', 'handlers': ['console']},
'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s] '
'%(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s] '
'%(message)s'),
'__force_dict__': True
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
workspace_path = '%(confdir)s/../output'
rabbit_server = "127.0.0.1"
rabbit_port = 5672
rabbit_db = "joulupukki"
rabbit_user = "guest"
rabbit_password = "guest"
rabbit_vhost = "/"
mongo_server = "127.0.0.1"
mongo_port = 27017
mongo_db = "joulupukki"
# Each entry: (distro name, docker base image, package format, build backend)
distros = (
("ubuntu_10.04", "ubuntu:10.04", "deb", "docker"),
("ubuntu_12.04", "ubuntu:12.04", "deb", "docker"),
("ubuntu_14.04", "ubuntu:14.04", "deb", "docker"),
("ubuntu_14.10", "ubuntu:14.10", "deb", "docker"),
("ubuntu_15.04", "ubuntu:15.04", "deb", "docker"),
("debian_7", "debian:7", "deb", "docker"),
("debian_8", "debian:8", "deb", "docker"),
("centos_6", "centos:6", "rpm", "docker"),
("centos_7", "centos:7", "rpm", "docker"),
("fedora_20", "fedora:20", "rpm", "docker"),
("fedora_21", "fedora:21", "rpm", "docker"),
("win32", "ubuntu:14.10", "win32", "docker"),
)
docker_version = "1.14"
ccache_path = '%(confdir)s/ccache'
supported_build_type = ['docker', 'ios']
docker_cpuset = 1
thread_count = 5
#
# All configurations are accessible at::
# pecan.conf
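# For example, elsewhere in the worker (a sketch; assumes pecan has loaded
# this module as its configuration at startup):
#
#     from pecan import conf
#     amqp_url = "amqp://%s:%s@%s:%s%s" % (
#         conf.rabbit_user, conf.rabbit_password,
#         conf.rabbit_server, conf.rabbit_port, conf.rabbit_vhost)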
|
googleapis/python-dialogflow
|
refs/heads/master
|
samples/snippets/noxfile.py
|
1
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from pathlib import Path
import sys
import nox
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
# DO NOT EDIT THIS FILE EVER!
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
# their directory and modify it.
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
"ignored_versions": ["2.7"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
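# A minimal sketch of what a per-sample noxfile_config.py override might
# look like (values are hypothetical; only keys present in TEST_CONFIG
# above are meaningful):
#
#     TEST_CONFIG_OVERRIDE = {
#         "ignored_versions": ["2.7", "3.6"],
#         "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
#         "envs": {"SAMPLE_ENV_VAR": "some-value"},
#     }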
try:
# Ensure we can import noxfile_config in the project's directory.
sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
TEST_CONFIG_OVERRIDE = {}
# Update the TEST_CONFIG with the user supplied values.
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
def get_pytest_env_vars():
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
#
# Style Checks
#
def _determine_local_import_names(start_dir):
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
return [
basename
for basename, extension in file_ext_pairs
if extension == ".py"
or os.path.isdir(os.path.join(start_dir, basename))
        and basename not in ("__pycache__",)
]
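# For instance, run against a sample directory containing "main.py",
# "util.py" and a package directory "helpers/", the function above would
# return ["main", "util", "helpers"] (file names here are hypothetical).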
# Linting with flake8.
#
# We ignore the following rules:
# E203: whitespace before ‘:’
# E266: too many leading ‘#’ for block comment
# E501: line too long
# I202: Additional newline in a section of imports
#
# We also need to specify the rules which are ignored by default:
# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
FLAKE8_COMMON_ARGS = [
"--show-source",
"--builtin=gettext",
"--max-complexity=20",
"--import-order-style=google",
"--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
"--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
"--max-line-length=88",
]
@nox.session
def lint(session):
if not TEST_CONFIG["enforce_type_hints"]:
session.install("flake8", "flake8-import-order")
else:
session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
".",
]
session.run("flake8", *args)
#
# Black
#
@nox.session
def blacken(session):
session.install("black")
python_files = [path for path in os.listdir(".") if path.endswith(".py")]
session.run("black", *python_files)
#
# Sample Tests
#
PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
def _session_tests(session, post_install=None):
"""Runs py.test for a particular project."""
if os.path.exists("requirements.txt"):
session.install("-r", "requirements.txt")
if os.path.exists("requirements-test.txt"):
session.install("-r", "requirements-test.txt")
if INSTALL_LIBRARY_FROM_SOURCE:
session.install("-e", _get_repo_root())
if post_install:
post_install(session)
session.run(
"pytest",
*(PYTEST_COMMON_ARGS + session.posargs),
# Pytest will return 5 when no tests are collected. This can happen
# on travis where slow and flaky tests are excluded.
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
success_codes=[0, 5],
env=get_pytest_env_vars()
)
@nox.session(python=ALL_VERSIONS)
def py(session):
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
session.skip(
"SKIPPED: {} tests are disabled for this sample.".format(session.python)
)
#
# Readmegen
#
def _get_repo_root():
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
if p is None:
break
if Path(p / ".git").exists():
return str(p)
# .git is not available in repos cloned via Cloud Build
# setup.py is always in the library's root, so use that instead
# https://github.com/googleapis/synthtool/issues/792
if Path(p / "setup.py").exists():
return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
@nox.session
@nox.parametrize("path", GENERATED_READMES)
def readmegen(session, path):
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
if os.path.exists(os.path.join(dir_, "requirements.txt")):
session.install("-r", os.path.join(dir_, "requirements.txt"))
in_file = os.path.join(dir_, "README.rst.in")
session.run(
"python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
)
|
leonsio/YAHM
|
refs/heads/master
|
share/tools/ubi_reader/ubi/display.py
|
3
|
#!/usr/bin/env python
#############################################################
# ubi_reader/ubi
# (c) 2013 Jason Pruitt (jrspruitt@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################
from ubi.defines import PRINT_COMPAT_LIST, PRINT_VOL_TYPE_LIST, UBI_VTBL_AUTORESIZE_FLG
def ubi(ubi, tab=''):
print '%sUBI File' % (tab)
print '%s---------------------' % (tab)
print '\t%sMin I/O: %s' % (tab, ubi.min_io_size)
print '\t%sLEB Size: %s' % (tab, ubi.leb_size)
print '\t%sPEB Size: %s' % (tab, ubi.peb_size)
print '\t%sTotal Block Count: %s' % (tab, ubi.block_count)
print '\t%sData Block Count: %s' % (tab, len(ubi.data_blocks_list))
print '\t%sLayout Block Count: %s' % (tab, len(ubi.layout_blocks_list))
print '\t%sInternal Volume Block Count: %s' % (tab, len(ubi.int_vol_blocks_list))
print '\t%sUnknown Block Count: %s' % (tab, len(ubi.unknown_blocks_list))
print '\t%sFirst UBI PEB Number: %s' % (tab, ubi.first_peb_num)
def image(image, tab=''):
print '%s%s' % (tab, image)
print '%s---------------------' % (tab)
print '\t%sImage Sequence Num: %s' % (tab, image.image_seq)
for volume in image.volumes:
print '\t%sVolume Name:%s' % (tab, volume)
print '\t%sPEB Range: %s - %s' % (tab, image.peb_range[0], image.peb_range[1])
def volume(volume, tab=''):
print '%s%s' % (tab, volume)
print '%s---------------------' % (tab)
print '\t%sVol ID: %s' % (tab, volume.vol_id)
print '\t%sName: %s' % (tab, volume.name)
print '\t%sBlock Count: %s' % (tab, volume.block_count)
print '\n'
print '\t%sVolume Record' % (tab)
print '\t%s---------------------' % (tab)
vol_rec(volume.vol_rec, '\t\t%s' % tab)
print '\n'
def block(block, tab='\t'):
print '%s%s' % (tab, block)
print '%s---------------------' % (tab)
print '\t%sFile Offset: %s' % (tab, block.file_offset)
print '\t%sPEB #: %s' % (tab, block.peb_num)
print '\t%sLEB #: %s' % (tab, block.leb_num)
print '\t%sBlock Size: %s' % (tab, block.size)
print '\t%sInternal Volume: %s' % (tab, block.is_internal_vol)
print '\t%sIs Volume Table: %s' % (tab, block.is_vtbl)
print '\t%sIs Valid: %s' % (tab, block.is_valid)
if not block.ec_hdr.errors:
print '\n'
print '\t%sErase Count Header' % (tab)
print '\t%s---------------------' % (tab)
ec_hdr(block.ec_hdr, '\t\t%s' % tab)
if block.vid_hdr and not block.vid_hdr.errors:
print '\n'
        print '\t%sVID Header' % (tab)
print '\t%s---------------------' % (tab)
vid_hdr(block.vid_hdr, '\t\t%s' % tab)
if block.vtbl_recs:
print '\n'
print '\t%sVolume Records' % (tab)
print '\t%s---------------------' % (tab)
for vol in block.vtbl_recs:
vol_rec(vol, '\t\t%s' % tab)
print '\n'
def ec_hdr(ec_hdr, tab=''):
for key, value in ec_hdr:
if key == 'errors':
value = ','.join(value)
print '%s%s: %r' % (tab, key, value)
def vid_hdr(vid_hdr, tab=''):
for key, value in vid_hdr:
if key == 'errors':
value = ','.join(value)
elif key == 'compat':
if value in PRINT_COMPAT_LIST:
value = PRINT_COMPAT_LIST[value]
else:
value = -1
elif key == 'vol_type':
if value < len(PRINT_VOL_TYPE_LIST):
value = PRINT_VOL_TYPE_LIST[value]
else:
value = -1
print '%s%s: %s' % (tab, key, value)
def vol_rec(vol_rec, tab=''):
for key, value in vol_rec:
if key == 'errors':
value = ','.join(value)
elif key == 'vol_type':
if value < len(PRINT_VOL_TYPE_LIST):
value = PRINT_VOL_TYPE_LIST[value]
else:
value = -1
elif key == 'flags' and value == UBI_VTBL_AUTORESIZE_FLG:
value = 'autoresize'
elif key == 'name':
value = value.strip('\x00')
print '%s%s: %s' % (tab, key, value)
|
appneta/boto
|
refs/heads/develop
|
boto/vpc/networkacl.py
|
151
|
# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Network ACL
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class Icmp(object):
"""
Defines the ICMP code and type.
"""
def __init__(self, connection=None):
self.code = None
self.type = None
def __repr__(self):
        return 'Icmp::(code:%s, type:%s)' % (self.code, self.type)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'type':
self.type = value
class NetworkAcl(TaggedEC2Object):
def __init__(self, connection=None):
super(NetworkAcl, self).__init__(connection)
self.id = None
self.vpc_id = None
self.network_acl_entries = []
self.associations = []
def __repr__(self):
return 'NetworkAcl:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(NetworkAcl, self).startElement(name, attrs, connection)
if result is not None:
            # Parent found an interesting element, just return it
return result
if name == 'entrySet':
self.network_acl_entries = ResultSet([('item', NetworkAclEntry)])
return self.network_acl_entries
elif name == 'associationSet':
self.associations = ResultSet([('item', NetworkAclAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'networkAclId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class NetworkAclEntry(object):
def __init__(self, connection=None):
self.rule_number = None
self.protocol = None
self.rule_action = None
self.egress = None
self.cidr_block = None
self.port_range = PortRange()
self.icmp = Icmp()
def __repr__(self):
return 'Acl:%s' % self.rule_number
def startElement(self, name, attrs, connection):
if name == 'portRange':
return self.port_range
elif name == 'icmpTypeCode':
return self.icmp
else:
return None
def endElement(self, name, value, connection):
if name == 'cidrBlock':
self.cidr_block = value
elif name == 'egress':
self.egress = value
elif name == 'protocol':
self.protocol = value
elif name == 'ruleAction':
self.rule_action = value
elif name == 'ruleNumber':
self.rule_number = value
class NetworkAclAssociation(object):
def __init__(self, connection=None):
self.id = None
self.subnet_id = None
self.network_acl_id = None
def __repr__(self):
return 'NetworkAclAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'networkAclAssociationId':
self.id = value
elif name == 'networkAclId':
self.network_acl_id = value
elif name == 'subnetId':
self.subnet_id = value
class PortRange(object):
"""
Define the port range for the ACL entry if it is tcp / udp
"""
def __init__(self, connection=None):
self.from_port = None
self.to_port = None
def __repr__(self):
        return 'PortRange:(%s-%s)' % (self.from_port, self.to_port)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'from':
self.from_port = value
elif name == 'to':
self.to_port = value
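# A sketch of the describeNetworkAcls response fragment these handlers
# consume (element names match the endElement branches above; the values
# shown are illustrative):
#
#   <networkAclId>acl-1a2b3c4d</networkAclId>
#   <vpcId>vpc-5e6f7a8b</vpcId>
#   <entrySet><item>
#     <ruleNumber>100</ruleNumber>
#     <protocol>6</protocol>
#     <ruleAction>allow</ruleAction>
#     <egress>false</egress>
#     <cidrBlock>0.0.0.0/0</cidrBlock>
#     <portRange><from>80</from><to>80</to></portRange>
#   </item></entrySet>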
|
agogear/python-1
|
refs/heads/master
|
NKUCodingCat/0023/main.py
|
40
|
#coding=utf-8
from bottle import static_file, route, run, post, request, redirect
import os, makeweb, urllib, re, json, time  # Python 2: urllib.unquote (urllib.parse.unquote in Python 3)
Root = os.path.split(os.path.realpath(__file__))[0]+"/static/"
Const = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=gb2312" />
<title>Untitled Document</title>
<link type="text/css" rel="stylesheet" href="./static/css.css" />
</head>
<body>
<div id="1">
<form id="form1" name="form1" method="post" action="">
<label>Name
<input id="name" type="text" name="textfield" tabindex="0" />
</label>
<p>
<label>Comment
<textarea name="textarea" tabindex="1" style="height: 89px; width: 350px;"></textarea>
</label>
</p>
<p>
<label>
<input id="Submit" type="submit" name="Submit" value="提交" />
</label>
</p>
</form>
</div>
<div>
%s
</div>
</body>
</html>
"""
@route('/board')
def index():
return Const%makeweb.Pack(makeweb.Stor_out())
@post('/board')
def Accept():
Req = request.body.read()
L = re.split("&",Req)
M = {}
for i in L:
A = re.split("=",i)
M[A[0]] = urllib.unquote(A[1])
New = {}
New["Name"] = M["textfield"]
New["Content"] = M["textarea"]
makeweb.Stor_in(New)
redirect('/board', 302)
@route('/static/<filename>')
def server_static(filename):
return static_file(filename, root=Root)
run(host='localhost',port=8080)
|
kdwink/intellij-community
|
refs/heads/master
|
python/testData/formatter/alignListComprehensionInDict_after.py
|
79
|
def foo():
return {field.key: field for key, field in inspect.getmembers(instance)
if isinstance(field, QueryableAttribute)
and isinstance(field.property, ColumnProperty)
or field.foreign_keys}
|
palaxi00/palaxi00.github.io
|
refs/heads/gh-pages
|
Codeeval/calculate_distance.py
|
1
|
import sys
import re
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stri = str(test.strip())
        points = re.findall(r"[-+]?\d+\.?\d*", stri)
x1 = int(points[0])
y1 = int(points[1])
x2 = int(points[2])
y2 = int(points[3])
d = (((x2-x1)**2)+(y2-y1)**2)**(0.5)
print (int(d))
|
michel-slm/0install
|
refs/heads/master
|
tests/testgpg.py
|
9
|
#!/usr/bin/env python
from basetest import BaseTest
import sys, tempfile
import unittest
import warnings
sys.path.insert(0, '..')
from zeroinstall.injector import gpg, model, trust
err_sig = b"""<?xml version="1.0" ?>
<?xml-stylesheet type='text/xsl' href='interface.xsl'?>
<interface xmlns="http://zero-install.sourceforge.net/2004/injector/interface">
<name>test</name>
<summary>test</summary>
</interface>
<!-- Base64 Signature
iJwEAAECAAYFAk1NVyAACgkQerial32qo5eVCgP/RYEzT43M2Dj3winnkX2HQDO2Fx5dq83pmidd
LDEID3FxbuIpMUP/2rvPmNM3itRo/J4R2xkM65TEol/55uxDC1bbuarKf3wbgwEF60srFEDeeiYM
FmTQtWYPtrzAGtNRTgKfD75xk9lcM2GHmKNlgSQ7G8ZsfL6KaraF4Wa6nqU=
-->
"""
bad_xml_main = b"""<?xml version='1.0'?>
<root/>"""
# Pairs of (expected error substring, malformed signature block).
invalid_xmls_sigs = [
('last line is not end-of-comment',
b"""<!-- Base64 Signature
"""),
('No signature block in XML',
b"""<!-- Base64 Sig
iD8DBQBDtpK9rgeCgFmlPMERAg0gAKCaJhXFnk
-->
"""),
('extra data on comment line',
b"""<!-- Base64 Signature data
iD8DBQBDtpK9rgeCgFmlPMERAg0gAKCaJhXFnk
-->
"""),
('last line is not end-of-comment',
b"""<!-- Base64 Signature
iD8DBQBDtpK9rgeCgFmlPMERAg0gAKCaJhXFnk
WZRBLT0an56WYaBODukSsf4=
--> More
"""),
('Invalid base 64 encoded signature:',
b"""<!-- Base64 Signature
iD8DBQBDtpK9rgeCgFmlPMERAg0gAKCaJhXFnk
WZRBLT0an56WYaBODukSsf4=
=zMc+
-->
"""),
('Invalid characters found',
b"""<!-- Base64 Signature
iD8DBQBDtpK9rge<CgFmlPMERAg0gAKCaJhXFnk
WZRBLT0an56WYaBODukSsf4=
-->
""")]
good_xml_sig = b"""<?xml version='1.0'?>
<root/>
<!-- Base64 Signature
iD8DBQBDuChIrgeCgFmlPMERAnGEAJ0ZS1PeyWonx6xS/mgpYTKNgSXa5QCeMSYPHhNcvxu3f84y
Uk7hxHFeQPo=
-->
"""
bad_xml_sig = b"""<?xml version='1.0'?>
<ro0t/>
<!-- Base64 Signature
iD8DBQBDuChIrgeCgFmlPMERAnGEAJ0ZS1PeyWonx6xS/mgpYTKNgSXa5QCeMSYPHhNcvxu3f84y
Uk7hxHFeQPo=
-->
"""
from data import thomas_key
THOMAS_FINGERPRINT = '92429807C9853C0744A68B9AAE07828059A53CC1'
class TestGPG(BaseTest):
def setUp(self):
BaseTest.setUp(self)
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(thomas_key)
stream.seek(0)
gpg.import_key(stream)
trust.trust_db.trust_key(THOMAS_FINGERPRINT)
warnings.filterwarnings("ignore", category = DeprecationWarning)
def testImportBad(self):
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(b"Bad key")
stream.seek(0)
try:
gpg.import_key(stream)
assert False
except model.SafeException:
pass # OK
def testErrSig(self):
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(err_sig)
stream.seek(0)
data, sigs = gpg.check_stream(stream)
self.assertEqual(err_sig, data.read())
assert len(sigs) == 1
assert isinstance(sigs[0], gpg.ErrSig)
assert sigs[0].need_key() == "7AB89A977DAAA397"
self.assertEqual("1", sigs[0].status[gpg.ErrSig.ALG])
assert sigs[0].is_trusted() is False
assert str(sigs[0]).startswith('ERROR')
def testBadXMLSig(self):
self.assertEqual(bad_xml_sig, self.check_bad(bad_xml_sig))
def testInvalidXMLSig(self):
for error, sig in invalid_xmls_sigs:
try:
self.check_bad(bad_xml_main + b'\n' + sig)
except model.SafeException as ex:
if error not in str(ex):
                    # 'sig' is bytes; decode before concatenating with the str message
                    raise model.SafeException(str(ex) + '\nSig:\n' + sig.decode('utf-8', 'replace'))
def check_bad(self, sig):
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(sig)
stream.seek(0)
data, sigs = gpg.check_stream(stream)
assert len(sigs) == 1
assert isinstance(sigs[0], gpg.BadSig)
self.assertEqual("AE07828059A53CC1",
sigs[0].status[gpg.BadSig.KEYID])
assert sigs[0].is_trusted() is False
assert sigs[0].need_key() is None
assert str(sigs[0]).startswith('BAD')
return data.read()
def testGoodXMLSig(self):
self.assertEqual(good_xml_sig, self.check_good(good_xml_sig))
def check_good(self, sig):
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(sig)
stream.seek(0)
data, sigs = gpg.check_stream(stream)
assert len(sigs) == 1
assert isinstance(sigs[0], gpg.ValidSig)
self.assertEqual("92429807C9853C0744A68B9AAE07828059A53CC1",
sigs[0].fingerprint)
assert sigs[0].is_trusted() is True
assert sigs[0].need_key() is None
assert str(sigs[0]).startswith('Valid')
for item in sigs[0].get_details():
if item[0] == 'uid' and len(item) > 9:
assert item[9] in ["Thomas Leonard <tal197@users.sourceforge.net>"], str(item)
break
else:
self.fail("Missing name")
return data.read()
def testNoSig(self):
with tempfile.TemporaryFile(mode = 'w+b') as stream:
stream.write(b"Hello")
stream.seek(0)
try:
gpg.check_stream(stream)
assert False
except model.SafeException:
pass # OK
def testLoadKeys(self):
self.assertEqual({}, gpg.load_keys([]))
keys = gpg.load_keys([THOMAS_FINGERPRINT])
self.assertEqual(1, len(keys))
key = keys[THOMAS_FINGERPRINT]
self.assertEqual(THOMAS_FINGERPRINT, key.fingerprint)
self.assertEqual('Thomas Leonard <tal197@users.sourceforge.net>',
key.name)
if __name__ == '__main__':
unittest.main()
|
videetssinghai/Blog-Rest-Api
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py
|
385
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states and when we have a token
        to return we yield it, which pauses processing until the next token
        is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
        If ";" is not present, a "numeric-entity-without-semicolon" ParseError
        token is appended to self.tokenQueue.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
            # of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
            # XXX If we emit here, the attributes are converted to a dict
            # without being checked, and when the code below runs we error
            # because data is a dict, not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
            # and needs to be pushed back with unget
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
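    # A minimal sketch of how these state methods are typically driven. The
    # actual run loop is outside this excerpt, so the driver below is an
    # assumption, not part of this class: each state method consumes input,
    # may append tokens to self.tokenQueue, reassigns self.state, and
    # returns True while tokenizing should continue.
    #
    #     def _run(self):                     # hypothetical driver
    #         while self.state():             # current state handler
    #             while self.tokenQueue:
    #                 yield self.tokenQueue.pop(0)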
|
nycholas/ask-undrgz
|
refs/heads/master
|
src/ask-undrgz/django/db/backends/sqlite3/base.py
|
11
|
"""
SQLite3 backend for django.
Python 2.4 requires pysqlite2 (http://pysqlite.org/).
Python 2.5 and later can use a pysqlite2 module or the sqlite3 module in the
standard library.
"""
import re
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.utils.safestring import SafeString
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError, e1:
from sqlite3 import dbapi2 as Database
except ImportError, exc:
import sys
from django.core.exceptions import ImproperlyConfigured
if sys.version_info < (2, 5, 0):
module = 'pysqlite2 module'
exc = e1
else:
module = 'either pysqlite2 or sqlite3 modules (tried in that order)'
raise ImproperlyConfigured("Error loading %s: %s" % (module, exc))
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
Database.register_converter("bool", lambda s: str(s) == '1')
Database.register_converter("time", util.typecast_time)
Database.register_converter("date", util.typecast_date)
Database.register_converter("datetime", util.typecast_timestamp)
Database.register_converter("timestamp", util.typecast_timestamp)
Database.register_converter("TIMESTAMP", util.typecast_timestamp)
Database.register_converter("decimal", util.typecast_decimal)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2,4,1):
    # Starting in 2.4.1, the str type is no longer accepted, so we convert
    # all str objects to Unicode.
    # As registering an adapter for a primitive type causes a small
    # slow-down, this adapter is only registered for the sqlite3 versions
    # that need it.
Database.register_adapter(str, lambda s:s.decode('utf-8'))
Database.register_adapter(SafeString, lambda s:s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_extract that's registered in _cursor(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
return "django_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in _cursor(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return 'NULL'
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
        # NB: The generated SQL below is specific to SQLite.
        # The DELETE FROM... SQL generated below works for SQLite because
        # constraints aren't enforced, so the deletion order is irrelevant.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
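        # For example, for tables ['auth_user', 'auth_group'] this returns
        # (ignoring terminal styling):
        #     DELETE FROM "auth_user";
        #     DELETE FROM "auth_group";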
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
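        # A quick worked example of the bounds this produces:
        #     year_lookup_bounds(2010) ->
        #         ['2010-01-01', '2010-12-31 23:59:59.999999']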
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return util.typecast_date(value)
elif internal_type == 'DateTimeField':
return util.typecast_timestamp(value)
elif internal_type == 'TimeField':
return util.typecast_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
class DatabaseWrapper(BaseDatabaseWrapper):
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
if self.connection is None:
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
# Register extract, date_trunc, and regexp functions.
self.connection.create_function("django_extract", 2, _sqlite_extract)
self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
self.connection.create_function("regexp", 2, _sqlite_regexp)
connection_created.send(sender=self.__class__)
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
FORMAT_QMARK_REGEX = re.compile(r'(?![^%])%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
try:
return Database.Cursor.execute(self, query, params)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, param_list):
query = self.convert_query(query)
try:
return Database.Cursor.executemany(self, query, param_list)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%','%')
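    # Usage sketch (queries are made up): convert_query rewrites "format"
    # placeholders into sqlite's "qmark" style, e.g.
    #     "SELECT * FROM t WHERE a = %s"     ->  "SELECT * FROM t WHERE a = ?"
    #     "... WHERE name LIKE '%%admin%%'"  ->  "... WHERE name LIKE '%admin%'"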
def _sqlite_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
|
alxgu/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py
|
298
|
data = 'spam3'
|
angr/angr
|
refs/heads/master
|
angr/procedures/win_user32/keyboard.py
|
5
|
import angr
class GetKeyboardType(angr.SimProcedure):
def run(self, param):
        # return the values observed at the time of the author's testing
if self.state.solver.is_true(param == 0):
return 4
if self.state.solver.is_true(param == 1):
return 0
if self.state.solver.is_true(param == 2):
return 12
return 0
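# Hypothetical usage sketch (the binary name and project setup are
# assumptions, not part of this file): hook the win32 import so calls to
# GetKeyboardType resolve to the procedure above.
#     proj = angr.Project('app.exe', auto_load_libs=False)
#     proj.hook_symbol('GetKeyboardType', GetKeyboardType())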
|
SungEun-Steve-Kim/test-mp
|
refs/heads/master
|
tests/float/int_power.py
|
100
|
# negative power should produce float
x = 2
print(x ** -2)
x = 3
x **= -2
print('%.5f' % x)
|
keithroe/vtkoptix
|
refs/heads/master
|
ThirdParty/Twisted/twisted/conch/ssh/forwarding.py
|
59
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
This module contains the implementation of TCP forwarding, which allows
clients and servers to forward arbitrary TCP data across the connection.
Maintainer: Paul Swartz
"""
import struct
from twisted.internet import protocol, reactor
from twisted.python import log
import common, channel
class SSHListenForwardingFactory(protocol.Factory):
def __init__(self, connection, hostport, klass):
self.conn = connection
self.hostport = hostport # tuple
self.klass = klass
def buildProtocol(self, addr):
channel = self.klass(conn = self.conn)
client = SSHForwardingClient(channel)
channel.client = client
addrTuple = (addr.host, addr.port)
channelOpenData = packOpen_direct_tcpip(self.hostport, addrTuple)
self.conn.openChannel(channel, channelOpenData)
return client
class SSHListenForwardingChannel(channel.SSHChannel):
def channelOpen(self, specificData):
log.msg('opened forwarding channel %s' % self.id)
if len(self.client.buf)>1:
b = self.client.buf[1:]
self.write(b)
self.client.buf = ''
def openFailed(self, reason):
self.closed()
def dataReceived(self, data):
self.client.transport.write(data)
def eofReceived(self):
self.client.transport.loseConnection()
def closed(self):
if hasattr(self, 'client'):
log.msg('closing local forwarding channel %s' % self.id)
self.client.transport.loseConnection()
del self.client
class SSHListenClientForwardingChannel(SSHListenForwardingChannel):
name = 'direct-tcpip'
class SSHListenServerForwardingChannel(SSHListenForwardingChannel):
name = 'forwarded-tcpip'
class SSHConnectForwardingChannel(channel.SSHChannel):
def __init__(self, hostport, *args, **kw):
channel.SSHChannel.__init__(self, *args, **kw)
self.hostport = hostport
self.client = None
self.clientBuf = ''
def channelOpen(self, specificData):
cc = protocol.ClientCreator(reactor, SSHForwardingClient, self)
log.msg("connecting to %s:%i" % self.hostport)
cc.connectTCP(*self.hostport).addCallbacks(self._setClient, self._close)
def _setClient(self, client):
self.client = client
log.msg("connected to %s:%i" % self.hostport)
if self.clientBuf:
self.client.transport.write(self.clientBuf)
self.clientBuf = None
if self.client.buf[1:]:
self.write(self.client.buf[1:])
self.client.buf = ''
def _close(self, reason):
log.msg("failed to connect: %s" % reason)
self.loseConnection()
def dataReceived(self, data):
if self.client:
self.client.transport.write(data)
else:
self.clientBuf += data
def closed(self):
if self.client:
log.msg('closed remote forwarding channel %s' % self.id)
if self.client.channel:
self.loseConnection()
self.client.transport.loseConnection()
del self.client
def openConnectForwardingClient(remoteWindow, remoteMaxPacket, data, avatar):
remoteHP, origHP = unpackOpen_direct_tcpip(data)
return SSHConnectForwardingChannel(remoteHP,
remoteWindow=remoteWindow,
remoteMaxPacket=remoteMaxPacket,
avatar=avatar)
class SSHForwardingClient(protocol.Protocol):
def __init__(self, channel):
self.channel = channel
self.buf = '\000'
def dataReceived(self, data):
if self.buf:
self.buf += data
else:
self.channel.write(data)
def connectionLost(self, reason):
if self.channel:
self.channel.loseConnection()
self.channel = None
def packOpen_direct_tcpip((connHost, connPort), (origHost, origPort)):
"""Pack the data suitable for sending in a CHANNEL_OPEN packet.
"""
conn = common.NS(connHost) + struct.pack('>L', connPort)
orig = common.NS(origHost) + struct.pack('>L', origPort)
return conn + orig
packOpen_forwarded_tcpip = packOpen_direct_tcpip
def unpackOpen_direct_tcpip(data):
"""Unpack the data to a usable format.
"""
connHost, rest = common.getNS(data)
connPort = int(struct.unpack('>L', rest[:4])[0])
origHost, rest = common.getNS(rest[4:])
origPort = int(struct.unpack('>L', rest[:4])[0])
return (connHost, connPort), (origHost, origPort)
unpackOpen_forwarded_tcpip = unpackOpen_direct_tcpip
def packGlobal_tcpip_forward((host, port)):
return common.NS(host) + struct.pack('>L', port)
def unpackGlobal_tcpip_forward(data):
host, rest = common.getNS(data)
port = int(struct.unpack('>L', rest[:4])[0])
return host, port
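# Round-trip sketch (addresses are made up): each host is a length-prefixed
# string (common.NS) followed by a big-endian 32-bit port, matching the
# layout unpacked above.
#     blob = packOpen_direct_tcpip(('example.com', 22), ('10.0.0.1', 4567))
#     unpackOpen_direct_tcpip(blob)  # -> (('example.com', 22), ('10.0.0.1', 4567))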
"""This is how the data -> eof -> close stuff /should/ work.
debug3: channel 1: waiting for connection
debug1: channel 1: connected
debug1: channel 1: read<=0 rfd 7 len 0
debug1: channel 1: read failed
debug1: channel 1: close_read
debug1: channel 1: input open -> drain
debug1: channel 1: ibuf empty
debug1: channel 1: send eof
debug1: channel 1: input drain -> closed
debug1: channel 1: rcvd eof
debug1: channel 1: output open -> drain
debug1: channel 1: obuf empty
debug1: channel 1: close_write
debug1: channel 1: output drain -> closed
debug1: channel 1: rcvd close
debug3: channel 1: will not send data after close
debug1: channel 1: send close
debug1: channel 1: is dead
"""
|
glovebx/odoo
|
refs/heads/8.0
|
addons/auth_ldap/__init__.py
|
442
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import users_ldap
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
insidenothing/3D-Printing-Software
|
refs/heads/master
|
skein_engines/skeinforge-47/fabmetheus_utilities/euclidean.py
|
4
|
"""
Euclidean is a collection of python utilities for complex numbers, paths, polygons & Vector3s.
To use euclidean, install python 2.x on your machine, which is available from http://www.python.org/download/
Then, in the folder that contains euclidean, type 'python' in a shell to run the python interpreter. Finally type 'import euclidean' to import these utilities and 'from vector3 import Vector3' to import the Vector3 class.
Below are examples of euclidean use.
>>> from euclidean import *
>>> origin=complex()
>>> right=complex(1.0,0.0)
>>> back=complex(0.0,1.0)
>>> getMaximum(right,back)
1.0, 1.0
>>> polygon=[origin, right, back]
>>> getLoopLength(polygon)
3.4142135623730949
>>> getAreaLoop(polygon)
0.5
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
# Init has to be imported first because it has code to work around the python bug where relative imports don't work when the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import xml_simple_writer
import cStringIO
import math
import random
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalGoldenAngle = 3.8832220774509332 # (math.sqrt(5.0) - 1.0) * math.pi
globalGoldenRatio = 1.6180339887498948482045868 # math.sqrt(1.25) + .5
globalTau = math.pi + math.pi # http://tauday.com/
def addElementToListDictionary(element, key, listDictionary):
'Add an element to the list table.'
if key in listDictionary:
listDictionary[key].append(element)
else:
listDictionary[key] = [element]
def addElementToListDictionaryIfNotThere(element, key, listDictionary):
'Add the value to the lists.'
if key in listDictionary:
elements = listDictionary[key]
if element not in elements:
elements.append(element)
else:
listDictionary[key] = [element]
def addElementToPixelList( element, pixelDictionary, x, y ):
'Add an element to the pixel list.'
addElementToListDictionary( element, (x, y), pixelDictionary )
def addElementToPixelListFromPoint( element, pixelDictionary, point ):
'Add an element to the pixel list.'
addElementToPixelList( element, pixelDictionary, int( round( point.real ) ), int( round( point.imag ) ) )
def addHorizontallyBoundedPoint(begin, center, end, horizontalBegin, horizontalEnd, path):
'Add point if it is within the horizontal bounds.'
if center.real >= horizontalEnd and center.real <= horizontalBegin:
path.append(center)
return
if end != None:
if center.real > horizontalBegin and end.real <= horizontalBegin:
centerMinusEnd = center - end
along = (center.real - horizontalBegin) / centerMinusEnd.real
path.append(center - along * centerMinusEnd)
return
if begin != None:
if center.real < horizontalEnd and begin.real >= horizontalEnd:
centerMinusBegin = center - begin
along = (center.real - horizontalEnd) / centerMinusBegin.real
path.append(center - along * centerMinusBegin)
def addListToListTable( elementList, key, listDictionary ):
'Add a list to the list table.'
if key in listDictionary:
listDictionary[key] += elementList
else:
listDictionary[key] = elementList
def addLoopToPixelTable( loop, pixelDictionary, width ):
'Add loop to the pixel table.'
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
addValueSegmentToPixelTable( pointBegin, pointEnd, pixelDictionary, None, width )
def addNestedRingBeginning(distanceFeedRate, loop, z):
'Add nested ring beginning to gcode output.'
distanceFeedRate.addLine('(<nestedRing>)')
distanceFeedRate.addLine('(<boundaryPerimeter>)')
for point in loop:
pointVector3 = Vector3(point.real, point.imag, z)
distanceFeedRate.addLine(distanceFeedRate.getBoundaryLine(pointVector3))
def addPathToPixelTable( path, pixelDictionary, value, width ):
'Add path to the pixel table.'
for pointIndex in xrange( len(path) - 1 ):
pointBegin = path[pointIndex]
pointEnd = path[pointIndex + 1]
addValueSegmentToPixelTable( pointBegin, pointEnd, pixelDictionary, value, width )
def addPixelTableToPixelTable( fromPixelTable, intoPixelTable ):
'Add from pixel table to the into pixel table.'
for fromPixelTableKey in fromPixelTable.keys():
intoPixelTable[ fromPixelTableKey ] = fromPixelTable[ fromPixelTableKey ]
def addPixelToPixelTableWithSteepness( isSteep, pixelDictionary, value, x, y ):
'Add pixels to the pixel table with steepness.'
if isSteep:
pixelDictionary[(y, x)] = value
else:
pixelDictionary[(x, y)] = value
def addPointToPath( path, pixelDictionary, point, value, width ):
'Add a point to a path and the pixel table.'
path.append(point)
if len(path) < 2:
return
begin = path[-2]
addValueSegmentToPixelTable( begin, point, pixelDictionary, value, width )
def addSegmentToPixelTable( beginComplex, endComplex, pixelDictionary, shortenDistanceBegin, shortenDistanceEnd, width ):
'Add line segment to the pixel table.'
if abs( beginComplex - endComplex ) <= 0.0:
return
beginComplex /= width
endComplex /= width
if shortenDistanceBegin > 0.0:
endMinusBeginComplex = endComplex - beginComplex
endMinusBeginComplexLength = abs( endMinusBeginComplex )
if endMinusBeginComplexLength < shortenDistanceBegin:
return
beginComplex = beginComplex + endMinusBeginComplex * shortenDistanceBegin / endMinusBeginComplexLength
if shortenDistanceEnd > 0.0:
beginMinusEndComplex = beginComplex - endComplex
beginMinusEndComplexLength = abs( beginMinusEndComplex )
        if beginMinusEndComplexLength < shortenDistanceEnd:  # mirrors the shortenDistanceBegin check above
return
endComplex = endComplex + beginMinusEndComplex * shortenDistanceEnd / beginMinusEndComplexLength
deltaX = endComplex.real - beginComplex.real
deltaY = endComplex.imag - beginComplex.imag
isSteep = abs( deltaY ) > abs( deltaX )
if isSteep:
beginComplex = complex( beginComplex.imag, beginComplex.real )
endComplex = complex( endComplex.imag, endComplex.real )
if beginComplex.real > endComplex.real:
endComplex, beginComplex = beginComplex, endComplex
deltaX = endComplex.real - beginComplex.real
deltaY = endComplex.imag - beginComplex.imag
if deltaX > 0.0:
gradient = deltaY / deltaX
else:
gradient = 0.0
print('Warning, deltaX in addSegmentToPixelTable in euclidean is 0.')
print(beginComplex)
print(endComplex)
print(shortenDistanceBegin)
print(shortenDistanceEnd)
print(width)
xBegin = int(round(beginComplex.real))
xEnd = int(round(endComplex.real))
yIntersection = beginComplex.imag - beginComplex.real * gradient
if isSteep:
pixelDictionary[( int( round( beginComplex.imag ) ), xBegin)] = None
pixelDictionary[( int( round( endComplex.imag ) ), xEnd )] = None
for x in xrange( xBegin + 1, xEnd ):
y = int( math.floor( yIntersection + x * gradient ) )
pixelDictionary[(y, x)] = None
pixelDictionary[(y + 1, x)] = None
else:
pixelDictionary[(xBegin, int( round( beginComplex.imag ) ) )] = None
pixelDictionary[(xEnd, int( round( endComplex.imag ) ) )] = None
for x in xrange( xBegin + 1, xEnd ):
y = int( math.floor( yIntersection + x * gradient ) )
pixelDictionary[(x, y)] = None
pixelDictionary[(x, y + 1)] = None
def addSquareTwoToPixelDictionary(pixelDictionary, point, value, width):
'Add square with two pixels around the center to pixel dictionary.'
point /= width
x = int(round(point.real))
y = int(round(point.imag))
for xStep in xrange(x - 2, x + 3):
for yStep in xrange(y - 2, y + 3):
pixelDictionary[(xStep, yStep)] = value
def addToThreadsFromLoop(extrusionHalfWidth, gcodeType, loop, oldOrderedLocation, skein):
'Add to threads from the last location from loop.'
loop = getLoopStartingClosest(extrusionHalfWidth, oldOrderedLocation.dropAxis(), loop)
oldOrderedLocation.x = loop[0].real
oldOrderedLocation.y = loop[0].imag
gcodeTypeStart = gcodeType
if isWiddershins(loop):
skein.distanceFeedRate.addLine('(<%s> outer )' % gcodeType)
else:
skein.distanceFeedRate.addLine('(<%s> inner )' % gcodeType)
skein.addGcodeFromThreadZ(loop + [loop[0]], oldOrderedLocation.z)
skein.distanceFeedRate.addLine('(</%s>)' % gcodeType)
def addToThreadsRemove(extrusionHalfWidth, nestedRings, oldOrderedLocation, skein, threadSequence):
'Add to threads from the last location from nested rings.'
while len(nestedRings) > 0:
getTransferClosestNestedRing(extrusionHalfWidth, nestedRings, oldOrderedLocation, skein, threadSequence)
def addValueSegmentToPixelTable( beginComplex, endComplex, pixelDictionary, value, width ):
'Add line segment to the pixel table.'
if abs( beginComplex - endComplex ) <= 0.0:
return
beginComplex /= width
endComplex /= width
deltaX = endComplex.real - beginComplex.real
deltaY = endComplex.imag - beginComplex.imag
isSteep = abs( deltaY ) > abs( deltaX )
if isSteep:
beginComplex = complex( beginComplex.imag, beginComplex.real )
endComplex = complex( endComplex.imag, endComplex.real )
if beginComplex.real > endComplex.real:
endComplex, beginComplex = beginComplex, endComplex
deltaX = endComplex.real - beginComplex.real
deltaY = endComplex.imag - beginComplex.imag
if deltaX > 0.0:
gradient = deltaY / deltaX
else:
gradient = 0.0
print('Warning, deltaX in addValueSegmentToPixelTable in euclidean is 0.')
print(beginComplex)
print(value)
print(endComplex)
print(width)
xBegin = int(round(beginComplex.real))
xEnd = int(round(endComplex.real))
yIntersection = beginComplex.imag - beginComplex.real * gradient
if isSteep:
pixelDictionary[(int( round( beginComplex.imag ) ), xBegin)] = value
pixelDictionary[(int( round( endComplex.imag ) ), xEnd)] = value
for x in xrange( xBegin + 1, xEnd ):
y = int( math.floor( yIntersection + x * gradient ) )
pixelDictionary[(y, x)] = value
pixelDictionary[(y + 1, x)] = value
else:
pixelDictionary[(xBegin, int( round( beginComplex.imag ) ))] = value
pixelDictionary[(xEnd, int( round( endComplex.imag ) ))] = value
for x in xrange( xBegin + 1, xEnd ):
y = int( math.floor( yIntersection + x * gradient ) )
pixelDictionary[(x, y)] = value
pixelDictionary[(x, y + 1)] = value
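# Usage sketch for the rasteriser above (values are illustrative):
#     pixels = {}
#     addValueSegmentToPixelTable(complex(0.0, 0.0), complex(4.0, 2.0), pixels, 'mark', 1.0)
#     # pixels now maps the integer (x, y) grid cells touched by the segment to 'mark'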
def addValueToOutput(depth, keyInput, output, value):
'Add value to the output.'
depthStart = ' ' * depth
output.write('%s%s:' % (depthStart, keyInput))
if value.__class__ == dict:
output.write('\n')
keys = value.keys()
keys.sort()
for key in keys:
addValueToOutput(depth + 1, key, output, value[key])
return
if value.__class__ == list:
output.write('\n')
for elementIndex, element in enumerate(value):
addValueToOutput(depth + 1, elementIndex, output, element)
return
output.write(' %s\n' % value)
def addXIntersectionIndexesFromLoopListsY( loopLists, xIntersectionIndexList, y ):
'Add the x intersection indexes for the loop lists.'
for loopListIndex in xrange( len(loopLists) ):
loopList = loopLists[ loopListIndex ]
addXIntersectionIndexesFromLoopsY( loopList, loopListIndex, xIntersectionIndexList, y )
def addXIntersectionIndexesFromLoopsY( loops, solidIndex, xIntersectionIndexList, y ):
'Add the x intersection indexes for the loops.'
for loop in loops:
addXIntersectionIndexesFromLoopY( loop, solidIndex, xIntersectionIndexList, y )
def addXIntersectionIndexesFromLoopY( loop, solidIndex, xIntersectionIndexList, y ):
'Add the x intersection indexes for a loop.'
for pointIndex in xrange(len(loop)):
pointFirst = loop[pointIndex]
pointSecond = loop[(pointIndex + 1) % len(loop)]
xIntersection = getXIntersectionIfExists( pointFirst, pointSecond, y )
if xIntersection != None:
xIntersectionIndexList.append( XIntersectionIndex( solidIndex, xIntersection ) )
def addXIntersectionIndexesFromSegment( index, segment, xIntersectionIndexList ):
'Add the x intersection indexes from the segment.'
for endpoint in segment:
xIntersectionIndexList.append( XIntersectionIndex( index, endpoint.point.real ) )
def addXIntersectionIndexesFromSegments( index, segments, xIntersectionIndexList ):
'Add the x intersection indexes from the segments.'
for segment in segments:
addXIntersectionIndexesFromSegment( index, segment, xIntersectionIndexList )
def addXIntersectionIndexesFromXIntersections( index, xIntersectionIndexList, xIntersections ):
'Add the x intersection indexes from the XIntersections.'
for xIntersection in xIntersections:
xIntersectionIndexList.append( XIntersectionIndex( index, xIntersection ) )
def addXIntersections( loop, xIntersections, y ):
'Add the x intersections for a loop.'
for pointIndex in xrange(len(loop)):
pointFirst = loop[pointIndex]
pointSecond = loop[(pointIndex + 1) % len(loop)]
xIntersection = getXIntersectionIfExists( pointFirst, pointSecond, y )
if xIntersection != None:
xIntersections.append( xIntersection )
def addXIntersectionsFromLoopForTable(loop, xIntersectionsTable, width):
'Add the x intersections for a loop into a table.'
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if pointBegin.imag > pointEnd.imag:
pointOriginal = pointBegin
pointBegin = pointEnd
pointEnd = pointOriginal
fillBegin = int( math.ceil( pointBegin.imag / width ) )
fillEnd = int( math.ceil( pointEnd.imag / width ) )
if fillEnd > fillBegin:
secondMinusFirstComplex = pointEnd - pointBegin
			secondMinusFirstRealOverImaginary = secondMinusFirstComplex.real / secondMinusFirstComplex.imag
			beginRealMinusImaginary = pointBegin.real - pointBegin.imag * secondMinusFirstRealOverImaginary
			for fillLine in xrange( fillBegin, fillEnd ):
				y = fillLine * width
				xIntersection = y * secondMinusFirstRealOverImaginary + beginRealMinusImaginary
addElementToListDictionary( xIntersection, fillLine, xIntersectionsTable )
def addXIntersectionsFromLoops(loops, xIntersections, y):
'Add the x intersections for the loops.'
for loop in loops:
addXIntersections(loop, xIntersections, y)
def addXIntersectionsFromLoopsForTable(loops, xIntersectionsTable, width):
'Add the x intersections for a loop into a table.'
for loop in loops:
addXIntersectionsFromLoopForTable(loop, xIntersectionsTable, width)
def compareSegmentLength( endpoint, otherEndpoint ):
'Get comparison in order to sort endpoints in ascending order of segment length.'
if endpoint.segmentLength > otherEndpoint.segmentLength:
return 1
if endpoint.segmentLength < otherEndpoint.segmentLength:
return - 1
return 0
def concatenateRemovePath( connectedPaths, pathIndex, paths, pixelDictionary, segments, width ):
'Get connected paths from paths.'
bottomSegment = segments[ pathIndex ]
path = paths[ pathIndex ]
if bottomSegment == None:
connectedPaths.append(path)
return
endpoints = getEndpointsFromSegments( segments[ pathIndex + 1 : ] )
bottomSegmentEndpoint = bottomSegment[0]
nextEndpoint = bottomSegmentEndpoint.getClosestMissCheckEndpointPath( endpoints, bottomSegmentEndpoint.path, pixelDictionary, width )
if nextEndpoint == None:
bottomSegmentEndpoint = bottomSegment[1]
nextEndpoint = bottomSegmentEndpoint.getClosestMissCheckEndpointPath( endpoints, bottomSegmentEndpoint.path, pixelDictionary, width )
if nextEndpoint == None:
connectedPaths.append(path)
return
if len( bottomSegmentEndpoint.path ) > 0 and len( nextEndpoint.path ) > 0:
bottomEnd = bottomSegmentEndpoint.path[-1]
nextBegin = nextEndpoint.path[-1]
nextMinusBottomNormalized = getNormalized( nextBegin - bottomEnd )
if len( bottomSegmentEndpoint.path ) > 1:
bottomPenultimate = bottomSegmentEndpoint.path[-2]
if getDotProduct( getNormalized( bottomPenultimate - bottomEnd ), nextMinusBottomNormalized ) > 0.9:
connectedPaths.append(path)
return
if len( nextEndpoint.path ) > 1:
nextPenultimate = nextEndpoint.path[-2]
if getDotProduct( getNormalized( nextPenultimate - nextBegin ), - nextMinusBottomNormalized ) > 0.9:
connectedPaths.append(path)
return
nextEndpoint.path.reverse()
concatenatedPath = bottomSegmentEndpoint.path + nextEndpoint.path
paths[ nextEndpoint.pathIndex ] = concatenatedPath
segments[ nextEndpoint.pathIndex ] = getSegmentFromPath( concatenatedPath, nextEndpoint.pathIndex )
addValueSegmentToPixelTable( bottomSegmentEndpoint.point, nextEndpoint.point, pixelDictionary, None, width )
def getAngleAroundZAxisDifference( subtractFromVec3, subtractVec3 ):
'Get the angle around the Z axis difference between a pair of Vector3s.'
subtractVectorMirror = complex( subtractVec3.x , - subtractVec3.y )
differenceVector = getRoundZAxisByPlaneAngle( subtractVectorMirror, subtractFromVec3 )
return math.atan2( differenceVector.y, differenceVector.x )
def getAngleDifferenceByComplex( subtractFromComplex, subtractComplex ):
'Get the angle between a pair of normalized complexes.'
subtractComplexMirror = complex( subtractComplex.real , - subtractComplex.imag )
differenceComplex = subtractComplexMirror * subtractFromComplex
return math.atan2( differenceComplex.imag, differenceComplex.real )
def getAreaLoop(loop):
'Get the area of a complex polygon.'
areaLoopDouble = 0.0
for pointIndex, point in enumerate(loop):
pointEnd = loop[(pointIndex + 1) % len(loop)]
areaLoopDouble += point.real * pointEnd.imag - pointEnd.real * point.imag
return 0.5 * areaLoopDouble
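# A minimal usage sketch (hypothetical values): the shoelace sum is positive
# for counterclockwise (widdershins) loops and negative for clockwise ones,
# so the sign encodes orientation.
# >>> getAreaLoop([complex(0.0, 0.0), complex(1.0, 0.0), complex(1.0, 1.0), complex(0.0, 1.0)])
# 1.0
# >>> getAreaLoop([complex(0.0, 0.0), complex(0.0, 1.0), complex(1.0, 1.0), complex(1.0, 0.0)])
# -1.0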
def getAreaLoopAbsolute(loop):
'Get the absolute area of a complex polygon.'
return abs(getAreaLoop(loop))
def getAreaLoops(loops):
'Get the area of a list of complex polygons.'
areaLoops = 0.0
for loop in loops:
areaLoops += getAreaLoop(loop)
return areaLoops
def getAreaVector3LoopAbsolute(loop):
'Get the absolute area of a vector3 polygon.'
return getAreaLoopAbsolute(getComplexPath(loop))
def getAroundLoop(begin, end, loop):
'Get an arc around a loop.'
aroundLoop = []
if end <= begin:
end += len(loop)
for pointIndex in xrange(begin, end):
aroundLoop.append(loop[pointIndex % len(loop)])
return aroundLoop
def getAwayPath(path, radius):
'Get a path with only the points that are far enough away from each other, except for the last point.'
if len(path) < 2:
return path
lastPoint = path[-1]
awayPath = getAwayPoints(path, radius)
if len(awayPath) == 0:
return [lastPoint]
if abs(lastPoint - awayPath[-1]) > 0.001 * radius:
awayPath.append(lastPoint)
return awayPath
def getAwayPoints(points, radius):
'Get a path with only the points that are far enough away from each other.'
awayPoints = []
oneOverOverlapDistance = 1000.0 / radius
pixelDictionary = {}
for point in points:
x = int(point.real * oneOverOverlapDistance)
y = int(point.imag * oneOverOverlapDistance)
if not getSquareIsOccupied(pixelDictionary, x, y):
awayPoints.append(point)
pixelDictionary[(x, y)] = None
return awayPoints
def getBooleanFromDictionary(defaultBoolean, dictionary, key):
'Get boolean from the dictionary and key.'
if key not in dictionary:
return defaultBoolean
return getBooleanFromValue(dictionary[key])
def getBooleanFromValue(value):
'Get boolean from the word.'
firstCharacter = str(value).lower().lstrip()[: 1]
return firstCharacter == 't' or firstCharacter == '1'
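# A minimal usage sketch (hypothetical values): only a leading 't' or '1'
# counts as true, so 'yes' is false here.
# >>> getBooleanFromValue('True'), getBooleanFromValue(1), getBooleanFromValue('yes')
# (True, True, False)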
def getBottomByPath(path):
'Get the bottom of the path.'
bottom = 987654321987654321.0
for point in path:
bottom = min(bottom, point.z)
return bottom
def getBottomByPaths(paths):
'Get the bottom of the paths.'
bottom = 987654321987654321.0
for path in paths:
for point in path:
bottom = min(bottom, point.z)
return bottom
def getClippedAtEndLoopPath( clip, loopPath ):
'Get a clipped loop path.'
if clip <= 0.0:
return loopPath
loopPathLength = getPathLength(loopPath)
clip = min( clip, 0.3 * loopPathLength )
lastLength = 0.0
pointIndex = 0
totalLength = 0.0
clippedLength = loopPathLength - clip
while totalLength < clippedLength and pointIndex < len(loopPath) - 1:
firstPoint = loopPath[pointIndex]
secondPoint = loopPath[pointIndex + 1]
pointIndex += 1
lastLength = totalLength
totalLength += abs(firstPoint - secondPoint)
remainingLength = clippedLength - lastLength
clippedLoopPath = loopPath[ : pointIndex ]
ultimateClippedPoint = loopPath[pointIndex]
penultimateClippedPoint = clippedLoopPath[-1]
segment = ultimateClippedPoint - penultimateClippedPoint
segmentLength = abs(segment)
if segmentLength <= 0.0:
return clippedLoopPath
newUltimatePoint = penultimateClippedPoint + segment * remainingLength / segmentLength
return clippedLoopPath + [newUltimatePoint]
def getClippedLoopPath(clip, loopPath):
'Get a clipped loop path.'
if clip <= 0.0:
return loopPath
loopPathLength = getPathLength(loopPath)
clip = min(clip, 0.3 * loopPathLength)
lastLength = 0.0
pointIndex = 0
totalLength = 0.0
while totalLength < clip and pointIndex < len(loopPath) - 1:
firstPoint = loopPath[pointIndex]
secondPoint = loopPath[pointIndex + 1]
pointIndex += 1
lastLength = totalLength
totalLength += abs(firstPoint - secondPoint)
remainingLength = clip - lastLength
clippedLoopPath = loopPath[pointIndex :]
ultimateClippedPoint = clippedLoopPath[0]
penultimateClippedPoint = loopPath[pointIndex - 1]
segment = ultimateClippedPoint - penultimateClippedPoint
segmentLength = abs(segment)
loopPath = clippedLoopPath
if segmentLength > 0.0:
newUltimatePoint = penultimateClippedPoint + segment * remainingLength / segmentLength
loopPath = [newUltimatePoint] + loopPath
return getClippedAtEndLoopPath(clip, loopPath)
def getClippedSimplifiedLoopPath(clip, loopPath, radius):
'Get a clipped and simplified loop path.'
return getSimplifiedPath(getClippedLoopPath(clip, loopPath), radius)
def getClosestDistanceIndexToLine(point, loop):
'Get the distance squared to the closest segment of the loop and index of that segment.'
smallestDistance = 987654321987654321.0
closestDistanceIndex = None
for pointIndex in xrange(len(loop)):
segmentBegin = loop[pointIndex]
segmentEnd = loop[(pointIndex + 1) % len(loop)]
distance = getDistanceToPlaneSegment(segmentBegin, segmentEnd, point)
if distance < smallestDistance:
smallestDistance = distance
closestDistanceIndex = DistanceIndex(distance, pointIndex)
return closestDistanceIndex
def getClosestPointOnSegment(segmentBegin, segmentEnd, point):
'Get the closest point on the segment.'
segmentDifference = segmentEnd - segmentBegin
if abs(segmentDifference) <= 0.0:
return segmentBegin
pointMinusSegmentBegin = point - segmentBegin
beginPlaneDot = getDotProduct(pointMinusSegmentBegin, segmentDifference)
differencePlaneDot = getDotProduct(segmentDifference, segmentDifference)
intercept = beginPlaneDot / differencePlaneDot
intercept = max(intercept, 0.0)
intercept = min(intercept, 1.0)
return segmentBegin + segmentDifference * intercept
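# A minimal usage sketch (hypothetical values): the perpendicular projection
# is clamped so the result always lies on the segment.
# >>> getClosestPointOnSegment(complex(0.0, 0.0), complex(2.0, 0.0), complex(1.0, 1.0))
# (1+0j)
# >>> getClosestPointOnSegment(complex(0.0, 0.0), complex(2.0, 0.0), complex(5.0, 3.0))
# (2+0j)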
def getComplexByCommaString( valueCommaString ):
'Get the commaString as a complex.'
try:
splitLine = valueCommaString.replace(',', ' ').split()
return complex( float( splitLine[0] ), float(splitLine[1]) )
except:
pass
return None
def getComplexByWords(words, wordIndex=0):
'Get the complex by the first two words.'
try:
return complex(float(words[wordIndex]), float(words[wordIndex + 1]))
except:
pass
return None
def getComplexDefaultByDictionary( defaultComplex, dictionary, key ):
'Get the value as a complex.'
if key in dictionary:
return complex( dictionary[key].strip().replace('(', '').replace(')', '') )
return defaultComplex
def getComplexDefaultByDictionaryKeys( defaultComplex, dictionary, keyX, keyY ):
'Get the value as a complex.'
x = getFloatDefaultByDictionary( defaultComplex.real, dictionary, keyX )
	y = getFloatDefaultByDictionary( defaultComplex.imag, dictionary, keyY )
return complex(x, y)
def getComplexPath(vector3Path):
'Get the complex path from the vector3 path.'
complexPath = []
for point in vector3Path:
complexPath.append(point.dropAxis())
return complexPath
def getComplexPathByMultiplier(multiplier, path):
'Get the multiplied complex path.'
complexPath = []
for point in path:
complexPath.append(multiplier * point)
return complexPath
def getComplexPaths(vector3Paths):
'Get the complex paths from the vector3 paths.'
complexPaths = []
for vector3Path in vector3Paths:
complexPaths.append(getComplexPath(vector3Path))
return complexPaths
def getComplexPolygon(center, radius, sides, startAngle=0.0):
'Get the complex polygon.'
complexPolygon = []
sideAngle = 2.0 * math.pi / float(sides)
for side in xrange(abs(sides)):
unitPolar = getWiddershinsUnitPolar(startAngle)
complexPolygon.append(unitPolar * radius + center)
startAngle += sideAngle
return complexPolygon
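# A minimal usage sketch (hypothetical values): four sides starting at angle
# zero trace the circle counterclockwise; up to floating point rounding in the
# near-zero components, the result is approximately the following.
# >>> getComplexPolygon(complex(), 1.0, 4)
# [(1+0j), 1j, (-1+0j), -1j]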
def getComplexPolygonByComplexRadius(radius, sides, startAngle=0.0):
'Get the complex polygon.'
complexPolygon = []
sideAngle = 2.0 * math.pi / float(sides)
for side in xrange(abs(sides)):
unitPolar = getWiddershinsUnitPolar(startAngle)
complexPolygon.append(complex(unitPolar.real * radius.real, unitPolar.imag * radius.imag))
startAngle += sideAngle
return complexPolygon
def getComplexPolygonByStartEnd(endAngle, radius, sides, startAngle=0.0):
'Get the complex polygon by start and end angle.'
angleExtent = endAngle - startAngle
sideAngle = 2.0 * math.pi / float(sides)
sides = int(math.ceil(abs(angleExtent / sideAngle)))
sideAngle = angleExtent / float(sides)
complexPolygon = []
for side in xrange(abs(sides) + 1):
unitPolar = getWiddershinsUnitPolar(startAngle)
complexPolygon.append(unitPolar * radius)
startAngle += sideAngle
return getLoopWithoutCloseEnds(0.000001 * radius, complexPolygon)
def getConcatenatedList(originalLists):
'Get the lists as one concatenated list.'
concatenatedList = []
for originalList in originalLists:
concatenatedList += originalList
return concatenatedList
def getConnectedPaths( paths, pixelDictionary, width ):
'Get connected paths from paths.'
if len(paths) < 2:
return paths
connectedPaths = []
segments = []
for pathIndex in xrange( len(paths) ):
path = paths[ pathIndex ]
segments.append( getSegmentFromPath( path, pathIndex ) )
for pathIndex in xrange( 0, len(paths) - 1 ):
concatenateRemovePath( connectedPaths, pathIndex, paths, pixelDictionary, segments, width )
connectedPaths.append( paths[-1] )
return connectedPaths
def getCrossProduct(firstComplex, secondComplex):
'Get z component cross product of a pair of complexes.'
return firstComplex.real * secondComplex.imag - firstComplex.imag * secondComplex.real
def getDecimalPlacesCarried(extraDecimalPlaces, value):
'Get decimal places carried by the decimal places of the value plus the extraDecimalPlaces.'
return max(0, 1 + int(math.ceil(extraDecimalPlaces - math.log10(value))))
def getDiagonalFlippedLoop(loop):
	'Get loop flipped over the diagonal, in other words with the x and y swapped.'
diagonalFlippedLoop = []
for point in loop:
diagonalFlippedLoop.append( complex( point.imag, point.real ) )
return diagonalFlippedLoop
def getDiagonalFlippedLoops(loops):
	'Get loops flipped over the diagonal, in other words with the x and y swapped.'
diagonalFlippedLoops = []
for loop in loops:
diagonalFlippedLoops.append( getDiagonalFlippedLoop(loop) )
return diagonalFlippedLoops
def getDictionaryString(dictionary):
'Get the dictionary string.'
output = cStringIO.StringIO()
keys = dictionary.keys()
keys.sort()
for key in keys:
addValueToOutput(0, key, output, dictionary[key])
return output.getvalue()
def getDistanceToLine(begin, end, point):
'Get the distance from a vector3 point to an infinite line.'
pointMinusBegin = point - begin
if begin == end:
return abs(pointMinusBegin)
endMinusBegin = end - begin
return abs(endMinusBegin.cross(pointMinusBegin)) / abs(endMinusBegin)
def getDistanceToLineByPath(begin, end, path):
'Get the maximum distance from a path to an infinite line.'
distanceToLine = -987654321.0
for point in path:
distanceToLine = max(getDistanceToLine(begin, end, point), distanceToLine)
return distanceToLine
def getDistanceToLineByPaths(begin, end, paths):
'Get the maximum distance from paths to an infinite line.'
distanceToLine = -987654321.0
for path in paths:
distanceToLine = max(getDistanceToLineByPath(begin, end, path), distanceToLine)
return distanceToLine
def getDistanceToPlaneSegment( segmentBegin, segmentEnd, point ):
'Get the distance squared from a point to the x & y components of a segment.'
segmentDifference = segmentEnd - segmentBegin
pointMinusSegmentBegin = point - segmentBegin
beginPlaneDot = getDotProduct( pointMinusSegmentBegin, segmentDifference )
if beginPlaneDot <= 0.0:
return abs( point - segmentBegin ) * abs( point - segmentBegin )
differencePlaneDot = getDotProduct( segmentDifference, segmentDifference )
if differencePlaneDot <= beginPlaneDot:
return abs( point - segmentEnd ) * abs( point - segmentEnd )
intercept = beginPlaneDot / differencePlaneDot
interceptPerpendicular = segmentBegin + segmentDifference * intercept
return abs( point - interceptPerpendicular ) * abs( point - interceptPerpendicular )
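# A minimal usage sketch (hypothetical values): the return value is the
# squared distance, clamped to the nearer endpoint when the perpendicular
# projection falls outside the segment.
# >>> getDistanceToPlaneSegment(complex(0.0, 0.0), complex(2.0, 0.0), complex(1.0, 3.0))
# 9.0
# >>> getDistanceToPlaneSegment(complex(0.0, 0.0), complex(2.0, 0.0), complex(-1.0, 0.0))
# 1.0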
def getDotProduct(firstComplex, secondComplex):
'Get the dot product of a pair of complexes.'
return firstComplex.real * secondComplex.real + firstComplex.imag * secondComplex.imag
def getDotProductPlusOne( firstComplex, secondComplex ):
'Get the dot product plus one of the x and y components of a pair of Vector3s.'
return 1.0 + getDotProduct( firstComplex, secondComplex )
def getDurationString( seconds ):
'Get the duration string.'
secondsRounded = int( round( seconds ) )
durationString = getPluralString( secondsRounded % 60, 'second')
if seconds < 60:
return durationString
durationString = '%s %s' % ( getPluralString( ( secondsRounded / 60 ) % 60, 'minute'), durationString )
if seconds < 3600:
return durationString
return '%s %s' % ( getPluralString( secondsRounded / 3600, 'hour'), durationString )
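# A minimal usage sketch (hypothetical values, relying on Python 2 integer
# division of secondsRounded):
# >>> getDurationString(3661)
# '1 hour 1 minute 1 second'
# >>> getDurationString(125)
# '2 minutes 5 seconds'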
def getEndpointFromPath( path, pathIndex ):
	'Get an endpoint at the last point of a path, linked to the penultimate point.'
begin = path[-1]
end = path[-2]
endpointBegin = Endpoint()
endpointEnd = Endpoint().getFromOtherPoint( endpointBegin, end )
endpointBegin.getFromOtherPoint( endpointEnd, begin )
endpointBegin.path = path
endpointBegin.pathIndex = pathIndex
return endpointBegin
def getEndpointsFromSegments( segments ):
'Get endpoints from segments.'
endpoints = []
for segment in segments:
for endpoint in segment:
endpoints.append( endpoint )
return endpoints
def getEndpointsFromSegmentTable( segmentTable ):
'Get the endpoints from the segment table.'
endpoints = []
segmentTableKeys = segmentTable.keys()
segmentTableKeys.sort()
for segmentTableKey in segmentTableKeys:
for segment in segmentTable[ segmentTableKey ]:
for endpoint in segment:
endpoints.append( endpoint )
return endpoints
def getEnumeratorKeys(enumerator, keys):
'Get enumerator keys.'
if len(keys) == 1:
return keys[0]
return getEnumeratorKeysExceptForOneArgument(enumerator, keys)
def getEnumeratorKeysAlwaysList(enumerator, keys):
'Get enumerator keys.'
if keys.__class__ != list:
return [keys]
if len(keys) == 1:
return keys
return getEnumeratorKeysExceptForOneArgument(enumerator, keys)
def getEnumeratorKeysExceptForOneArgument(enumerator, keys):
'Get enumerator keys, except when there is one argument.'
if len(keys) == 0:
return range(0, len(enumerator))
beginIndex = keys[0]
endIndex = keys[1]
if len(keys) == 2:
if beginIndex == None:
beginIndex = 0
if endIndex == None:
endIndex = len(enumerator)
return range(beginIndex, endIndex)
step = keys[2]
beginIndexDefault = 0
endIndexDefault = len(enumerator)
if step < 0:
beginIndexDefault = endIndexDefault - 1
endIndexDefault = -1
if beginIndex == None:
beginIndex = beginIndexDefault
if endIndex == None:
endIndex = endIndexDefault
return range(beginIndex, endIndex, step)
def getFillOfSurroundings(nestedRings, penultimateFillLoops):
'Get extra fill loops of nested rings.'
fillOfSurroundings = []
for nestedRing in nestedRings:
fillOfSurroundings += nestedRing.getFillLoops(penultimateFillLoops)
return fillOfSurroundings
def getFlattenedNestedRings(nestedRings):
'Get flattened nested rings.'
flattenedNestedRings = []
for nestedRing in nestedRings:
nestedRing.addFlattenedNestedRings(flattenedNestedRings)
return flattenedNestedRings
def getFloatDefaultByDictionary( defaultFloat, dictionary, key ):
'Get the value as a float.'
evaluatedFloat = None
if key in dictionary:
evaluatedFloat = getFloatFromValue(dictionary[key])
if evaluatedFloat == None:
return defaultFloat
return evaluatedFloat
def getFloatFromValue(value):
'Get the value as a float.'
try:
return float(value)
except:
pass
return None
def getFourSignificantFigures(number):
'Get number rounded to four significant figures as a string.'
if number == None:
return None
absoluteNumber = abs(number)
if absoluteNumber >= 100.0:
return getRoundedToPlacesString( 2, number )
if absoluteNumber < 0.000000001:
return getRoundedToPlacesString( 13, number )
return getRoundedToPlacesString( 3 - math.floor( math.log10( absoluteNumber ) ), number )
def getHalfSimplifiedLoop( loop, radius, remainder ):
'Get the loop with half of the points inside the channel removed.'
if len(loop) < 2:
return loop
channelRadius = radius * .01
simplified = []
addIndex = 0
if remainder == 1:
addIndex = len(loop) - 1
for pointIndex in xrange(len(loop)):
point = loop[pointIndex]
if pointIndex % 2 == remainder or pointIndex == addIndex:
simplified.append(point)
elif not isWithinChannel( channelRadius, pointIndex, loop ):
simplified.append(point)
return simplified
def getHalfSimplifiedPath(path, radius, remainder):
'Get the path with half of the points inside the channel removed.'
if len(path) < 2:
return path
channelRadius = radius * .01
simplified = [path[0]]
for pointIndex in xrange(1, len(path) - 1):
point = path[pointIndex]
if pointIndex % 2 == remainder:
simplified.append(point)
elif not isWithinChannel(channelRadius, pointIndex, path):
simplified.append(point)
simplified.append(path[-1])
return simplified
def getHorizontallyBoundedPath(horizontalBegin, horizontalEnd, path):
'Get horizontally bounded path.'
horizontallyBoundedPath = []
for pointIndex, point in enumerate(path):
begin = None
previousIndex = pointIndex - 1
if previousIndex >= 0:
begin = path[previousIndex]
end = None
nextIndex = pointIndex + 1
if nextIndex < len(path):
end = path[nextIndex]
addHorizontallyBoundedPoint(begin, point, end, horizontalBegin, horizontalEnd, horizontallyBoundedPath)
return horizontallyBoundedPath
def getIncrementFromRank( rank ):
'Get the increment from the rank which is 0 at 1 and increases by three every power of ten.'
rankZone = int( math.floor( rank / 3 ) )
rankModulo = rank % 3
powerOfTen = pow( 10, rankZone )
moduloMultipliers = ( 1, 2, 5 )
return float( powerOfTen * moduloMultipliers[ rankModulo ] )
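# A minimal usage sketch (hypothetical values): successive ranks walk the
# 1-2-5 decade sequence; getRank, defined later in this module, is roughly
# its inverse.
# >>> [getIncrementFromRank(rank) for rank in xrange(6)]
# [1.0, 2.0, 5.0, 10.0, 20.0, 50.0]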
def getInsidesAddToOutsides( loops, outsides ):
'Add loops to either the insides or outsides.'
insides = []
for loopIndex in xrange( len(loops) ):
loop = loops[loopIndex]
if isInsideOtherLoops( loopIndex, loops ):
insides.append(loop)
else:
outsides.append(loop)
return insides
def getIntermediateLocation( alongWay, begin, end ):
'Get the intermediate location between begin and end.'
return begin * ( 1.0 - alongWay ) + end * alongWay
def getIntersectionOfXIntersectionIndexes( totalSolidSurfaceThickness, xIntersectionIndexList ):
'Get x intersections from surrounding layers.'
xIntersectionList = []
solidTable = {}
solid = False
xIntersectionIndexList.sort()
for xIntersectionIndex in xIntersectionIndexList:
toggleHashtable(solidTable, xIntersectionIndex.index, '')
oldSolid = solid
solid = len(solidTable) >= totalSolidSurfaceThickness
if oldSolid != solid:
xIntersectionList.append(xIntersectionIndex.x)
return xIntersectionList
def getIntersectionOfXIntersectionsTables(xIntersectionsTables):
'Get the intersection of the XIntersections tables.'
if len(xIntersectionsTables) == 0:
return {}
intersectionOfXIntersectionsTables = {}
firstIntersectionTable = xIntersectionsTables[0]
for firstIntersectionTableKey in firstIntersectionTable.keys():
xIntersectionIndexList = []
for xIntersectionsTableIndex in xrange(len(xIntersectionsTables)):
xIntersectionsTable = xIntersectionsTables[xIntersectionsTableIndex]
if firstIntersectionTableKey in xIntersectionsTable:
addXIntersectionIndexesFromXIntersections(xIntersectionsTableIndex, xIntersectionIndexList, xIntersectionsTable[firstIntersectionTableKey])
xIntersections = getIntersectionOfXIntersectionIndexes(len(xIntersectionsTables), xIntersectionIndexList)
if len(xIntersections) > 0:
intersectionOfXIntersectionsTables[firstIntersectionTableKey] = xIntersections
return intersectionOfXIntersectionsTables
def getIntFromValue(value):
'Get the value as an int.'
try:
return int(value)
except:
pass
return None
def getIsInFilledRegion(loops, point):
'Determine if the point is in the filled region of the loops.'
return getNumberOfIntersectionsToLeftOfLoops(loops, point) % 2 == 1
def getIsInFilledRegionByPaths(loops, paths):
'Determine if the point of any path is in the filled region of the loops.'
for path in paths:
if len(path) > 0:
if getIsInFilledRegion(loops, path[0]):
return True
return False
def getIsRadianClose(firstRadian, secondRadian):
'Determine if the firstRadian is close to the secondRadian.'
return abs(math.pi - abs(math.pi - ((firstRadian - secondRadian) % (math.pi + math.pi) ))) < 0.000001
def getIsWiddershinsByVector3( polygon ):
'Determine if the polygon goes round in the widdershins direction.'
return isWiddershins( getComplexPath( polygon ) )
def getJoinOfXIntersectionIndexes( xIntersectionIndexList ):
'Get joined x intersections from surrounding layers.'
xIntersections = []
solidTable = {}
solid = False
xIntersectionIndexList.sort()
for xIntersectionIndex in xIntersectionIndexList:
toggleHashtable(solidTable, xIntersectionIndex.index, '')
oldSolid = solid
solid = len(solidTable) > 0
if oldSolid != solid:
xIntersections.append(xIntersectionIndex.x)
return xIntersections
def getLargestLoop(loops):
'Get largest loop from loops.'
largestArea = -987654321.0
largestLoop = []
for loop in loops:
loopArea = abs(getAreaLoopAbsolute(loop))
if loopArea > largestArea:
largestArea = loopArea
largestLoop = loop
return largestLoop
def getLeftPoint(points):
'Get the leftmost complex point in the points.'
leftmost = 987654321.0
leftPointComplex = None
for pointComplex in points:
if pointComplex.real < leftmost:
leftmost = pointComplex.real
leftPointComplex = pointComplex
return leftPointComplex
def getLeftPointIndex(points):
'Get the index of the leftmost complex point in the points.'
if len(points) < 1:
return None
leftPointIndex = 0
for pointIndex in xrange( len(points) ):
if points[pointIndex].real < points[ leftPointIndex ].real:
leftPointIndex = pointIndex
return leftPointIndex
def getListTableElements( listDictionary ):
	'Get all the elements in a list table.'
listDictionaryElements = []
for listDictionaryValue in listDictionary.values():
listDictionaryElements += listDictionaryValue
return listDictionaryElements
def getLoopCentroid(polygonComplex):
	'Get the centroid of a complex polygon using http://en.wikipedia.org/wiki/Centroid.'
polygonDoubleArea = 0.0
polygonTorque = 0.0
for pointIndex in xrange( len(polygonComplex) ):
pointBegin = polygonComplex[pointIndex]
pointEnd = polygonComplex[ (pointIndex + 1) % len(polygonComplex) ]
doubleArea = pointBegin.real * pointEnd.imag - pointEnd.real * pointBegin.imag
doubleCenter = complex( pointBegin.real + pointEnd.real, pointBegin.imag + pointEnd.imag )
polygonDoubleArea += doubleArea
polygonTorque += doubleArea * doubleCenter
torqueMultiplier = 0.333333333333333333333333 / polygonDoubleArea
return polygonTorque * torqueMultiplier
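# A minimal usage sketch (hypothetical values): for the counterclockwise unit
# square the centroid lands on the center, up to floating point rounding.
# >>> getLoopCentroid([complex(0.0, 0.0), complex(1.0, 0.0), complex(1.0, 1.0), complex(0.0, 1.0)])
# (0.5+0.5j)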
def getLoopConvex(points):
'Get convex hull of points using gift wrap algorithm.'
loopConvex = []
pointSet = set()
for point in points:
if point not in pointSet:
pointSet.add(point)
loopConvex.append(point)
if len(loopConvex) < 4:
return loopConvex
leftPoint = getLeftPoint(loopConvex)
lastPoint = leftPoint
pointSet.remove(leftPoint)
loopConvex = [leftPoint]
lastSegment = complex(0.0, 1.0)
while True:
greatestDotProduct = -9.9
greatestPoint = None
greatestSegment = None
if len(loopConvex) > 2:
nextSegment = getNormalized(leftPoint - lastPoint)
if abs(nextSegment) > 0.0:
greatestDotProduct = getDotProduct(nextSegment, lastSegment)
for point in pointSet:
nextSegment = getNormalized(point - lastPoint)
if abs(nextSegment) > 0.0:
dotProduct = getDotProduct(nextSegment, lastSegment)
if dotProduct > greatestDotProduct:
greatestDotProduct = dotProduct
greatestPoint = point
greatestSegment = nextSegment
if greatestPoint == None:
return loopConvex
lastPoint = greatestPoint
loopConvex.append(greatestPoint)
pointSet.remove(greatestPoint)
lastSegment = greatestSegment
return loopConvex
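# A minimal usage sketch (hypothetical values): wrapping the unit square
# corners plus an interior point should keep only the corners, starting from
# the leftmost point; the interior point is discarded.
# >>> getLoopConvex([complex(0.0, 0.0), complex(1.0, 0.0), complex(1.0, 1.0), complex(0.0, 1.0), complex(0.5, 0.5)])
# [0j, 1j, (1+1j), (1+0j)]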
def getLoopConvexCentroid(polygonComplex):
'Get centroid of the convex hull of a complex polygon.'
return getLoopCentroid( getLoopConvex(polygonComplex) )
def getLoopInsideContainingLoop( containingLoop, loops ):
'Get a loop that is inside the containing loop.'
for loop in loops:
if loop != containingLoop:
if isPathInsideLoop( containingLoop, loop ):
return loop
return None
def getLoopLength( polygon ):
'Get the length of a polygon perimeter.'
polygonLength = 0.0
for pointIndex in xrange( len( polygon ) ):
point = polygon[pointIndex]
secondPoint = polygon[ (pointIndex + 1) % len( polygon ) ]
polygonLength += abs( point - secondPoint )
return polygonLength
def getLoopStartingClosest(extrusionHalfWidth, location, loop):
	'Get the loop with its start moved to the point closest to the location.'
closestIndex = getClosestDistanceIndexToLine(location, loop).index
loop = getAroundLoop(closestIndex, closestIndex, loop)
closestPoint = getClosestPointOnSegment(loop[0], loop[1], location)
if abs(closestPoint - loop[0]) > extrusionHalfWidth and abs(closestPoint - loop[1]) > extrusionHalfWidth:
loop = [closestPoint] + loop[1 :] + [loop[0]]
elif abs(closestPoint - loop[0]) > abs(closestPoint - loop[1]):
loop = loop[1 :] + [loop[0]]
return loop
def getLoopWithoutCloseEnds(close, loop):
'Get loop without close ends.'
if len(loop) < 2:
return loop
if abs(loop[0] - loop[-1]) > close:
return loop
return loop[: -1]
def getLoopWithoutCloseSequentialPoints(close, loop):
'Get loop without close sequential points.'
if len(loop) < 2:
return loop
lastPoint = loop[-1]
loopWithoutCloseSequentialPoints = []
for point in loop:
if abs(point - lastPoint) > close:
loopWithoutCloseSequentialPoints.append(point)
lastPoint = point
return loopWithoutCloseSequentialPoints
def getMaximum(firstComplex, secondComplex):
'Get a complex with each component the maximum of the respective components of a pair of complexes.'
return complex(max(firstComplex.real, secondComplex.real), max(firstComplex.imag, secondComplex.imag))
def getMaximumByComplexPath(path):
'Get a complex with each component the maximum of the respective components of a complex path.'
maximum = complex(-987654321987654321.0, -987654321987654321.0)
for point in path:
maximum = getMaximum(maximum, point)
return maximum
def getMaximumByComplexPaths(paths):
'Get a complex with each component the maximum of the respective components of complex paths.'
maximum = complex(-987654321987654321.0, -987654321987654321.0)
for path in paths:
for point in path:
maximum = getMaximum(maximum, point)
return maximum
def getMaximumByVector3Path(path):
'Get a vector3 with each component the maximum of the respective components of a vector3 path.'
maximum = Vector3(-987654321987654321.0, -987654321987654321.0, -987654321987654321.0)
for point in path:
maximum.maximize(point)
return maximum
def getMaximumByVector3Paths(paths):
	'Get a vector3 with each component the maximum of the respective components of vector3 paths.'
	maximum = Vector3(-987654321987654321.0, -987654321987654321.0, -987654321987654321.0)
for path in paths:
for point in path:
maximum.maximize(point)
return maximum
def getMaximumSpan(loop):
'Get the maximum span of the loop.'
extent = getMaximumByComplexPath(loop) - getMinimumByComplexPath(loop)
return max(extent.real, extent.imag)
def getMinimum(firstComplex, secondComplex):
'Get a complex with each component the minimum of the respective components of a pair of complexes.'
return complex(min(firstComplex.real, secondComplex.real), min(firstComplex.imag, secondComplex.imag))
def getMinimumByComplexPath(path):
'Get a complex with each component the minimum of the respective components of a complex path.'
minimum = complex(987654321987654321.0, 987654321987654321.0)
for point in path:
minimum = getMinimum(minimum, point)
return minimum
def getMinimumByComplexPaths(paths):
'Get a complex with each component the minimum of the respective components of complex paths.'
minimum = complex(987654321987654321.0, 987654321987654321.0)
for path in paths:
for point in path:
minimum = getMinimum(minimum, point)
return minimum
def getMinimumByVector3Path(path):
'Get a vector3 with each component the minimum of the respective components of a vector3 path.'
minimum = Vector3(987654321987654321.0, 987654321987654321.0, 987654321987654321.0)
for point in path:
minimum.minimize(point)
return minimum
def getMinimumByVector3Paths(paths):
	'Get a vector3 with each component the minimum of the respective components of vector3 paths.'
minimum = Vector3(987654321987654321.0, 987654321987654321.0, 987654321987654321.0)
for path in paths:
for point in path:
minimum.minimize(point)
return minimum
def getMirrorPath(path):
"Get mirror path."
close = 0.001 * getPathLength(path)
for pointIndex in xrange(len(path) - 1, -1, -1):
point = path[pointIndex]
flipPoint = complex(-point.real, point.imag)
if abs(flipPoint - path[-1]) > close:
path.append(flipPoint)
return path
def getNormal(begin, center, end):
'Get normal.'
centerMinusBegin = (center - begin).getNormalized()
endMinusCenter = (end - center).getNormalized()
return centerMinusBegin.cross(endMinusCenter)
def getNormalByPath(path):
'Get normal by path.'
totalNormal = Vector3()
for pointIndex, point in enumerate(path):
center = path[(pointIndex + 1) % len(path)]
end = path[(pointIndex + 2) % len(path)]
totalNormal += getNormalWeighted(point, center, end)
return totalNormal.getNormalized()
def getNormalized(complexNumber):
'Get the normalized complex.'
complexNumberLength = abs(complexNumber)
if complexNumberLength > 0.0:
return complexNumber / complexNumberLength
return complexNumber
def getNormalWeighted(begin, center, end):
'Get weighted normal.'
return (center - begin).cross(end - center)
def getNumberOfIntersectionsToLeft(loop, point):
'Get the number of intersections through the loop for the line going left.'
numberOfIntersectionsToLeft = 0
for pointIndex in xrange(len(loop)):
firstPointComplex = loop[pointIndex]
secondPointComplex = loop[(pointIndex + 1) % len(loop)]
xIntersection = getXIntersectionIfExists(firstPointComplex, secondPointComplex, point.imag)
if xIntersection != None:
if xIntersection < point.real:
numberOfIntersectionsToLeft += 1
return numberOfIntersectionsToLeft
def getNumberOfIntersectionsToLeftOfLoops(loops, point):
	'Get the total number of intersections through the loops for the line going left.'
totalNumberOfIntersectionsToLeft = 0
for loop in loops:
totalNumberOfIntersectionsToLeft += getNumberOfIntersectionsToLeft(loop, point)
return totalNumberOfIntersectionsToLeft
def getOrderedNestedRings(nestedRings):
'Get ordered nestedRings from nestedRings.'
insides = []
orderedNestedRings = []
for loopIndex in xrange(len(nestedRings)):
nestedRing = nestedRings[loopIndex]
otherLoops = []
for beforeIndex in xrange(loopIndex):
otherLoops.append(nestedRings[beforeIndex].boundary)
for afterIndex in xrange(loopIndex + 1, len(nestedRings)):
otherLoops.append(nestedRings[afterIndex].boundary)
if isPathEntirelyInsideLoops(otherLoops, nestedRing.boundary):
insides.append(nestedRing)
else:
orderedNestedRings.append(nestedRing)
for outside in orderedNestedRings:
outside.getFromInsideSurroundings(insides)
return orderedNestedRings
def getPathCopy(path):
'Get path copy.'
pathCopy = []
for point in path:
pathCopy.append(point.copy())
return pathCopy
def getPathLength(path):
'Get the length of a path ( an open polyline ).'
pathLength = 0.0
for pointIndex in xrange( len(path) - 1 ):
firstPoint = path[pointIndex]
secondPoint = path[pointIndex + 1]
pathLength += abs(firstPoint - secondPoint)
return pathLength
def getPathsFromEndpoints(endpoints, maximumConnectionLength, pixelDictionary, width):
'Get paths from endpoints.'
if len(endpoints) < 2:
return []
endpoints = endpoints[:] # so that the first two endpoints aren't removed when used again
for beginningEndpoint in endpoints[: : 2]:
beginningPoint = beginningEndpoint.point
addSegmentToPixelTable(beginningPoint, beginningEndpoint.otherEndpoint.point, pixelDictionary, 0, 0, width)
endpointFirst = endpoints[0]
endpoints.remove(endpointFirst)
otherEndpoint = endpointFirst.otherEndpoint
endpoints.remove(otherEndpoint)
nextEndpoint = None
path = []
paths = [path]
if len(endpoints) > 1:
nextEndpoint = otherEndpoint.getClosestMiss(endpoints, path, pixelDictionary, width)
if nextEndpoint != None:
if abs(nextEndpoint.point - endpointFirst.point) < abs(nextEndpoint.point - otherEndpoint.point):
endpointFirst = endpointFirst.otherEndpoint
otherEndpoint = endpointFirst.otherEndpoint
addPointToPath(path, pixelDictionary, endpointFirst.point, None, width)
addPointToPath(path, pixelDictionary, otherEndpoint.point, len(paths) - 1, width)
oneOverEndpointWidth = 1.0 / maximumConnectionLength
endpointTable = {}
for endpoint in endpoints:
addElementToPixelListFromPoint(endpoint, endpointTable, endpoint.point * oneOverEndpointWidth)
while len(endpointTable) > 0:
if len(endpointTable) == 1:
if len(endpointTable.values()[0]) < 2:
return []
endpoints = getSquareValuesFromPoint(endpointTable, otherEndpoint.point * oneOverEndpointWidth)
nextEndpoint = otherEndpoint.getClosestMiss(endpoints, path, pixelDictionary, width)
if nextEndpoint == None:
path = []
paths.append(path)
endpoints = getListTableElements(endpointTable)
nextEndpoint = otherEndpoint.getClosestEndpoint(endpoints)
			# this commented code should be faster than the getListTableElements code, but it isn't; someday a spiral algorithm could be tried
# endpoints = getSquareValuesFromPoint( endpointTable, otherEndpoint.point * oneOverEndpointWidth )
# nextEndpoint = otherEndpoint.getClosestEndpoint(endpoints)
# if nextEndpoint == None:
# endpoints = []
# for endpointTableValue in endpointTable.values():
# endpoints.append( endpointTableValue[0] )
# nextEndpoint = otherEndpoint.getClosestEndpoint(endpoints)
# endpoints = getSquareValuesFromPoint( endpointTable, nextEndpoint.point * oneOverEndpointWidth )
# nextEndpoint = otherEndpoint.getClosestEndpoint(endpoints)
addPointToPath(path, pixelDictionary, nextEndpoint.point, len(paths) - 1, width)
removeElementFromPixelListFromPoint(nextEndpoint, endpointTable, nextEndpoint.point * oneOverEndpointWidth)
otherEndpoint = nextEndpoint.otherEndpoint
addPointToPath(path, pixelDictionary, otherEndpoint.point, len(paths) - 1, width)
removeElementFromPixelListFromPoint(otherEndpoint, endpointTable, otherEndpoint.point * oneOverEndpointWidth)
return paths
def getPlaneDot( vec3First, vec3Second ):
'Get the dot product of the x and y components of a pair of Vector3s.'
return vec3First.x * vec3Second.x + vec3First.y * vec3Second.y
def getPluralString( number, suffix ):
'Get the plural string.'
if number == 1:
return '1 %s' % suffix
return '%s %ss' % ( number, suffix )
def getPointPlusSegmentWithLength( length, point, segment ):
'Get point plus a segment scaled to a given length.'
return segment * length / abs(segment) + point
def getPointsByHorizontalDictionary(width, xIntersectionsDictionary):
'Get points from the horizontalXIntersectionsDictionary.'
points = []
xIntersectionsDictionaryKeys = xIntersectionsDictionary.keys()
xIntersectionsDictionaryKeys.sort()
for xIntersectionsDictionaryKey in xIntersectionsDictionaryKeys:
for xIntersection in xIntersectionsDictionary[xIntersectionsDictionaryKey]:
points.append(complex(xIntersection, xIntersectionsDictionaryKey * width))
return points
def getPointsByVerticalDictionary(width, xIntersectionsDictionary):
'Get points from the verticalXIntersectionsDictionary.'
points = []
xIntersectionsDictionaryKeys = xIntersectionsDictionary.keys()
xIntersectionsDictionaryKeys.sort()
for xIntersectionsDictionaryKey in xIntersectionsDictionaryKeys:
for xIntersection in xIntersectionsDictionary[xIntersectionsDictionaryKey]:
points.append(complex(xIntersectionsDictionaryKey * width, xIntersection))
return points
def getRadiusArealizedMultiplier(sides):
'Get the radius multiplier for a polygon of equal area.'
return math.sqrt(globalTau / sides / math.sin(globalTau / sides))
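# A minimal usage sketch, assuming globalTau is 2 * math.pi as elsewhere in
# this module: scaling a circle's radius by this multiplier yields a polygon
# with the same area as the circle, e.g. sqrt(pi / 2) for a square.
# >>> round(getRadiusArealizedMultiplier(4.0), 4)
# 1.2533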
def getRandomComplex(begin, end):
'Get random complex.'
endMinusBegin = end - begin
return begin + complex(random.random() * endMinusBegin.real, random.random() * endMinusBegin.imag)
def getRank(width):
'Get the rank which is 0 at 1 and increases by three every power of ten.'
return int(math.floor(3.0 * math.log10(width)))
def getRotatedComplexes(planeAngle, points):
'Get points rotated by the plane angle'
rotatedComplexes = []
for point in points:
rotatedComplexes.append(planeAngle * point)
return rotatedComplexes
def getRotatedComplexLists(planeAngle, pointLists):
'Get point lists rotated by the plane angle'
rotatedComplexLists = []
for pointList in pointLists:
rotatedComplexLists.append(getRotatedComplexes(planeAngle, pointList))
return rotatedComplexLists
def getRotatedWiddershinsQuarterAroundZAxis(vector3):
'Get Vector3 rotated a quarter widdershins turn around Z axis.'
return Vector3(-vector3.y, vector3.x, vector3.z)
def getRoundedPoint(point):
'Get point with each component rounded.'
return Vector3(round(point.x), round( point.y ), round(point.z))
def getRoundedToPlaces(decimalPlaces, number):
'Get number rounded to a number of decimal places.'
decimalPlacesRounded = max(1, int(round(decimalPlaces)))
return round(number, decimalPlacesRounded)
def getRoundedToPlacesString(decimalPlaces, number):
'Get number rounded to a number of decimal places as a string, without exponential formatting.'
roundedToPlaces = getRoundedToPlaces(decimalPlaces, number)
roundedToPlacesString = str(roundedToPlaces)
if 'e' in roundedToPlacesString:
return ('%.15f' % roundedToPlaces).rstrip('0')
return roundedToPlacesString
def getRoundedToThreePlaces(number):
'Get number rounded to three places as a string.'
return str(round(number, 3))
def getRoundZAxisByPlaneAngle( planeAngle, vector3 ):
'Get Vector3 rotated by a plane angle.'
return Vector3( vector3.x * planeAngle.real - vector3.y * planeAngle.imag, vector3.x * planeAngle.imag + vector3.y * planeAngle.real, vector3.z )
def getSegmentFromPath( path, pathIndex ):
'Get endpoint segment from a path.'
if len(path) < 2:
return None
forwardEndpoint = getEndpointFromPath( path, pathIndex )
reversePath = path[:]
reversePath.reverse()
reverseEndpoint = getEndpointFromPath( reversePath, pathIndex )
return ( forwardEndpoint, reverseEndpoint )
def getSegmentFromPoints( begin, end ):
'Get endpoint segment from a pair of points.'
endpointFirst = Endpoint()
endpointSecond = Endpoint().getFromOtherPoint( endpointFirst, end )
endpointFirst.getFromOtherPoint( endpointSecond, begin )
return ( endpointFirst, endpointSecond )
def getSegmentsFromXIntersectionIndexes( xIntersectionIndexList, y ):
'Get endpoint segments from the x intersection indexes.'
xIntersections = getXIntersectionsFromIntersections( xIntersectionIndexList )
return getSegmentsFromXIntersections( xIntersections, y )
def getSegmentsFromXIntersections( xIntersections, y ):
'Get endpoint segments from the x intersections.'
segments = []
end = len( xIntersections )
if len( xIntersections ) % 2 == 1:
end -= 1
for xIntersectionIndex in xrange( 0, end, 2 ):
firstX = xIntersections[ xIntersectionIndex ]
secondX = xIntersections[ xIntersectionIndex + 1 ]
if firstX != secondX:
segments.append( getSegmentFromPoints( complex( firstX, y ), complex( secondX, y ) ) )
return segments
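# A minimal usage sketch (hypothetical values, assuming the Endpoint class
# stores its location in the point attribute as elsewhere in this module):
# sorted crossings pair off into alternating inside spans, and an odd
# trailing crossing is dropped.
# >>> segments = getSegmentsFromXIntersections([1.0, 3.0, 5.0, 9.0], 0.0)
# >>> [(segment[0].point, segment[1].point) for segment in segments]
# [((1+0j), (3+0j)), ((5+0j), (9+0j))]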
def getSimplifiedLoop( loop, radius ):
'Get loop with points inside the channel removed.'
if len(loop) < 2:
return loop
simplificationMultiplication = 256
simplificationRadius = radius / float( simplificationMultiplication )
maximumIndex = len(loop) * simplificationMultiplication
pointIndex = 1
while pointIndex < maximumIndex:
oldLoopLength = len(loop)
loop = getHalfSimplifiedLoop( loop, simplificationRadius, 0 )
loop = getHalfSimplifiedLoop( loop, simplificationRadius, 1 )
simplificationRadius += simplificationRadius
if oldLoopLength == len(loop):
if simplificationRadius > radius:
return getAwayPoints( loop, radius )
else:
simplificationRadius *= 1.5
simplificationRadius = min( simplificationRadius, radius )
pointIndex += pointIndex
return getAwayPoints( loop, radius )
def getSimplifiedLoops( loops, radius ):
'Get the simplified loops.'
simplifiedLoops = []
for loop in loops:
simplifiedLoops.append( getSimplifiedLoop( loop, radius ) )
return simplifiedLoops
def getSimplifiedPath(path, radius):
'Get path with points inside the channel removed.'
if len(path) < 2:
return path
simplificationMultiplication = 256
simplificationRadius = radius / float(simplificationMultiplication)
maximumIndex = len(path) * simplificationMultiplication
pointIndex = 1
while pointIndex < maximumIndex:
oldPathLength = len(path)
path = getHalfSimplifiedPath(path, simplificationRadius, 0)
path = getHalfSimplifiedPath(path, simplificationRadius, 1)
simplificationRadius += simplificationRadius
if oldPathLength == len(path):
if simplificationRadius > radius:
return getAwayPath(path, radius)
else:
simplificationRadius *= 1.5
simplificationRadius = min(simplificationRadius, radius)
pointIndex += pointIndex
return getAwayPath(path, radius)
def getSquareIsOccupied( pixelDictionary, x, y ):
'Determine if a square around the x and y pixel coordinates is occupied.'
for xStep in xrange(x - 1, x + 2):
for yStep in xrange(y - 1, y + 2):
if (xStep, yStep) in pixelDictionary:
return True
return False
def getSquareLoopWiddershins(beginComplex, endComplex):
'Get a square loop from the beginning to the end and back.'
loop = [beginComplex, complex(endComplex.real, beginComplex.imag), endComplex]
loop.append(complex(beginComplex.real, endComplex.imag))
return loop
def getSquareValues( pixelDictionary, x, y ):
'Get a list of the values in a square around the x and y pixel coordinates.'
squareValues = []
for xStep in xrange(x - 1, x + 2):
for yStep in xrange(y - 1, y + 2):
stepKey = (xStep, yStep)
if stepKey in pixelDictionary:
squareValues += pixelDictionary[ stepKey ]
return squareValues
def getSquareValuesFromPoint( pixelDictionary, point ):
'Get a list of the values in a square around the point.'
return getSquareValues(pixelDictionary, int(round(point.real)), int(round(point.imag)))
def getStepKeyFromPoint(point):
'Get step key for the point.'
return (int(round(point.real)), int(round(point.imag)))
def getThreeSignificantFigures(number):
'Get number rounded to three significant figures as a string.'
absoluteNumber = abs(number)
if absoluteNumber >= 10.0:
return getRoundedToPlacesString( 1, number )
if absoluteNumber < 0.000000001:
return getRoundedToPlacesString( 12, number )
return getRoundedToPlacesString( 1 - math.floor( math.log10( absoluteNumber ) ), number )
def getTopPath(path):
'Get the top of the path.'
top = -987654321987654321.0
for point in path:
top = max(top, point.z)
return top
def getTopPaths(paths):
'Get the top of the paths.'
top = -987654321987654321.0
for path in paths:
for point in path:
top = max(top, point.z)
return top
def getTransferClosestNestedRing(extrusionHalfWidth, nestedRings, oldOrderedLocation, skein, threadSequence):
'Get and transfer the closest remaining nested ring.'
if len(nestedRings) > 0:
oldOrderedLocation.z = nestedRings[0].z
closestDistance = 987654321987654321.0
closestNestedRing = None
for remainingNestedRing in nestedRings:
distance = getClosestDistanceIndexToLine(oldOrderedLocation.dropAxis(), remainingNestedRing.boundary).distance
if distance < closestDistance:
closestDistance = distance
closestNestedRing = remainingNestedRing
nestedRings.remove(closestNestedRing)
closestNestedRing.addToThreads(extrusionHalfWidth, oldOrderedLocation, skein, threadSequence)
return closestNestedRing
def getTransferredNestedRings( insides, loop ):
'Get transferred paths from inside nested rings.'
transferredSurroundings = []
for insideIndex in xrange( len( insides ) - 1, - 1, - 1 ):
insideSurrounding = insides[ insideIndex ]
if isPathInsideLoop( loop, insideSurrounding.boundary ):
transferredSurroundings.append( insideSurrounding )
del insides[ insideIndex ]
return transferredSurroundings
def getTransferredPaths( insides, loop ):
'Get transferred paths from inside paths.'
transferredPaths = []
for insideIndex in xrange( len( insides ) - 1, - 1, - 1 ):
inside = insides[ insideIndex ]
if isPathInsideLoop( loop, inside ):
transferredPaths.append( inside )
del insides[ insideIndex ]
return transferredPaths
def getTranslatedComplexPath(path, translateComplex):
'Get the translated complex path.'
translatedComplexPath = []
for point in path:
translatedComplexPath.append(point + translateComplex)
return translatedComplexPath
def getVector3Path(complexPath, z=0.0):
'Get the vector3 path from the complex path.'
vector3Path = []
for complexPoint in complexPath:
vector3Path.append(Vector3(complexPoint.real, complexPoint.imag, z))
return vector3Path
def getVector3Paths(complexPaths, z=0.0):
'Get the vector3 paths from the complex paths.'
vector3Paths = []
for complexPath in complexPaths:
vector3Paths.append(getVector3Path(complexPath, z))
return vector3Paths
def getWiddershinsUnitPolar(angle):
'Get polar complex from counterclockwise angle from 1, 0.'
return complex(math.cos(angle), math.sin(angle))
def getXIntersectionIfExists( beginComplex, endComplex, y ):
'Get the x intersection if it exists.'
if ( y > beginComplex.imag ) == ( y > endComplex.imag ):
return None
endMinusBeginComplex = endComplex - beginComplex
return ( y - beginComplex.imag ) / endMinusBeginComplex.imag * endMinusBeginComplex.real + beginComplex.real
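# A minimal usage sketch (hypothetical values): the crossing test is
# half-open, so an endpoint lying exactly on the scanline is counted on one
# side only.
# >>> getXIntersectionIfExists(complex(0.0, 0.0), complex(2.0, 2.0), 1.0)
# 1.0
# >>> print(getXIntersectionIfExists(complex(0.0, 0.0), complex(2.0, 0.0), 1.0))
# None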
def getXIntersectionsFromIntersections( xIntersectionIndexList ):
	'Get x intersections from the x intersection index list, in other words subtract the non-negative index intersections from the negative index ones.'
xIntersections = []
fill = False
solid = False
solidTable = {}
xIntersectionIndexList.sort()
for solidX in xIntersectionIndexList:
if solidX.index >= 0:
toggleHashtable( solidTable, solidX.index, '' )
else:
fill = not fill
oldSolid = solid
solid = ( len( solidTable ) == 0 and fill )
if oldSolid != solid:
xIntersections.append( solidX.x )
return xIntersections
def getXYComplexFromVector3(vector3):
'Get an xy complex from a vector3 if it exists, otherwise return None.'
if vector3 == None:
return None
return vector3.dropAxis()
def getYIntersectionIfExists( beginComplex, endComplex, x ):
'Get the y intersection if it exists.'
if ( x > beginComplex.real ) == ( x > endComplex.real ):
return None
endMinusBeginComplex = endComplex - beginComplex
return ( x - beginComplex.real ) / endMinusBeginComplex.real * endMinusBeginComplex.imag + beginComplex.imag
def getZComponentCrossProduct( vec3First, vec3Second ):
'Get z component cross product of a pair of Vector3s.'
return vec3First.x * vec3Second.y - vec3First.y * vec3Second.x
def isInsideOtherLoops( loopIndex, loops ):
'Determine if a loop in a list is inside another loop in that list.'
return isPathInsideLoops( loops[ : loopIndex ] + loops[loopIndex + 1 :], loops[loopIndex] )
def isLineIntersectingInsideXSegment( beginComplex, endComplex, segmentFirstX, segmentSecondX, y ):
'Determine if the line is crossing inside the x segment.'
xIntersection = getXIntersectionIfExists( beginComplex, endComplex, y )
if xIntersection == None:
return False
if xIntersection < min( segmentFirstX, segmentSecondX ):
return False
return xIntersection <= max( segmentFirstX, segmentSecondX )
def isLineIntersectingLoop( loop, pointBegin, pointEnd ):
	'Determine if the line is intersecting the loop.'
normalizedSegment = pointEnd - pointBegin
normalizedSegmentLength = abs( normalizedSegment )
if normalizedSegmentLength > 0.0:
normalizedSegment /= normalizedSegmentLength
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
if isLoopIntersectingInsideXSegment( loop, pointBeginRotated.real, pointEndRotated.real, segmentYMirror, pointBeginRotated.imag ):
return True
return False
def isLineIntersectingLoops( loops, pointBegin, pointEnd ):
'Determine if the line is intersecting loops.'
normalizedSegment = pointEnd - pointBegin
normalizedSegmentLength = abs( normalizedSegment )
if normalizedSegmentLength > 0.0:
normalizedSegment /= normalizedSegmentLength
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
if isLoopListIntersectingInsideXSegment( loops, pointBeginRotated.real, pointEndRotated.real, segmentYMirror, pointBeginRotated.imag ):
return True
return False
def isLoopIntersectingInsideXSegment( loop, segmentFirstX, segmentSecondX, segmentYMirror, y ):
'Determine if the loop is intersecting inside the x segment.'
rotatedLoop = getRotatedComplexes( segmentYMirror, loop )
for pointIndex in xrange( len( rotatedLoop ) ):
pointFirst = rotatedLoop[pointIndex]
pointSecond = rotatedLoop[ (pointIndex + 1) % len( rotatedLoop ) ]
if isLineIntersectingInsideXSegment( pointFirst, pointSecond, segmentFirstX, segmentSecondX, y ):
return True
return False
def isLoopIntersectingLoop( loop, otherLoop ):
'Determine if the loop is intersecting the other loop.'
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if isLineIntersectingLoop( otherLoop, pointBegin, pointEnd ):
return True
return False
def isLoopIntersectingLoops( loop, otherLoops ):
'Determine if the loop is intersecting other loops.'
for pointIndex in xrange(len(loop)):
pointBegin = loop[pointIndex]
pointEnd = loop[(pointIndex + 1) % len(loop)]
if isLineIntersectingLoops( otherLoops, pointBegin, pointEnd ):
return True
return False
def isLoopListIntersecting(loops):
'Determine if a loop in the list is intersecting the other loops.'
for loopIndex in xrange(len(loops) - 1):
loop = loops[loopIndex]
if isLoopIntersectingLoops(loop, loops[loopIndex + 1 :]):
return True
return False
def isLoopListIntersectingInsideXSegment( loopList, segmentFirstX, segmentSecondX, segmentYMirror, y ):
'Determine if the loop list is crossing inside the x segment.'
for alreadyFilledLoop in loopList:
if isLoopIntersectingInsideXSegment( alreadyFilledLoop, segmentFirstX, segmentSecondX, segmentYMirror, y ):
return True
return False
def isPathEntirelyInsideLoop(loop, path):
'Determine if a path is entirely inside another loop.'
for point in path:
if not isPointInsideLoop(loop, point):
return False
return True
def isPathEntirelyInsideLoops(loops, path):
'Determine if a path is entirely inside another loop in a list.'
for loop in loops:
if isPathEntirelyInsideLoop(loop, path):
return True
return False
def isPathInsideLoop(loop, path):
'Determine if a path is inside another loop.'
return isPointInsideLoop(loop, getLeftPoint(path))
def isPathInsideLoops(loops, path):
'Determine if a path is inside another loop in a list.'
for loop in loops:
if isPathInsideLoop(loop, path):
return True
return False
def isPixelTableIntersecting( bigTable, littleTable, maskTable = {} ):
	'Determine if the little pixel table intersects the big pixel table, outside of the masked locations.'
littleTableKeys = littleTable.keys()
for littleTableKey in littleTableKeys:
if littleTableKey not in maskTable:
if littleTableKey in bigTable:
return True
return False
def isPointInsideLoop(loop, point):
'Determine if a point is inside another loop.'
return getNumberOfIntersectionsToLeft(loop, point) % 2 == 1
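# A minimal usage sketch (hypothetical values): the even-odd rule via a
# leftward ray.
# >>> unitSquare = [complex(0.0, 0.0), complex(1.0, 0.0), complex(1.0, 1.0), complex(0.0, 1.0)]
# >>> isPointInsideLoop(unitSquare, complex(0.5, 0.5))
# True
# >>> isPointInsideLoop(unitSquare, complex(2.0, 0.5))
# False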
def isSegmentCompletelyInX( segment, xFirst, xSecond ):
	'Determine if the segment is completely within the x range.'
segmentFirstX = segment[0].point.real
segmentSecondX = segment[1].point.real
if max( segmentFirstX, segmentSecondX ) > max( xFirst, xSecond ):
return False
return min( segmentFirstX, segmentSecondX ) >= min( xFirst, xSecond )
def isWiddershins(polygonComplex):
'Determine if the complex polygon goes round in the widdershins direction.'
return getAreaLoop(polygonComplex) > 0.0
def isWithinChannel( channelRadius, pointIndex, loop ):
	'Determine if the point is within the channel between two adjacent points.'
point = loop[pointIndex]
behindSegmentComplex = loop[(pointIndex + len(loop) - 1) % len(loop)] - point
behindSegmentComplexLength = abs( behindSegmentComplex )
if behindSegmentComplexLength < channelRadius:
return True
aheadSegmentComplex = loop[(pointIndex + 1) % len(loop)] - point
aheadSegmentComplexLength = abs( aheadSegmentComplex )
if aheadSegmentComplexLength < channelRadius:
return True
behindSegmentComplex /= behindSegmentComplexLength
aheadSegmentComplex /= aheadSegmentComplexLength
absoluteZ = getDotProductPlusOne( aheadSegmentComplex, behindSegmentComplex )
if behindSegmentComplexLength * absoluteZ < channelRadius:
return True
return aheadSegmentComplexLength * absoluteZ < channelRadius
def isXSegmentIntersectingPath( path, segmentFirstX, segmentSecondX, segmentYMirror, y ):
'Determine if a path is crossing inside the x segment.'
rotatedPath = getRotatedComplexes( segmentYMirror, path )
for pointIndex in xrange( len( rotatedPath ) - 1 ):
pointFirst = rotatedPath[pointIndex]
pointSecond = rotatedPath[pointIndex + 1]
if isLineIntersectingInsideXSegment( pointFirst, pointSecond, segmentFirstX, segmentSecondX, y ):
return True
return False
def isXSegmentIntersectingPaths( paths, segmentFirstX, segmentSecondX, segmentYMirror, y ):
'Determine if a path list is crossing inside the x segment.'
for path in paths:
if isXSegmentIntersectingPath( path, segmentFirstX, segmentSecondX, segmentYMirror, y ):
return True
return False
def joinSegmentTables( fromTable, intoTable ):
'Join both segment tables and put the join into the intoTable.'
intoTableKeys = intoTable.keys()
fromTableKeys = fromTable.keys()
joinedKeyTable = {}
concatenatedTableKeys = intoTableKeys + fromTableKeys
for concatenatedTableKey in concatenatedTableKeys:
joinedKeyTable[ concatenatedTableKey ] = None
joinedKeys = joinedKeyTable.keys()
joinedKeys.sort()
for joinedKey in joinedKeys:
xIntersectionIndexList = []
if joinedKey in intoTable:
addXIntersectionIndexesFromSegments( 0, intoTable[ joinedKey ], xIntersectionIndexList )
if joinedKey in fromTable:
addXIntersectionIndexesFromSegments( 1, fromTable[ joinedKey ], xIntersectionIndexList )
xIntersections = getJoinOfXIntersectionIndexes( xIntersectionIndexList )
lineSegments = getSegmentsFromXIntersections( xIntersections, joinedKey )
if len( lineSegments ) > 0:
intoTable[ joinedKey ] = lineSegments
else:
print('This should never happen, there are no line segments in joinSegmentTables in euclidean')
def joinXIntersectionsTables( fromTable, intoTable ):
'Join both XIntersections tables and put the join into the intoTable.'
joinedKeyTable = {}
concatenatedTableKeys = fromTable.keys() + intoTable.keys()
for concatenatedTableKey in concatenatedTableKeys:
joinedKeyTable[ concatenatedTableKey ] = None
for joinedKey in joinedKeyTable.keys():
xIntersectionIndexList = []
if joinedKey in intoTable:
addXIntersectionIndexesFromXIntersections( 0, xIntersectionIndexList, intoTable[ joinedKey ] )
if joinedKey in fromTable:
addXIntersectionIndexesFromXIntersections( 1, xIntersectionIndexList, fromTable[ joinedKey ] )
xIntersections = getJoinOfXIntersectionIndexes( xIntersectionIndexList )
if len( xIntersections ) > 0:
intoTable[ joinedKey ] = xIntersections
else:
print('This should never happen, there are no x intersections in joinXIntersectionsTables in euclidean')
def overwriteDictionary(fromDictionary, keys, toDictionary):
'Overwrite the dictionary.'
for key in keys:
if key in fromDictionary:
toDictionary[key] = fromDictionary[key]
def removeElementFromDictionary(dictionary, key):
'Remove element from the dictionary.'
if key in dictionary:
del dictionary[key]
def removeElementFromListTable(element, key, listDictionary):
'Remove an element from the list table.'
if key not in listDictionary:
return
elementList = listDictionary[key]
if len( elementList ) < 2:
del listDictionary[key]
return
if element in elementList:
elementList.remove(element)
def removeElementFromPixelListFromPoint( element, pixelDictionary, point ):
'Remove an element from the pixel list.'
stepKey = getStepKeyFromPoint(point)
removeElementFromListTable( element, stepKey, pixelDictionary )
def removeElementsFromDictionary(dictionary, keys):
'Remove the keys from the dictionary.'
for key in keys:
removeElementFromDictionary(dictionary, key)
def removePixelTableFromPixelTable( pixelDictionaryToBeRemoved, pixelDictionaryToBeRemovedFrom ):
'Remove the keys of one pixel table from another pixel table.'
removeElementsFromDictionary( pixelDictionaryToBeRemovedFrom, pixelDictionaryToBeRemoved.keys() )
def removePrefixFromDictionary( dictionary, prefix ):
'Remove the attributes starting with the prefix from the dictionary.'
for key in dictionary.keys():
if key.startswith( prefix ):
del dictionary[key]
def removeTrueFromDictionary(dictionary, key):
'Remove the key from the dictionary if its value is true.'
if key in dictionary:
if getBooleanFromValue(dictionary[key]):
del dictionary[key]
def removeTrueListFromDictionary( dictionary, keys ):
'Remove the keys from the dictionary whose values are true.'
for key in keys:
removeTrueFromDictionary( dictionary, key )
def subtractXIntersectionsTable( subtractFromTable, subtractTable ):
'Subtract the subtractTable from the subtractFromTable.'
subtractFromTableKeys = subtractFromTable.keys()
subtractFromTableKeys.sort()
for subtractFromTableKey in subtractFromTableKeys:
xIntersectionIndexList = []
addXIntersectionIndexesFromXIntersections( - 1, xIntersectionIndexList, subtractFromTable[ subtractFromTableKey ] )
if subtractFromTableKey in subtractTable:
addXIntersectionIndexesFromXIntersections( 0, xIntersectionIndexList, subtractTable[ subtractFromTableKey ] )
xIntersections = getXIntersectionsFromIntersections( xIntersectionIndexList )
if len( xIntersections ) > 0:
subtractFromTable[ subtractFromTableKey ] = xIntersections
else:
del subtractFromTable[ subtractFromTableKey ]
def swapList( elements, indexBegin, indexEnd ):
'Swap the list elements.'
elements[ indexBegin ], elements[ indexEnd ] = elements[ indexEnd ], elements[ indexBegin ]
def toggleHashtable( hashtable, key, value ):
'Toggle a hashtable between having and not having a key.'
if key in hashtable:
del hashtable[key]
else:
hashtable[key] = value
def transferClosestFillLoop(extrusionHalfWidth, oldOrderedLocation, remainingFillLoops, skein):
'Transfer the closest remaining fill loop.'
closestDistance = 987654321987654321.0
closestFillLoop = None
for remainingFillLoop in remainingFillLoops:
distance = getClosestDistanceIndexToLine(oldOrderedLocation.dropAxis(), remainingFillLoop).distance
if distance < closestDistance:
closestDistance = distance
closestFillLoop = remainingFillLoop
newClosestFillLoop = getLoopInsideContainingLoop(closestFillLoop, remainingFillLoops)
while newClosestFillLoop != None:
closestFillLoop = newClosestFillLoop
newClosestFillLoop = getLoopInsideContainingLoop(closestFillLoop, remainingFillLoops)
remainingFillLoops.remove(closestFillLoop)
addToThreadsFromLoop(extrusionHalfWidth, 'loop', closestFillLoop[:], oldOrderedLocation, skein)
def transferClosestPath( oldOrderedLocation, remainingPaths, skein ):
'Transfer the closest remaining path.'
closestDistance = 987654321987654321.0
closestPath = None
oldOrderedLocationComplex = oldOrderedLocation.dropAxis()
for remainingPath in remainingPaths:
distance = min( abs( oldOrderedLocationComplex - remainingPath[0] ), abs( oldOrderedLocationComplex - remainingPath[-1] ) )
if distance < closestDistance:
closestDistance = distance
closestPath = remainingPath
remainingPaths.remove( closestPath )
skein.addGcodeFromThreadZ( closestPath, oldOrderedLocation.z )
oldOrderedLocation.x = closestPath[-1].real
oldOrderedLocation.y = closestPath[-1].imag
def transferClosestPaths(oldOrderedLocation, remainingPaths, skein):
'Transfer the closest remaining paths.'
while len(remainingPaths) > 0:
transferClosestPath(oldOrderedLocation, remainingPaths, skein)
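# Added illustrative note: transferClosestPath and transferClosestPaths form a
# greedy nearest-neighbor ordering. The path whose nearer endpoint is closest
# to oldOrderedLocation is emitted first, the location is then moved to that
# path's last point, and the process repeats until remainingPaths is empty,
# keeping travel moves between extruded paths short.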
def transferPathsToNestedRings(nestedRings, paths):
'Transfer paths to nested rings.'
for nestedRing in nestedRings:
nestedRing.transferPaths(paths)
def translateVector3Path(path, translateVector3):
'Translate the vector3 path.'
for point in path:
point.setToVector3(point + translateVector3)
def translateVector3Paths(paths, translateVector3):
'Translate the vector3 paths.'
for path in paths:
translateVector3Path(path, translateVector3)
def unbuckleBasis( basis, maximumUnbuckling, normal ):
'Unbuckle space.'
normalDot = basis.dot( normal )
dotComplement = math.sqrt( 1.0 - normalDot * normalDot )
unbuckling = maximumUnbuckling
if dotComplement > 0.0:
unbuckling = min( 1.0 / dotComplement, maximumUnbuckling )
basis.setToVector3( basis * unbuckling )
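# Added math sketch (assuming basis and normal are unit vectors): normalDot is
# cos(theta) for the angle theta between basis and normal, so dotComplement is
# sqrt(1 - cos^2(theta)) = |sin(theta)| and the basis is stretched by
# min(1 / |sin(theta)|, maximumUnbuckling); the cap keeps the stretch bounded
# as the basis tilts toward the normal.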
class DistanceIndex:
'A class to hold the distance and the index of the loop.'
def __init__(self, distance, index):
'Initialize.'
self.distance = distance
self.index = index
def __repr__(self):
'Get the string representation of this distance index.'
return '%s, %s' % (self.distance, self.index)
class Endpoint:
'The endpoint of a segment.'
def __repr__(self):
'Get the string representation of this Endpoint.'
return 'Endpoint %s, %s' % ( self.point, self.otherEndpoint.point )
def getClosestEndpoint( self, endpoints ):
'Get closest endpoint.'
smallestDistance = 987654321987654321.0
closestEndpoint = None
for endpoint in endpoints:
distance = abs( self.point - endpoint.point )
if distance < smallestDistance:
smallestDistance = distance
closestEndpoint = endpoint
return closestEndpoint
def getClosestMiss(self, endpoints, path, pixelDictionary, width):
'Get the closest endpoint for which the segment to that endpoint misses the other extrusions.'
pathMaskTable = {}
smallestDistance = 987654321.0
penultimateMinusPoint = complex(0.0, 0.0)
if len(path) > 1:
penultimatePoint = path[-2]
addSegmentToPixelTable(penultimatePoint, self.point, pathMaskTable, 0, 0, width)
penultimateMinusPoint = penultimatePoint - self.point
if abs(penultimateMinusPoint) > 0.0:
penultimateMinusPoint /= abs(penultimateMinusPoint)
for endpoint in endpoints:
endpoint.segment = endpoint.point - self.point
endpoint.segmentLength = abs(endpoint.segment)
if endpoint.segmentLength <= 0.0:
return endpoint
endpoints.sort(compareSegmentLength)
for endpoint in endpoints[: 15]: # increasing the number of searched endpoints increases the search time, with 20 fill took 600 seconds for cilinder.gts, with 10 fill took 533 seconds
normalizedSegment = endpoint.segment / endpoint.segmentLength
isOverlappingSelf = getDotProduct(penultimateMinusPoint, normalizedSegment) > 0.9
if not isOverlappingSelf:
if len(path) > 2:
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointRotated = segmentYMirror * self.point
endpointPointRotated = segmentYMirror * endpoint.point
if isXSegmentIntersectingPath(path[max(0, len(path) - 21) : -1], pointRotated.real, endpointPointRotated.real, segmentYMirror, pointRotated.imag):
isOverlappingSelf = True
if not isOverlappingSelf:
totalMaskTable = pathMaskTable.copy()
addSegmentToPixelTable(endpoint.point, endpoint.otherEndpoint.point, totalMaskTable, 0, 0, width)
segmentTable = {}
addSegmentToPixelTable(self.point, endpoint.point, segmentTable, 0, 0, width)
if not isPixelTableIntersecting(pixelDictionary, segmentTable, totalMaskTable):
return endpoint
return None
def getClosestMissCheckEndpointPath( self, endpoints, path, pixelDictionary, width ):
'Get the closest endpoint for which the segment to that endpoint misses the other extrusions, also checking the path of the endpoint.'
pathMaskTable = {}
smallestDistance = 987654321.0
penultimateMinusPoint = complex(0.0, 0.0)
if len(path) > 1:
penultimatePoint = path[-2]
addSegmentToPixelTable( penultimatePoint, self.point, pathMaskTable, 0, 0, width )
penultimateMinusPoint = penultimatePoint - self.point
if abs(penultimateMinusPoint) > 0.0:
penultimateMinusPoint /= abs(penultimateMinusPoint)
for endpoint in endpoints:
endpoint.segment = endpoint.point - self.point
endpoint.segmentLength = abs(endpoint.segment)
if endpoint.segmentLength <= 0.0:
return endpoint
endpoints.sort( compareSegmentLength )
for endpoint in endpoints[ : 15 ]: # increasing the number of searched endpoints increases the search time, with 20 fill took 600 seconds for cilinder.gts, with 10 fill took 533 seconds
normalizedSegment = endpoint.segment / endpoint.segmentLength
isOverlappingSelf = getDotProduct( penultimateMinusPoint, normalizedSegment ) > 0.9
if not isOverlappingSelf:
if len(path) > 2:
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointRotated = segmentYMirror * self.point
endpointPointRotated = segmentYMirror * endpoint.point
if isXSegmentIntersectingPath( path[ max( 0, len(path) - 21 ) : - 1 ], pointRotated.real, endpointPointRotated.real, segmentYMirror, pointRotated.imag ):
isOverlappingSelf = True
endpointPath = endpoint.path
if len( endpointPath ) > 2:
segmentYMirror = complex(normalizedSegment.real, -normalizedSegment.imag)
pointRotated = segmentYMirror * self.point
endpointPointRotated = segmentYMirror * endpoint.point
if isXSegmentIntersectingPath( endpointPath, pointRotated.real, endpointPointRotated.real, segmentYMirror, pointRotated.imag ):
isOverlappingSelf = True
if not isOverlappingSelf:
totalMaskTable = pathMaskTable.copy()
addSegmentToPixelTable( endpoint.point, endpoint.otherEndpoint.point, totalMaskTable, 0, 0, width )
segmentTable = {}
addSegmentToPixelTable( self.point, endpoint.point, segmentTable, 0, 0, width )
if not isPixelTableIntersecting( pixelDictionary, segmentTable, totalMaskTable ):
return endpoint
return None
def getFromOtherPoint( self, otherEndpoint, point ):
'Initialize from other endpoint.'
self.otherEndpoint = otherEndpoint
self.point = point
return self
class LoopLayer:
'Loops with a z.'
def __init__(self, z):
'Initialize.'
self.loops = []
self.z = z
def __repr__(self):
'Get the string representation of this loop layer.'
return '%s, %s' % (self.z, self.loops)
class NestedRing:
'A nested ring.'
def __init__(self):
'Initialize.'
self.boundary = []
self.innerNestedRings = None
def __repr__(self):
'Get the string representation of this nested ring.'
return str(self.__dict__)
def addFlattenedNestedRings(self, flattenedNestedRings):
'Add flattened nested rings.'
flattenedNestedRings.append(self)
for innerNestedRing in self.innerNestedRings:
flattenedNestedRings += getFlattenedNestedRings(innerNestedRing.innerNestedRings)
def getFromInsideSurroundings(self, inputSurroundingInsides):
'Initialize from inside nested rings.'
transferredSurroundings = getTransferredNestedRings(inputSurroundingInsides, self.boundary)
self.innerNestedRings = getOrderedNestedRings(transferredSurroundings)
return self
class NestedBand(NestedRing):
'A loop that surrounds paths.'
def __init__(self):
'Initialize.'
NestedRing.__init__(self)
self.extraLoops = []
self.infillBoundaries = []
self.infillPaths = []
# self.lastExistingFillLoops = None
self.lastFillLoops = None
self.loop = None
self.penultimateFillLoops = []
self.perimeterPaths = []
self.z = None
def __repr__(self):
'Get the string representation of this nested ring.'
stringRepresentation = 'boundary\n%s\n' % self.boundary
stringRepresentation += 'loop\n%s\n' % self.loop
stringRepresentation += 'inner nested rings\n%s\n' % self.innerNestedRings
stringRepresentation += 'infillPaths\n'
for infillPath in self.infillPaths:
stringRepresentation += 'infillPath\n%s\n' % infillPath
stringRepresentation += 'perimeterPaths\n'
for perimeterPath in self.perimeterPaths:
stringRepresentation += 'perimeterPath\n%s\n' % perimeterPath
return stringRepresentation + '\n'
def addPerimeterInner(self, extrusionHalfWidth, oldOrderedLocation, skein, threadSequence):
'Add to the perimeter and the inner island.'
if self.loop == None:
skein.distanceFeedRate.addLine('(<perimeterPath>)')
transferClosestPaths(oldOrderedLocation, self.perimeterPaths[:], skein)
skein.distanceFeedRate.addLine('(</perimeterPath>)')
else:
addToThreadsFromLoop(extrusionHalfWidth, 'perimeter', self.loop[:], oldOrderedLocation, skein)
skein.distanceFeedRate.addLine('(</boundaryPerimeter>)')
addToThreadsRemove(extrusionHalfWidth, self.innerNestedRings[:], oldOrderedLocation, skein, threadSequence)
def addToBoundary(self, vector3):
'Add vector3 to boundary.'
self.boundary.append(vector3.dropAxis())
self.z = vector3.z
def addToLoop(self, vector3):
'Add vector3 to loop.'
if self.loop == None:
self.loop = []
self.loop.append(vector3.dropAxis())
self.z = vector3.z
def addToThreads(self, extrusionHalfWidth, oldOrderedLocation, skein, threadSequence):
'Add to paths from the last location; thread order is perimeter > inner > fill > paths or fill > perimeter > inner > paths.'
addNestedRingBeginning(skein.distanceFeedRate, self.boundary, self.z)
threadFunctionDictionary = {
'infill' : self.transferInfillPaths, 'loops' : self.transferClosestFillLoops, 'perimeter' : self.addPerimeterInner}
for threadType in threadSequence:
threadFunctionDictionary[threadType](extrusionHalfWidth, oldOrderedLocation, skein, threadSequence)
skein.distanceFeedRate.addLine('(</nestedRing>)')
def getFillLoops(self, penultimateFillLoops):
'Get last fill loops from the outside loop and the loops inside the inside loops.'
fillLoops = self.getLoopsToBeFilled()[:]
surroundingBoundaries = self.getSurroundingBoundaries()
withinLoops = []
if penultimateFillLoops == None:
penultimateFillLoops = self.penultimateFillLoops
if penultimateFillLoops == None:
print('Warning, penultimateFillLoops == None in getFillLoops in NestedBand in euclidean.')
return fillLoops
for penultimateFillLoop in penultimateFillLoops:
if len(penultimateFillLoop) > 2:
if getIsInFilledRegion(surroundingBoundaries, penultimateFillLoop[0]):
withinLoops.append(penultimateFillLoop)
if not getIsInFilledRegionByPaths(self.penultimateFillLoops, fillLoops):
fillLoops += self.penultimateFillLoops
for nestedRing in self.innerNestedRings:
fillLoops += getFillOfSurroundings(nestedRing.innerNestedRings, penultimateFillLoops)
return fillLoops
#
# def getLastExistingFillLoops(self):
# 'Get last existing fill loops.'
# lastExistingFillLoops = self.lastExistingFillLoops[:]
# for nestedRing in self.innerNestedRings:
# lastExistingFillLoops += nestedRing.getLastExistingFillLoops()
# return lastExistingFillLoops
def getLoopsToBeFilled(self):
'Get the loops to be filled: the last fill loops if they exist, otherwise the surrounding boundaries.'
if self.lastFillLoops == None:
return self.getSurroundingBoundaries()
return self.lastFillLoops
def getSurroundingBoundaries(self):
'Get the boundary of the surrounding loop plus any boundaries of the innerNestedRings.'
surroundingBoundaries = [self.boundary]
for nestedRing in self.innerNestedRings:
surroundingBoundaries.append(nestedRing.boundary)
return surroundingBoundaries
def transferClosestFillLoops(self, extrusionHalfWidth, oldOrderedLocation, skein, threadSequence):
'Transfer closest fill loops.'
if len( self.extraLoops ) < 1:
return
remainingFillLoops = self.extraLoops[:]
while len( remainingFillLoops ) > 0:
transferClosestFillLoop(extrusionHalfWidth, oldOrderedLocation, remainingFillLoops, skein)
def transferInfillPaths(self, extrusionHalfWidth, oldOrderedLocation, skein, threadSequence):
'Transfer the infill paths.'
if len(self.infillBoundaries) == 0 and len(self.infillPaths) == 0:
return
skein.distanceFeedRate.addLine('(<infill>)')
for infillBoundary in self.infillBoundaries:
skein.distanceFeedRate.addLine('(<infillBoundary>)')
for infillPoint in infillBoundary:
infillPointVector3 = Vector3(infillPoint.real, infillPoint.imag, self.z)
skein.distanceFeedRate.addLine(skein.distanceFeedRate.getInfillBoundaryLine(infillPointVector3))
skein.distanceFeedRate.addLine('(</infillBoundary>)')
transferClosestPaths(oldOrderedLocation, self.infillPaths[:], skein)
skein.distanceFeedRate.addLine('(</infill>)')
def transferPaths(self, paths):
'Transfer paths.'
for nestedRing in self.innerNestedRings:
transferPathsToNestedRings(nestedRing.innerNestedRings, paths)
self.infillPaths = getTransferredPaths(paths, self.boundary)
class PathZ:
'Complex path with a z.'
def __init__( self, z ):
self.path = []
self.z = z
def __repr__(self):
'Get the string representation of this path z.'
return '%s, %s' % ( self.z, self.path )
class ProjectiveSpace:
'Class to define a projective space.'
def __init__( self, basisX = Vector3(1.0, 0.0, 0.0), basisY = Vector3( 0.0, 1.0, 0.0 ), basisZ = Vector3(0.0, 0.0, 1.0) ):
'Initialize the basis vectors.'
self.basisX = basisX
self.basisY = basisY
self.basisZ = basisZ
def __repr__(self):
'Get the string representation of this ProjectiveSpace.'
return '%s, %s, %s' % ( self.basisX, self.basisY, self.basisZ )
def getByBasisXZ( self, basisX, basisZ ):
'Get by x basis and z basis.'
self.basisX = basisX
self.basisZ = basisZ
self.basisX.normalize()
self.basisY = basisZ.cross(self.basisX)
self.basisY.normalize()
return self
def getByBasisZFirst(self, basisZ, firstVector3):
'Get by basisZ and first.'
self.basisZ = basisZ
self.basisY = basisZ.cross(firstVector3)
self.basisY.normalize()
self.basisX = self.basisY.cross(self.basisZ)
self.basisX.normalize()
return self
def getByBasisZTop(self, basisZ, top):
'Get by basisZ and top.'
return self.getByBasisXZ(top.cross(basisZ), basisZ)
def getByLatitudeLongitude( self, viewpointLatitude, viewpointLongitude ):
'Get by latitude and longitude.'
longitudeComplex = getWiddershinsUnitPolar( math.radians( 90.0 - viewpointLongitude ) )
viewpointLatitudeRatio = getWiddershinsUnitPolar( math.radians( viewpointLatitude ) )
basisZ = Vector3( viewpointLatitudeRatio.imag * longitudeComplex.real, viewpointLatitudeRatio.imag * longitudeComplex.imag, viewpointLatitudeRatio.real )
return self.getByBasisXZ( Vector3( - longitudeComplex.imag, longitudeComplex.real, 0.0 ), basisZ )
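# Added hedged sketch: this builds an orthonormal viewing frame from spherical
# angles. Treating viewpointLatitude as a polar angle measured from +z and
# (90 - viewpointLongitude) degrees as the azimuth az, basisZ is the unit
# vector (sin(lat) * cos(az), sin(lat) * sin(az), cos(lat)), and the basisX
# passed to getByBasisXZ is the horizontal direction perpendicular to the
# azimuth.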
def getByTilt( self, tilt ):
'Get by tilt.'
xPlaneAngle = getWiddershinsUnitPolar( tilt.real )
self.basisX = Vector3( xPlaneAngle.real, 0.0, xPlaneAngle.imag )
yPlaneAngle = getWiddershinsUnitPolar( tilt.imag )
self.basisY = Vector3( 0.0, yPlaneAngle.real, yPlaneAngle.imag )
self.basisZ = self.basisX.cross(self.basisY)
return self
def getComplexByComplex( self, pointComplex ):
'Get complex by complex point.'
return self.basisX.dropAxis() * pointComplex.real + self.basisY.dropAxis() * pointComplex.imag
def getCopy(self):
'Get copy.'
return ProjectiveSpace( self.basisX, self.basisY, self.basisZ )
def getDotComplex(self, point):
'Get the dot complex.'
return complex( point.dot(self.basisX), point.dot(self.basisY) )
def getDotVector3(self, point):
'Get the dot vector3.'
return Vector3(point.dot(self.basisX), point.dot(self.basisY), point.dot(self.basisZ))
def getNextSpace( self, nextNormal ):
'Get next space by next normal.'
nextSpace = self.getCopy()
nextSpace.normalize()
dotNext = nextSpace.basisZ.dot( nextNormal )
if dotNext > 0.999999:
return nextSpace
if dotNext < - 0.999999:
nextSpace.basisX = - nextSpace.basisX
return nextSpace
crossNext = nextSpace.basisZ.cross( nextNormal )
oldBasis = ProjectiveSpace().getByBasisZTop( nextSpace.basisZ, crossNext )
newBasis = ProjectiveSpace().getByBasisZTop( nextNormal, crossNext )
nextSpace.basisX = newBasis.getVector3ByPoint( oldBasis.getDotVector3( nextSpace.basisX ) )
nextSpace.basisY = newBasis.getVector3ByPoint( oldBasis.getDotVector3( nextSpace.basisY ) )
nextSpace.basisZ = newBasis.getVector3ByPoint( oldBasis.getDotVector3( nextSpace.basisZ ) )
nextSpace.normalize()
return nextSpace
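# Added hedged sketch: getNextSpace reorients the basis toward nextNormal.
# Nearly parallel normals return the normalized copy unchanged; nearly
# antiparallel normals just negate basisX. Otherwise each basis vector is
# expressed in a frame built from (basisZ, crossNext) and re-emitted from the
# matching frame built from (nextNormal, crossNext), i.e. a rotation about
# the shared cross product crossNext.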
def getSpaceByXYScaleAngle( self, angle, scale ):
'Get space by angle and scale.'
spaceByXYScaleRotation = ProjectiveSpace()
planeAngle = getWiddershinsUnitPolar(angle)
spaceByXYScaleRotation.basisX = self.basisX * scale.real * planeAngle.real + self.basisY * scale.imag * planeAngle.imag
spaceByXYScaleRotation.basisY = - self.basisX * scale.real * planeAngle.imag + self.basisY * scale.imag * planeAngle.real
spaceByXYScaleRotation.basisZ = self.basisZ
return spaceByXYScaleRotation
def getVector3ByPoint(self, point):
'Get vector3 by point.'
return self.basisX * point.x + self.basisY * point.y + self.basisZ * point.z
def normalize(self):
'Normalize.'
self.basisX.normalize()
self.basisY.normalize()
self.basisZ.normalize()
def unbuckle( self, maximumUnbuckling, normal ):
'Unbuckle space.'
unbuckleBasis( self.basisX, maximumUnbuckling, normal )
unbuckleBasis( self.basisY, maximumUnbuckling, normal )
class XIntersectionIndex:
'A class to hold the x intersection position and the index of the loop which intersected.'
def __init__( self, index, x ):
'Initialize.'
self.index = index
self.x = x
def __cmp__(self, other):
'Get comparison in order to sort x intersections in ascending order of x.'
if self.x < other.x:
return - 1
return int( self.x > other.x )
def __eq__(self, other):
'Determine whether this XIntersectionIndex is identical to other one.'
if other == None:
return False
if other.__class__ != self.__class__:
return False
return self.index == other.index and self.x == other.x
def __ne__(self, other):
'Determine whether this XIntersectionIndex is not identical to other one.'
return not self.__eq__(other)
def __repr__(self):
'Get the string representation of this x intersection.'
return 'XIntersectionIndex index %s; x %s ' % ( self.index, self.x )
|
bhargav2408/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/lib2to3/tests/data/py2_test_grammar.py
|
285
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
# Backslash does not mean continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
theheros/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/lib2to3/tests/data/py2_test_grammar.py
|
285
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
# Backslash does not means continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
pantsbuild/pex
|
refs/heads/main
|
pex/vendor/_vendored/pip/pip/_vendor/chardet/euctwprober.py
|
289
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
super(EUCTWProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "EUC-TW"
@property
def language(self):
return "Taiwan"
|
prutseltje/ansible
|
refs/heads/devel
|
test/units/modules/system/test_systemd.py
|
133
|
import os
import tempfile
from ansible.compat.tests import unittest
from ansible.modules.system.systemd import parse_systemctl_show
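# Added commentary, inferred only from the test cases below rather than from
# the parser's source: a value that opens with '{' can be continued onto
# following lines until a closing '}' (the multi-line ExecStart case), yet
# the lone-brace Description case stays single-line, so the continuation
# logic is evidently conditional (it may, for instance, apply only to Exec*
# values).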
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n'
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
|
domenicosolazzo/practice-django
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/contenttypes/__init__.py
|
809
|
default_app_config = 'django.contrib.contenttypes.apps.ContentTypesConfig'
|
s20121035/rk3288_android5.1_repo
|
refs/heads/master
|
external/chromium_org/build/android/gyp/util/md5_check_test.py
|
99
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tempfile
import unittest
import md5_check # pylint: disable=W0403
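# Added commentary, inferred from the assertions below rather than from
# md5_check itself: CallAndRecordIfStale appears to invoke the callback when
# the stamp file is missing, when force=True, or when the hash of the input
# paths/strings changed -- with file order ignored but string order
# significant.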
class TestMd5Check(unittest.TestCase):
def setUp(self):
self.called = False
def testCallAndRecordIfStale(self):
input_strings = ['string1', 'string2']
input_file1 = tempfile.NamedTemporaryFile()
input_file2 = tempfile.NamedTemporaryFile()
file1_contents = 'input file 1'
file2_contents = 'input file 2'
input_file1.write(file1_contents)
input_file1.flush()
input_file2.write(file2_contents)
input_file2.flush()
input_files = [input_file1.name, input_file2.name]
record_path = tempfile.NamedTemporaryFile(suffix='.stamp')
def CheckCallAndRecord(should_call, message, force=False):
self.called = False
def MarkCalled():
self.called = True
md5_check.CallAndRecordIfStale(
MarkCalled,
record_path=record_path.name,
input_paths=input_files,
input_strings=input_strings,
force=force)
self.failUnlessEqual(should_call, self.called, message)
CheckCallAndRecord(True, 'should call when record doesn\'t exist')
CheckCallAndRecord(False, 'should not call when nothing changed')
CheckCallAndRecord(True, force=True, message='should call when forced')
input_file1.write('some more input')
input_file1.flush()
CheckCallAndRecord(True, 'changed input file should trigger call')
input_files = input_files[::-1]
CheckCallAndRecord(False, 'reordering of inputs shouldn\'t trigger call')
input_files = input_files[:1]
CheckCallAndRecord(True, 'removing file should trigger call')
input_files.append(input_file2.name)
CheckCallAndRecord(True, 'added input file should trigger call')
input_strings[0] = input_strings[0] + ' a bit longer'
CheckCallAndRecord(True, 'changed input string should trigger call')
input_strings = input_strings[::-1]
CheckCallAndRecord(True, 'reordering of string inputs should trigger call')
input_strings = input_strings[:1]
CheckCallAndRecord(True, 'removing a string should trigger call')
input_strings.append('a brand new string')
CheckCallAndRecord(True, 'added input string should trigger call')
if __name__ == '__main__':
unittest.main()
|
wgapl/moose
|
refs/heads/devel
|
framework/contrib/nsiqcppstyle/nsiqcppstyle_exe-test.py
|
43
|
#!/usr/bin/python
#
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import getopt
import os #@UnusedImport
import re
import sys #@UnusedImport
import copy
import nsiqcppstyle_checker
import nsiqcppstyle_state
import nsiqcppstyle_rulemanager
import nsiqcppstyle_reporter
import updateagent.agent
from nsiqcppstyle_util import * #@UnusedWildImport
try :
set()
except NameError:
from sets import Set as set
version = "0.2.2.13"
####################################################################################################
title = "nsiqcppstyle : N'SIQ Cpp Style ver "+ version + "\n"
def ShowMessageAndExit(msg, usageOutput=True) :
print >> sys.stderr, msg
if usageOutput : Usage()
sys.exit(-1)
def Usage() :
print \
"""
======================================================================================
Usage : nsiqcppstyle [Options]
targetdirectory
[Example]
nsiqcppstyle .
nsiqcppstyle targetdir
nsiqcppstyle -f filefilterpath targetfilepath
[Options]
-h Show this help
-v Show detailed output (verbose mode)
-r Show rule list
-o path Set the output path. It's only applied when the output is csv or xml.
-f path Set the filefilter path. If not provided, it uses the default filterpath
(target/filefilter.txt)
If you provide a file path (not a folder path) for the target,
the -f option should be provided.
--var=key:value,key:value
provide the variables to customize the rule behavior.
Add file extensions to be counted as assigned languages.
-s Assign Filter scope name to be applied in this analysis
--output= output format: 'emacs', 'vs7', 'csv', 'xml', 'eclipse' and 'html'. Default value is vs7.
emacs, vs7 and eclipse output the result on stdout in the form
that each tool recognizes.
csv and xml output the result to the files "nsiqcppstyle_result.csv"
and "nsiqcppstyle_result.xml" respectively, if you don't provide the -o option.
--url= Specify a base url path. To be used in conjunction with --output=html
targetfilepath will be appended to the url path.
--ci Continuous Integration mode. If this mode is on, this tool only reports a summary.
* nsiqcppstyle reports coding standard violations on C/C++ source code.
* By default, it doesn't apply any rules to the source. If you want to apply rules,
they should be provided in the filefilter.txt file in the following form.
~ RULENAME
* You can customize the rule behavior by providing a --var=key:value pair when executing
the tool, and you can put it in the filefilter.txt. The format is the following.
% key:value
* If you want to filter in or out some source code files in the target directory,
please place a filefilter.txt file in the target directory in the following form:
* FILTER_SCOPE_NAME
+ INCLUDE_PATH_PATTERNS
- EXCLUDE_PATH_PATTERNS
= LANGUAGE_NAME:EXTENSION,LANGUAGE_NAME:EXTENSION
The filter scope name is an identifier used to selectively apply filters.
To measure quality, perhaps only the main sources, excluding tests, should
be measured; to measure productivity, the test code might be measured as well.
To keep this information in the same file (filefilter.txt), you can provide the
* FILTER_SCOPE_NAME line before the filter configuration starts.
You can define multiple filter scope names in filefilter.txt. In addition,
you can run nsiqcollector with the -s option to specify the filter scope name used.
We recommend defining at least two filter scopes (Productivity, Quality).
The included (+) / excluded (-) paths are applied sequentially from top to bottom.
By default, all files under the target directory, except those under /.cvs/
and /.svn/, will be included for analysis.
* If basefilelist.txt (pairs of filename and filesize) is in the target directory,
nsiqcppstyle recognizes it and checks whether each file is modified or new,
and it analyzes only new and modified files. Please refer to nsiqcollector
to generate basefilelist.txt.
"""
sys.exit(0)
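# For illustration only (added commentary, not part of the original source):
# a hypothetical filefilter.txt combining the directives described above
# might look like the following sketch; the scope name, paths, rule name and
# key/value pair are all made up:
#
#   * Quality
#   + /src/
#   - /src/test/
#   ~ SOME_RULE_NAME
#   % tabsize:4
#   = C/C++:cc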
def main(argv=None):
global filename
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "o:s:m:hvrf:", ["help", "csv", "output=", "list_rules", "verbose=", "show-url", "no-update", "ci", "var=", "noBase", "url=", "quiet", "basedir="])
except getopt.error, msg:
ShowMessageAndExit(msg)
return 0
outputPath = ""
_nsiqcppstyle_state.output_format = "vs7"
_nsiqcppstyle_state.baseURL = ""
_nsiqcppstyle_state.base_dir = ""
_nsiqcppstyle_state.quiet = False
filterScope = "default"
filterPath = ""
ciMode = False
noBase = False
varMap = {}
extLangMap = {
"Html":set(["htm", "html"]),
"Java":set(["java"]),
"Javascript/ActionScript":set(["js", "as"]),
"JSP/PHP":set(["jsp", "php", "JSP", "PHP"]),
"C/C++":set(["cpp", "h", "c", "hxx", "cxx", "hpp"])
}
updateNsiqCppStyle = False
for o, a in opts:
if o in ("-h", "--help"):
print title
Usage()
elif o in ("-r", "--list-rules") :
ShowRuleList()
elif o == "-o" :
outputPath = a.strip().replace("\"", "")
elif o == "--no-update" :
updateNsiqCppStyle = False
elif o == "-f" :
filterPath = a.strip().replace("\"", "")
elif o == "-v" :
EnableVerbose()
elif o == "-s" :
filterScope = a
elif o == "--show-url" :
_nsiqcppstyle_state.showUrl = True
elif o == '--output':
if not a in ('emacs', 'vs7', 'csv', 'xml', 'eclipse', 'html'):
print title
ShowMessageAndExit('The only allowed output formats are emacs, vs7, csv, xml, eclipse and html.')
_nsiqcppstyle_state.output_format = a
elif o == "--var" :
varMap = GetCustomKeyValueMap(a, "--var="+a)
elif o == "--ci" :
ciMode = True
elif o == "--noBase" :
noBase = True
elif o == "--url" :
_nsiqcppstyle_state.baseURL = a
elif o == "--quiet" :
_nsiqcppstyle_state.quiet = True
elif o == "--basedir" :
_nsiqcppstyle_state.base_dir = a
print title
runtimePath = GetRuntimePath()
sys.path.append(runtimePath)
if updateNsiqCppStyle :
try :
print "======================================================================================"
except Exception, e:
print e
targetPaths = GetRealTargetPaths(args)
multipleTarget = True
if len(targetPaths) == 1 :
multipleTarget = False
#If multiple target
if multipleTarget :
if len(outputPath) == 0 :
ShowMessageAndExit("OutputPath(-o) should be provided to analyze multiple targets")
else :
outputPath = GetOutputPath(targetPaths[0], outputPath)
ruleManager = nsiqcppstyle_rulemanager.ruleManager
cExtendstionSet = extLangMap.get("C/C++")
nsiqcppstyle_reporter.PrepareReport(outputPath, _nsiqcppstyle_state.output_format)
analyzedFiles = []
for targetPath in targetPaths :
nsiqcppstyle_reporter.StartTarget(targetPath)
extLangMapCopy = copy.deepcopy(extLangMap)
targetName = os.path.basename(targetPath)
if _nsiqcppstyle_state.quiet == False:
print "======================================================================================"
print "= Analyzing %s " % targetName
if filterPath != "" :
filefilterPath= filterPath
elif os.path.isfile(targetPath) :
filefilterPath = os.path.join(os.path.dirname(targetPath), "filefilter.txt")
else :
filefilterPath = os.path.join(targetPath, "filefilter.txt")
if (noBase) :
basefilelist = NullBaseFileList()
else :
basefilelist = BaseFileList(targetPath)
# Get Active Filter
filterManager = FilterManager(filefilterPath, extLangMapCopy, varMap, filterScope)
if filterScope != filterManager.GetActiveFilter().filterName :
print "\n%s filter scope is not available. Instead, use %s\n" % (filterScope, filterManager.GetActiveFilter().filterName)
filter = filterManager.GetActiveFilter()
# Load Rule
if len(filter.nsiqCppStyleRules) == 0 :
ShowMessageAndExit("Error!. Rules must be set in %s" % filefilterPath, False)
continue
ruleManager.LoadRules(filter.nsiqCppStyleRules, (not ciMode and not _nsiqcppstyle_state.quiet))
_nsiqcppstyle_state.checkers = filter.nsiqCppStyleRules
_nsiqcppstyle_state.varMap = filter.varMap
nsiqcppstyle_reporter.ReportRules(ruleManager.availRuleNames, filter.nsiqCppStyleRules)
if not ciMode and _nsiqcppstyle_state.quiet == False:
print filter.to_string()
print "======================================================================================"
if VerboseMode() : print "* run nsiqcppstyle analysis on %s" % targetName
# if the target is file, analyze it without condition
if os.path.isfile(targetPath) :
fileExtension = targetPath[targetPath.rfind('.') + 1:]
if fileExtension in cExtendstionSet :
ProcessFile(ruleManager, targetPath, analyzedFiles, ciMode)
# if the target is directory, analyze it with filefilter and basefilelist
else :
for root, dirs, files in os.walk(targetPath) :
if '.cvs' in dirs :
dirs.remove('.cvs')
if '.svn' in dirs :
dirs.remove('.svn')
for fname in files :
fileExtension = fname[fname.rfind('.') + 1:]
eachFile = os.path.join(root, fname)
basePart = eachFile[len(targetPath):]
if fileExtension in cExtendstionSet and basefilelist.IsNewOrChanged(eachFile) and filter.CheckFileInclusion(basePart) :
nsiqcppstyle_reporter.StartFile(os.path.dirname(basePart), fname)
ProcessFile(ruleManager, eachFile, analyzedFiles, ciMode)
nsiqcppstyle_reporter.EndFile()
ruleManager.RunProjectRules(targetPath)
nsiqcppstyle_reporter.EndTarget()
nsiqcppstyle_reporter.ReportSummaryToScreen(analyzedFiles, _nsiqcppstyle_state, filter, ciMode)
nsiqcppstyle_reporter.CloseReport(_nsiqcppstyle_state.output_format)
return 0
except Usage, err:
print >> sys.stderr, err.msg
print >> sys.stderr, "for help use --help"
sys.exit(-1)
##################################################################################################
def ProcessFile(ruleManager, file, analyzedFiles, ciMode):
if not ciMode and _nsiqcppstyle_state.quiet == False: print "Processing : ", file
nsiqcppstyle_checker.ProcessFile(ruleManager, file)
analyzedFiles.append(file)
def Update():
updateagent.agent.Update("http://nsiqcppstyle.nsiq.nhncorp.com/update", version)
csvResult = []
def GetOutputPath(outputBasePath, outputPath) :
"Returns the LOC and complexity result path"
if outputPath == "" :
if os.path.isfile(outputBasePath) :
outputPath = os.path.dirname(outputBasePath)
else :
outputPath = outputBasePath
return os.path.realpath(outputPath)
def GetRealTargetPaths(args) :
"extract real target path list from args"
if len(args) == 0 :
ShowMessageAndExit("Error! : Target directory must be provided")
targetPaths = []
for eachTarget in args :
realPath = os.path.realpath(eachTarget)
targetPaths.append(realPath)
# CheckPathPermission(realPath, "Target directory")
if not os.path.exists(realPath) :
ShowMessageAndExit("Error! : Target directory %s is not exists" % eachTarget)
return targetPaths
##################################################################################################
def EnableVerbose() :
_nsiqcppstyle_state.verbose = True
def VerboseMode() :
return _nsiqcppstyle_state.verbose
##############################################################################
# Filter Manager
# - Load Filter
##############################################################################
class FilterManager :
defaultFilterName = "default"
def __init__(self, fileFilterPath, extLangMap, varMap, activeFilterName) :
self.fileFilterPath = fileFilterPath
self.baseExtLangMap = extLangMap
self.baseVarMap = varMap
self.filterMap = {FilterManager.defaultFilterName: self.CreateNewFilter(FilterManager.defaultFilterName)}
filter = self.GetFilter(self.defaultFilterName)
self.activeFilterName = self.defaultFilterName
f = self.GetFilterFile(fileFilterPath)
if f == None :
filter.AddExclude("/.svn/")
filter.AddExclude("/.cvs/")
return;
for line in f.readlines():
line = line.strip()
if line.startswith("#") or len(line) == 0 :
continue
if line.startswith("*") :
if (len(line[1:].strip()) != 0) :
filterName = line[1:].strip()
filter = self.GetFilter(filterName)
elif line.startswith("=") :
if (len(line[1:].strip()) != 0) :
filter.AddLangMap(line[1:].strip(), "\"" + line + "\" of filefilter.txt")
elif line.startswith("~") :
if (len(line[1:].strip()) != 0) :
filter.AddCppChecker(line[1:].strip())
elif line.startswith("+") :
arg = line[1:].strip()
if arg != "" : filter.AddInclude(arg)
elif line.startswith("-") :
arg = line[1:].strip()
if arg != "" : filter.AddExclude(arg)
elif line.startswith("%") :
arg = line[1:].strip()
if arg != "" : filter.AddVarMap(arg, "\"" + arg + "\" of filefilter.txt")
f.close()
for eachMapKey in self.filterMap.keys() :
self.filterMap[eachMapKey].AddExclude("/.cvs/")
self.filterMap[eachMapKey].AddExclude("/.svn/")
if (self.filterMap.has_key(activeFilterName)) :
self.activeFilterName = activeFilterName
def CreateNewFilter(self, filterName):
return Filter(filterName, copy.deepcopy(self.baseExtLangMap), copy.deepcopy(self.baseVarMap))
def GetFilter(self, filterName):
if self.filterMap.has_key(filterName) :
return self.filterMap[filterName]
else :
self.filterMap[filterName] = self.CreateNewFilter(filterName)
return self.filterMap[filterName]
def GetActiveFilter(self):
return self.GetFilter(self.activeFilterName)
def GetFilterFile(self, filterfile):
if not os.path.exists(filterfile) :
return None
f = file(filterfile, 'r')
return f
##############################################################################
# Filter
# - Represent each Filter
# - Check if the file is included or not
##############################################################################
class Filter :
"""
Filter
- Represent each Filter
- Check if the file is included or not
"""
def __init__(self, filterName, baseExtLangMap, baseVarMap):
self.extLangMap = baseExtLangMap
self.varMap = baseVarMap
self.filterName = filterName
self.filefilter = []
self.match = re.compile("^(\\\\|//)")
self.nsiqCppStyleRules = []
def to_string(self) :
template = """Filter Scope "%s" is applied.
Current Filter Setting (Following is applied sequentially)
%s
Current File extension and Language Settings
%s"""
s = ""
count = 1
for eachfilter in self.filefilter :
filterment = ""
if eachfilter[0] : filterment = "is included"
else : filterment = "is excluded"
s = s + (" %s. %s %s\n" % (count, eachfilter[1], filterment))
count = count + 1
return template % (self.filterName, s, self.GetLangString())
def NormalizePath(self, eachFilter):
replacedpath = eachFilter.replace("/", os.path.sep)
replacedpath = replacedpath.replace("\\\\", os.path.sep);
return replacedpath.replace("\\", os.path.sep);
def CheckExist(self, includeOrExclude, eachFilter, startwith):
return (self.filefilter.count([includeOrExclude, eachFilter, startwith]) == 1)
def AddInclude(self, eachFilter):
self.AddFilter(True, eachFilter)
def AddExclude(self, eachFilter):
self.AddFilter(False, eachFilter)
def AddCppChecker(self, eachChecker):
self.nsiqCppStyleRules.append(eachChecker)
def AddFilter(self, inclusion, eachFilter):
startwith = False
if eachFilter.startswith("\\\\") or eachFilter.startswith("//") :
# a leading // or \\ anchors the filter at the start of the path
eachFilter = self.match.sub("", eachFilter)
startwith = True
filterString = self.NormalizePath(eachFilter)
if self.CheckExist(inclusion, filterString, startwith) :
self.filefilter.remove([inclusion, filterString, startwith])
self.filefilter.append([inclusion, filterString, startwith])
def GetFileFilter(self):
return self.filefilter
def GetLangString(self) :
s = ""
for eachKey in self.extLangMap.keys():
if eachKey == "C/C++" :
s = s + " " + eachKey + "="
extSet = self.extLangMap.get(eachKey)
setLen = len(extSet)
count = 0
for eachExt in extSet :
count = count + 1
s = s + eachExt
if count < setLen : s = s + ","
else : s = s + "\n"
return s;
def CheckFileInclusion(self, fileStr):
eachfile = self.NormalizePath(fileStr)
inclusion = True
for eachfilter in self.filefilter :
if eachfilter[2] == True :
if eachfile.startswith(eachfilter[1]) :
inclusion = eachfilter[0]
else :
if eachfile.find(eachfilter[1]) != -1 :
inclusion = eachfilter[0]
return inclusion
def GetLangMap(self):
return self.extLangMap;
def AddLangMap(self, langMapString, where):
langExtList = langMapString.split(",")
for eachExt in langExtList :
extLangPair = eachExt.split(":")
if len(extLangPair) != 2 :
ShowMessageAndExit("Error! : The extension and language pair (%s) is incorrect in %s, please use LANGUAGENAME:EXTENSION style" % (langMapString, where))
lang, ext = extLangPair
self.extLangMap.get(lang).add(ext)
def AddVarMap(self, keyValuePairString, where):
varMap = GetCustomKeyValueMap(keyValuePairString, where)
for eachVar in varMap.keys() :
if self.varMap.has_key(eachVar) :
continue
else :
self.varMap[eachVar] = varMap[eachVar]
def GetCustomKeyValueMap(keyValuePair, where):
varMap = {}
customKeyValues = keyValuePair.split(",")
for eachCustomKeyValue in customKeyValues :
customKeyValuePair = eachCustomKeyValue.split(":")
if len(customKeyValuePair) != 2 :
ShowMessageAndExit("Error! : The var key and value pair (%s) is incorrect in %s, please use KEY:VALUE style" % (keyValuePair, where))
key, value = customKeyValuePair
varMap[key] = value
return varMap
##############################################################################
# BaseFileList
##############################################################################
class BaseFileList(object):
"""
- Represents basefilelist.txt state
- It checks if the current file name and size pair is in the basefilelist.
"""
def __init__(self, targetDir):
self.baseFileList = {}
if os.path.isdir(targetDir) :
fsrc = os.path.join(targetDir, "basefilelist.txt")
if os.path.exists(fsrc) :
f = file(fsrc)
for line in f.readlines() :
self.baseFileList[line.strip()] = True
def IsNewOrChanged(self, filename):
item = os.path.basename(filename) + str(os.path.getsize(filename))
return not self.baseFileList.get(item, False)
class NullBaseFileList(object):
"""
- Null object used in place of BaseFileList when no base list is given
- It treats every file as new or changed.
"""
def __init__(self):
pass
def IsNewOrChanged(self, filename):
return True
def ShowRuleList():
nsiqcppstyle_rulemanager.ruleManager.availRuleNames.sort()
for rule in nsiqcppstyle_rulemanager.ruleManager.availRuleNames:
print "~", rule
sys.exit(1)
def CheckPathPermission(path, folderrole) :
if not os.access(path, os.R_OK) and os.path.exists(path) :
ShowMessageAndExit("Error! : %s You should have read permission in %s." % (folderrole, path))
return True
####################################################################################################
_nsiqcppstyle_state = nsiqcppstyle_state._nsiqcppstyle_state
if __name__ == "__main__":
sys.path.append(GetRuntimePath())
sys.exit(main())
|
40223133/cadp_w2
|
refs/heads/master
|
wsgi/static/templates/jscript/Lib/itertools.py
|
4
|
# downloaded from http://shedskin.googlecode.com/svn-history/r1279/trunk/shedskin/lib/itertools.py
# http://docs.python.org/dev/_sources/library/itertools.txt
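# Added commentary: these look like Shed Skin type-inference stubs rather
# than working implementations -- each generator yields one representative
# element so the compiler can infer result types, and the 'Known
# limitations' docstrings below describe the stubs, not CPython's itertools.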
# Infinite Iterators
def count(start = 0, step = 1):
yield start
def cycle(iterable):
yield next(iter(iterable))
def repeat(object, times = 0):
yield object
# Iterators terminating on the shortest input sequence
def chain(*iterables):
yield next(iter(iterables))
def compress(data, selectors):
next(iter(selectors))
yield next(iter(data))
def __pred_elem(predicate, iterable):
elem = next(iter(iterable))
predicate(elem)
return elem
def dropwhile(predicate, iterable):
yield __pred_elem(predicate, iterable)
def groupby(iterable, key = lambda x: x):
yield key(next(iter(iterable))), iter(iterable)
def ifilter(predicate, iterable):
yield __pred_elem(predicate, iterable)
def ifilterfalse(predicate, iterable):
yield __pred_elem(predicate, iterable)
def takewhile(predicate, iterable):
yield __pred_elem(predicate, iterable)
def islice(iterable, start, stop = -1, step = -1):
'Known limitations: cannot distinguish between 0 and None for the stop argument'
yield next(iter(iterable))
def imap(function, *iterables):
'Known limitations: no more than 5 iterables are supported'
yield function(next(*iter(iterables)))
def __imap3(function, iter1, iter2):
yield function(next(iter(iter1)), next(iter(iter2)))
def __imap4(function, iter1, iter2, iter3):
yield function(next(iter(iter1)), next(iter(iter2)), next(iter(iter3)))
def __imap5(function, iter1, iter2, iter3, iter4):
yield function(next(iter(iter1)), next(iter(iter2)), next(iter(iter3)), next(iter(iter4)))
def __imap6(function, iter1, iter2, iter3, iter4, iter5):
yield function(next(iter(iter1)), next(iter(iter2)), next(iter(iter3)), next(iter(iter4)), next(iter(iter5)))
def starmap(function, iterable):
yield function(*iterable[0])
def tee(iterable, n = 2):
return iter(iterable), iter(iterable)
def izip(*iterables):
'Known limitations: iterables must all be of the same type if there are more than two'
yield next(iter(iterables)),
def __izip2(iterable1, iterable2):
yield next(iter(iterable1)), next(iter(iterable2))
def izip_longest(__kw_fillvalue=None, *iterables):
'Known limitations: iterables must all be of the same type, cannot distinguish between 0 and None for the return value'
yield next(iter(iterables)),
def __izip_longest2(iterable1, iterable2, __kw_fillvalue=None):
yield next(iter(iterable1)), next(iter(iterable2))
# Combinatoric generators
def product(__kw_repeat=1, *iterables):
'Known limitations: iterables must all be of the same type if there are more than two'
yield next(iter(iterables)),
def __product2(iterable1, iterable2, __kw_repeat=1):
yield next(iter(iterable1)), next(iter(iterable2))
def permutations(iterable, r = None):
yield next(iter(iterable)),
def combinations(iterable, r):
yield next(iter(iterable)),
def combinations_with_replacement(iterable, r):
yield next(iter(iterable)),
|
ashishnitinpatil/vnitstudnotifs
|
refs/heads/master
|
django/contrib/gis/forms/widgets.py
|
109
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.gis import gdal
from django.contrib.gis.geos import GEOSGeometry, GEOSException
from django.forms.widgets import Widget
from django.template import loader
from django.utils import six
from django.utils import translation
logger = logging.getLogger('django.contrib.gis')
class BaseGeometryWidget(Widget):
"""
The base class for rich geometry widgets.
Renders a map using the WKT of the geometry.
"""
geom_type = 'GEOMETRY'
map_srid = 4326
map_width = 600
map_height = 400
display_raw = False
supports_3d = False
template_name = '' # set on subclasses
def __init__(self, attrs=None):
self.attrs = {}
for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'):
self.attrs[key] = getattr(self, key)
if attrs:
self.attrs.update(attrs)
def serialize(self, value):
return value.wkt if value else ''
def deserialize(self, value):
try:
return GEOSGeometry(value, self.map_srid)
except (GEOSException, ValueError) as err:
logger.error(
"Error creating geometry from value '%s' (%s)" % (
value, err)
)
return None
def render(self, name, value, attrs=None):
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if isinstance(value, six.string_types):
value = self.deserialize(value)
if value:
# Check that srid of value and map match
if value.srid != self.map_srid:
try:
ogr = value.ogr
ogr.transform(self.map_srid)
value = ogr
except gdal.OGRException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
value.srid, self.map_srid, err)
)
context = self.build_attrs(attrs,
name=name,
module='geodjango_%s' % name.replace('-','_'), # JS-safe
serialized=self.serialize(value),
geom_type=gdal.OGRGeomType(self.attrs['geom_type']),
STATIC_URL=settings.STATIC_URL,
LANGUAGE_BIDI=translation.get_language_bidi(),
)
return loader.render_to_string(self.template_name, context)
class OpenLayersWidget(BaseGeometryWidget):
template_name = 'gis/openlayers.html'
class Media:
js = (
'http://openlayers.org/api/2.11/OpenLayers.js',
'gis/js/OLMapWidget.js',
)
class OSMWidget(BaseGeometryWidget):
"""
An OpenLayers/OpenStreetMap-based widget.
"""
template_name = 'gis/openlayers-osm.html'
default_lon = 5
default_lat = 47
class Media:
js = (
'http://openlayers.org/api/2.11/OpenLayers.js',
'http://www.openstreetmap.org/openlayers/OpenStreetMap.js',
'gis/js/OLMapWidget.js',
)
@property
def map_srid(self):
# Use the official spherical mercator projection SRID on versions
# of GDAL that support it; otherwise, fall back to 900913.
if gdal.HAS_GDAL and gdal.GDAL_VERSION >= (1, 7):
return 3857
else:
return 900913
def render(self, name, value, attrs=None):
default_attrs = {
'default_lon': self.default_lon,
'default_lat': self.default_lat,
}
if attrs:
default_attrs.update(attrs)
return super(OSMWidget, self).render(name, value, default_attrs)
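# Added commentary -- a minimal usage sketch, not part of the original
# module. Assuming a Django release where django.contrib.gis.forms exposes
# PointField (1.6+), a form could render this widget like so:
#
#   from django.contrib.gis import forms
#
#   class PlaceForm(forms.Form):
#       location = forms.PointField(widget=OSMWidget(attrs={'map_width': 800}))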
|
maxrothman/config
|
refs/heads/master
|
Alfred.alfredpreferences/workflows/user.workflow.104E1FF8-0361-4CCD-B53B-76D1D5BA58F4/requests/packages/chardet/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
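# Added commentary, a toy walk-through of the scoring implemented in feed():
# in logically ordered text, a multi-letter word ending in e.g. FINAL_MEM
# bumps the logical score (case 1); the same word stored in visual order is
# reversed, so the final letter lands at the *start* of the word and bumps
# the visual score instead (case 3). get_charset_name() then compares the
# accumulated scores against the thresholds defined below.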
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
boundarydevices/android_external_chromium_org
|
refs/heads/cm-12.0
|
tools/telemetry/telemetry/core/platform/profiler/profiler_finder.py
|
55
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import discover
from telemetry.core import util
from telemetry.core.platform import profiler
def _DiscoverProfilers():
profiler_dir = os.path.dirname(__file__)
return discover.DiscoverClasses(profiler_dir, util.GetTelemetryDir(),
profiler.Profiler,
index_by_class_name=True).values()
def FindProfiler(name):
for p in _DiscoverProfilers():
if p.name() == name:
return p
return None
def GetAllAvailableProfilers():
return sorted([p.name() for p in _DiscoverProfilers()
if p.is_supported(browser_type='any')])
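# Illustrative usage (hedged -- the available profiler names depend on
# what is checked into telemetry/core/platform/profiler/):
#     profiler_class = FindProfiler('tcpdump')
#     if profiler_class is not None:
#         print profiler_class.name()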
|
NINAnor/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/gdal/fillnodata.py
|
11
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
fillnodata.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
class fillnodata(GdalAlgorithm):
INPUT = 'INPUT'
DISTANCE = 'DISTANCE'
ITERATIONS = 'ITERATIONS'
BAND = 'BAND'
MASK = 'MASK'
NO_DEFAULT_MASK = 'NO_DEFAULT_MASK'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Fill nodata')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(
self.INPUT, self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.DISTANCE,
self.tr('Search distance'), 0, 9999, 100))
self.addParameter(ParameterNumber(self.ITERATIONS,
self.tr('Smooth iterations'), 0, 9999, 0))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band to operate on'), 1, 9999, 1))
self.addParameter(ParameterRaster(self.MASK,
self.tr('Validity mask'), True))
self.addParameter(ParameterBoolean(self.NO_DEFAULT_MASK,
self.tr('Do not use default validity mask'), False))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Filled')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT)
arguments = []
arguments.append('-md')
arguments.append(unicode(self.getParameterValue(self.DISTANCE)))
if self.getParameterValue(self.ITERATIONS) != 0:
arguments.append('-si')
arguments.append(unicode(self.getParameterValue(self.ITERATIONS)))
arguments.append('-b')
arguments.append(unicode(self.getParameterValue(self.BAND)))
mask = self.getParameterValue(self.MASK)
if mask is not None:
arguments.append('-mask')
arguments.append(mask)
if self.getParameterValue(self.NO_DEFAULT_MASK):
arguments.append('-nomask')
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append(self.getParameterValue(self.INPUT))
arguments.append(output)
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'gdal_fillnodata.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['gdal_fillnodata.py',
GdalUtils.escapeAndJoin(arguments)]
return commands
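# Illustrative result (hedged -- file names are hypothetical): with the
# default parameter values on a non-Windows host, getConsoleCommands()
# yields roughly
#     ['gdal_fillnodata.py',
#      '-md 100 -b 1 -of GTiff input.tif output.tif']
# where the second element is the escapeAndJoin()-ed argument string and
# the format short name is derived from the output filename.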
|
drpngx/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/versions.py
|
23
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
__cxx11_abi_flag__ = pywrap_tensorflow.__cxx11_abi_flag__
__monolithic_build__ = pywrap_tensorflow.__monolithic_build__
VERSION = __version__
tf_export("VERSION", "__version__").export_constant(__name__, "VERSION")
GIT_VERSION = __git_version__
tf_export("GIT_VERSION", "__git_version__").export_constant(
__name__, "GIT_VERSION")
COMPILER_VERSION = __compiler_version__
tf_export("COMPILER_VERSION", "__compiler_version__").export_constant(
__name__, "COMPILER_VERSION")
CXX11_ABI_FLAG = __cxx11_abi_flag__
tf_export("CXX11_ABI_FLAG", "__cxx11_abi_flag__").export_constant(
__name__, "CXX11_ABI_FLAG")
MONOLITHIC_BUILD = __monolithic_build__
tf_export("MONOLITHIC_BUILD", "__monolithic_build__").export_constant(
__name__, "MONOLITHIC_BUILD")
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
tf_export("GRAPH_DEF_VERSION").export_constant(__name__, "GRAPH_DEF_VERSION")
GRAPH_DEF_VERSION_MIN_CONSUMER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
tf_export("GRAPH_DEF_VERSION_MIN_CONSUMER").export_constant(
__name__, "GRAPH_DEF_VERSION_MIN_CONSUMER")
GRAPH_DEF_VERSION_MIN_PRODUCER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
tf_export("GRAPH_DEF_VERSION_MIN_PRODUCER").export_constant(
__name__, "GRAPH_DEF_VERSION_MIN_PRODUCER")
__all__ = [
"__version__",
"__git_version__",
"__compiler_version__",
"__cxx11_abi_flag__",
"__monolithic_build__",
"COMPILER_VERSION",
"CXX11_ABI_FLAG",
"GIT_VERSION",
"GRAPH_DEF_VERSION",
"GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_PRODUCER",
"VERSION",
"MONOLITHIC_BUILD",
]
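# Illustrative usage (hedged -- exact values depend on the installed build):
#     import tensorflow as tf
#     print(tf.VERSION)            # e.g. '1.8.0'
#     print(tf.GIT_VERSION)        # e.g. 'v1.8.0-0-g93bc2e2072'
#     print(tf.GRAPH_DEF_VERSION)  # integer GraphDef version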
|
jlquant/yappi
|
refs/heads/master
|
test/testdeadlock.py
|
14
|
import yappi
import threading
class deadlock_thread(threading.Thread):
# acquire the lock two times to create a very
# simple deadlock:)
def foo(self):
for i in xrange(0,2): self.lock.acquire()
def run(self):
self.lock = threading.Lock()
self.foo()
yappi.start() # start the profiler
thr = deadlock_thread() # start the deadlock thread
thr.start()
while 1:
s = raw_input("yappi>")
exec s
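# Typical interactive use (illustrative): the loop above reads yappi
# statements from the "yappi>" prompt and exec's them, e.g.
#     yappi.print_stats()
# to see where the deadlocked thread is blocked. (print_stats is the
# reporting call in the yappi releases this test targets; newer releases
# may expose a different API.)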
|
shravan-achar/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/__init__.py
|
1447
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
SurfasJones/djcmsrc3
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
KristianOellegaard/django-filer
|
refs/heads/develop
|
filer/admin/__init__.py
|
1
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from filer.admin.clipboardadmin import ClipboardAdmin
from filer.admin.fileadmin import FileAdmin
from filer.admin.folderadmin import FolderAdmin
from filer.admin.imageadmin import ImageAdmin
from filer.admin.permissionadmin import PermissionAdmin
from filer.models import FolderPermission, Folder, File, Clipboard, Image
admin.site.register(FolderPermission, PermissionAdmin)
admin.site.register(Folder, FolderAdmin)
admin.site.register(File, FileAdmin)
admin.site.register(Clipboard, ClipboardAdmin)
admin.site.register(Image, ImageAdmin)
|
svogl/mbed-os
|
refs/heads/master
|
tools/host_tests/host_tests_plugins/module_copy_shell.py
|
74
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from os.path import join, basename
from host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_Shell(HostTestPluginBase):
# Plugin interface
name = 'HostTestPluginCopyMethod_Shell'
type = 'CopyMethod'
stable = True
capabilities = ['shell', 'cp', 'copy', 'xcopy']
required_parameters = ['image_path', 'destination_disk', 'program_cycle_s']
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
""" Executes capability by name.
Each capability may directly just call some command line
program or execute building pythonic function
"""
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
image_path = kwargs['image_path']
destination_disk = kwargs['destination_disk']
program_cycle_s = kwargs['program_cycle_s']
# Wait for mount point to be ready
self.check_mount_point_ready(destination_disk) # Blocking
# Prepare correct command line parameter values
image_base_name = basename(image_path)
destination_path = join(destination_disk, image_base_name)
if capability == 'shell':
if os.name == 'nt': capability = 'copy'
elif os.name == 'posix': capability = 'cp'
            if capability == 'cp' or capability == 'copy' or capability == 'xcopy':
copy_method = capability
cmd = [copy_method, image_path, destination_path]
if os.name == 'posix':
result = self.run_command(cmd, shell=False)
result = self.run_command(["sync"])
else:
result = self.run_command(cmd)
# Allow mbed to cycle
sleep(program_cycle_s)
return result
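    # Illustrative command lines built by execute() (hedged -- paths are
    # hypothetical):
    #   POSIX:   ['cp', '/tmp/image.bin', '/media/MBED/image.bin'],
    #            followed by ['sync'] to flush the mount point
    #   Windows: ['copy', 'c:\\image.bin', 'e:\\image.bin']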
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Shell()
|
liorvh/raspberry_pwn
|
refs/heads/master
|
src/pentest/metagoofil/hachoir_parser/archive/mar.py
|
95
|
"""
Microsoft Archive parser
Author: Victor Stinner
Creation date: 2007-03-04
"""
MAX_NB_FILE = 100000
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, String, UInt32, SubFile
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
class FileIndex(FieldSet):
static_size = 68*8
def createFields(self):
yield String(self, "filename", 56, truncate="\0", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize"))
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt32(self, "offset")
def createDescription(self):
return "File %s (%s) at %s" % (
self["filename"].value, self["filesize"].display, self["offset"].value)
class MarFile(Parser):
MAGIC = "MARC"
PARSER_TAGS = {
"id": "mar",
"category": "archive",
"file_ext": ("mar",),
"min_size": 80*8, # At least one file index
"magic": ((MAGIC, 0),),
"description": "Microsoft Archive",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["version"].value != 3:
return "Invalid version"
if not(1 <= self["nb_file"].value <= MAX_NB_FILE):
return "Invalid number of file"
return True
def createFields(self):
yield String(self, "magic", 4, "File signature (MARC)", charset="ASCII")
yield UInt32(self, "version")
yield UInt32(self, "nb_file")
files = []
for index in xrange(self["nb_file"].value):
item = FileIndex(self, "file[]")
yield item
if item["filesize"].value:
files.append(item)
files.sort(key=lambda item: item["offset"].value)
for index in files:
padding = self.seekByte(index["offset"].value)
if padding:
yield padding
size = index["filesize"].value
desc = "File %s" % index["filename"].value
yield SubFile(self, "data[]", size, desc, filename=index["filename"].value)
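# On-disk layout handled above (descriptive sketch, derived from the
# field definitions):
#   offset 0:  "MARC" magic (4 bytes)
#   offset 4:  version (UInt32, must be 3)
#   offset 8:  nb_file (UInt32)
#   then nb_file FileIndex records of 68 bytes each
#   (56-byte NUL-truncated filename + filesize + crc32 + offset),
#   followed by the file data blocks located via each index's offset.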
|
rjamorim/netsec-hw1
|
refs/heads/master
|
client1.py
|
1
|
# Network Security Spring 2015 Assignment 1
# Programming problem
# Roberto Amorim - rja2139
import argparse
import socket
import os.path
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto import Random
# Here I take care of the command line arguments
parser = argparse.ArgumentParser(description='Encrypts a file and sends it to a server.', add_help=True)
parser.add_argument('--server', dest = 'serverIP', required = True, help = 'Server IP Address')
parser.add_argument('--port', dest = 'serverPort', required = True, help='Server Port')
parser.add_argument('--file', dest = 'srcfile', required = True, help = 'Source file to be encrypted and sent')
parser.add_argument('--privkey', dest = 'privKey', required = True, help = 'Client 1 RSA private key')
parser.add_argument('--pubkey', dest = 'pubKey', required = True, help = 'Client 2 RSA public key')
parser.add_argument('--password', dest = 'pwd', required = True, help = 'Client password')
args = parser.parse_args()
# Here I validate the IP address
try:
socket.inet_aton(args.serverIP)
except socket.error:
print "ERROR: The IP address you provided (" + args.serverIP + ") doesn't seem to be valid!"
exit(1)
# Here I validate the server port
if args.serverPort.isdigit():
port = int(args.serverPort)
if port > 65535:
print "ERROR: The port number is outside the acceptable range! (0-65535)"
exit(1)
else:
print "ERROR: The server port must be a number!"
exit (1)
# Here I validate the filenames
if not os.path.isfile(args.srcfile):
print "ERROR: Invalid file name for source file"
exit(1)
if not os.path.isfile(args.privKey):
print "ERROR: Invalid file name for private RSA key"
exit(1)
if not os.path.isfile(args.pubKey):
print "ERROR: Invalid file name for public RSA key"
exit(1)
# Here I validate the password (length only)
if len(args.pwd) != 16:
print "ERROR: Password length must be exactly 16"
#print args.pwd
exit(1)
# All input validated, we can start working!
# The encryption and signing routines follow
## A routine to encrypt the password
def pwdcrypt(pwd, pub):
try:
with open(pub,'r') as f:
keypub = RSA.importKey(f.read())
except IOError:
print "RSA public key file can not be read! You must provide a file for which you have read permissions"
exit(1)
except:
print "The file you provided seems to be an invalid RSA public key"
exit(1)
cryptpwd = keypub.encrypt(pwd, 0)[0]
return cryptpwd
## A routine to generate the signature
def sign(message, priv):
# First I hash with SHA256
hashed = SHA256.new()
hashed.update(message)
# Now I encrypt the HASH with RSA
try:
with open(priv,'r') as f:
keypriv = RSA.importKey(f.read())
except IOError:
print "RSA private key file can not be read! You must provide a file for which you have read permissions"
exit(1)
except:
print "The file you provided seems to be an invalid RSA private key"
exit(1)
# I verify if the key imported is the private key
if not keypriv.has_private():
print "You must provide a private RSA key for signing!"
exit(1)
signature = keypriv.sign(hashed.digest(), 0)[0] #Only the first item returned matters
return signature
## A routine to pad the message so that its size becomes a multiple of block_size
def pad(message):
padding = AES.block_size - (len(message) % AES.block_size)
if padding == 0:
padding = AES.block_size
# Padding according to PKCS7:
pad = chr(padding)
return message + (pad * padding)
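# Worked padding example (illustrative): with AES.block_size == 16, a
# 13-byte message gets 3 bytes of padding, each chr(3); a message whose
# length is already a multiple of 16 gets a full extra block of chr(16).
# Either way the receiver can strip ord(last_byte) bytes to unpad.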
def encrypt(message, pwd, key_size=256):
message = pad(message)
# I create a random initialization vector the same length of the AES block size
iv = Random.new().read(AES.block_size)
cipher = AES.new(pwd, AES.MODE_CBC, iv)
return iv + cipher.encrypt(message)
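## A hedged sketch (not used by this client): how the receiving side
## would undo encrypt() above -- split off the prepended IV, decrypt in
## CBC mode, then strip the PKCS7 padding indicated by the last byte.
def decrypt(ciphertext, pwd):
    iv = ciphertext[:AES.block_size]
    cipher = AES.new(pwd, AES.MODE_CBC, iv)
    padded = cipher.decrypt(ciphertext[AES.block_size:])
    return padded[:-ord(padded[-1])]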
def encrypt_file(file_name, pwd, priv, pub):
try:
with open(file_name, 'rb') as f:
plaintext = f.read()
except IOError:
print "File can not be read! You must provide a file for which you have read permissions"
exit (1)
f.close()
ciphertext = encrypt(plaintext, pwd)
signature = sign(plaintext, priv)
cryptpwd = pwdcrypt(pwd, pub)
try:
with open(file_name + ".enc", 'wb') as f:
f.write(ciphertext)
except IOError:
print "Could not write temporary encrypted file to current folder. Please run the script from a folder you have write access to."
print "Also, you need as much available disk space as the size of the decrypted file"
exit (1)
return cryptpwd + str(signature)
signature = encrypt_file(args.srcfile,args.pwd,args.privKey,args.pubKey)
ciphertext = args.srcfile + ".enc"
# The file has been encrypted, the password and signature have been prepared, now I can send everything to the server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
## First I send the RSA signature and the encrypted password
try:
sock.connect((args.serverIP, port))
except:
print "Error connecting to the remote server. Is it running? Are the IP and port you provided correct?"
os.remove(ciphertext) # Some cleanup is adequate!
exit(1)
sock.send(signature)
sock.close()
print "Signature sent to server"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
## Then I send the encrypted file
try:
sock.connect((args.serverIP, port))
except:
print "Error connecting to the remote server. Is it running? Are the IP and port you provided correct?"
os.remove(ciphertext)
exit(1)
file = open(ciphertext, "rb")
while True:
data = file.read(1024) #I read/send the file 1024 bytes at a time
if not data:
break # EOF
sock.send(data)
file.close()
print "Encrypted file sent to server"
os.remove(ciphertext) # Client 1 does not need the encrypted file anymore
sock.close()
print "Client 1 completed all its tasks successfully. Exiting..."
exit()
|
charlottepierce/music_essentials
|
refs/heads/master
|
doc/conf.py
|
1
|
import sys
import os
sys.path.insert(0, os.path.abspath('../../music_essentials'))
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
# -*- coding: utf-8 -*-
#
# music_essentials documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 7 10:47:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'music_essentials'
copyright = u'2017, Charlotte Pierce'
author = u'Charlotte Pierce'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'music_essentialsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'music_essentials.tex', u'music\\_essentials Documentation',
u'Charlotte Pierce', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'music_essentials', u'music_essentials Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'music_essentials', u'music_essentials Documentation',
author, 'music_essentials', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
theatlantic/djangotoolbox
|
refs/heads/master
|
djangotoolbox/fields.py
|
17
|
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
""" Generic field to store anything your database backend allows you to. """
def get_internal_type(self):
return 'RawField'
class AbstractIterableField(models.Field):
"""
Abstract field for fields for storing iterable data type like ``list``,
``set`` and ``dict``.
You can pass an instance of a field as the first argument.
If you do, the iterable items will be piped through the passed field's
validation and conversion routines, converting the items to the
appropriate data type.
"""
def __init__(self, item_field=None, *args, **kwargs):
if item_field is None:
item_field = RawField()
self.item_field = item_field
default = kwargs.get('default', None if kwargs.get('null') else ())
if default is not None and not callable(default):
# ensure a new object is created every time the default is accessed
kwargs['default'] = lambda: self._type(default)
super(AbstractIterableField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
self.item_field.model = cls
self.item_field.name = name
super(AbstractIterableField, self).contribute_to_class(cls, name)
metaclass = getattr(self.item_field, '__metaclass__', None)
if issubclass(metaclass, models.SubfieldBase):
setattr(cls, self.name, _HandleAssignment(self))
@property
def db_type_prefix(self):
return self.__class__.__name__
def db_type(self, connection):
item_db_type = self.item_field.db_type(connection=connection)
return '%s:%s' % (self.db_type_prefix, item_db_type)
def _convert(self, func, values, *args, **kwargs):
if isinstance(values, (list, tuple, set)):
return self._type(func(value, *args, **kwargs) for value in values)
return values
def to_python(self, value):
return self._convert(self.item_field.to_python, value)
def pre_save(self, model_instance, add):
class fake_instance(object):
pass
fake_instance = fake_instance()
def wrapper(value):
assert not hasattr(self.item_field, 'attname')
fake_instance.value = value
self.item_field.attname = 'value'
try:
return self.item_field.pre_save(fake_instance, add)
finally:
del self.item_field.attname
return self._convert(wrapper, getattr(model_instance, self.attname))
def get_db_prep_value(self, value, connection, prepared=False):
return self._convert(self.item_field.get_db_prep_value, value,
connection=connection, prepared=prepared)
def get_db_prep_save(self, value, connection):
return self._convert(self.item_field.get_db_prep_save,
value, connection=connection)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# TODO/XXX: Remove as_lookup_value() once we have a cleaner solution
# for dot-notation queries
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return self.item_field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=prepared)
def validate(self, values, model_instance):
try:
iter(values)
except TypeError:
raise ValidationError('Value of type %r is not iterable' % type(values))
def formfield(self, **kwargs):
raise NotImplementedError('No form field implemented for %r' % type(self))
class ListField(AbstractIterableField):
"""
Field representing a Python ``list``.
If the optional keyword argument `ordering` is given, it must be a callable
that is passed to :meth:`list.sort` as `key` argument. If `ordering` is
given, the items in the list will be sorted before sending them to the
database.
"""
_type = list
db_type_prefix = 'ListField'
def __init__(self, *args, **kwargs):
self.ordering = kwargs.pop('ordering', None)
if self.ordering is not None and not callable(self.ordering):
raise TypeError("'ordering' has to be a callable or None, "
"not of type %r" % type(self.ordering))
super(ListField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
values = getattr(model_instance, self.attname)
if values is None:
return None
if values and self.ordering:
values.sort(key=self.ordering)
return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
"""
Field representing a Python ``set``.
"""
_type = set
db_type_prefix = 'SetField'
class DictField(AbstractIterableField):
"""
Field representing a Python ``dict``.
The field type conversions described in :class:`AbstractIterableField`
only affect values of the dictionary, not keys.
Depending on the backend, keys that aren't strings might not be allowed.
"""
_type = dict
db_type_prefix = 'DictField'
def _convert(self, func, values, *args, **kwargs):
if values is None:
return None
return dict((key, func(value, *args, **kwargs))
for key, value in values.iteritems())
def validate(self, values, model_instance):
if not isinstance(values, dict):
raise ValidationError('Value is of type %r. Should be a dict.' % type(values))
class BlobField(models.Field):
"""
A field for storing blobs of binary data.
The value might either be a string (or something that can be converted to
a string), or a file-like object.
In the latter case, the object has to provide a ``read`` method from which
the blob is read.
"""
def get_internal_type(self):
return 'BlobField'
def formfield(self, **kwargs):
# A file widget is provided, but use model FileField or ImageField
# for storing specific files most of the time
from .widgets import BlobWidget
from django.forms import FileField
defaults = {'form_class': FileField, 'widget': BlobWidget}
defaults.update(kwargs)
return super(BlobField, self).formfield(**defaults)
def get_db_prep_value(self, value, connection, prepared=False):
if hasattr(value, 'read'):
return value.read()
else:
return str(value)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
raise TypeError("BlobFields do not support lookups")
def value_to_string(self, obj):
return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
:param model: (optional) The model class that shall be embedded
(may also be passed as string similar to relation fields)
"""
__metaclass__ = models.SubfieldBase
def __init__(self, model=None, *args, **kwargs):
self.embedded_model = model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'DictField:RawField'
def _set_model(self, model):
# EmbeddedModelFields are not contribute[d]_to_class if using within
# ListFields (and friends), so we can only know the model field is
# used in when the IterableField sets our 'model' attribute in its
# contribute_to_class method.
# We need to know the model to generate a valid key for the lookup.
if model is not None and isinstance(self.embedded_model, basestring):
# The model argument passed to __init__ was a string, so we need
# to make sure to resolve that string to the corresponding model
# class, similar to relation fields. We abuse some of the
# relation fields' code to do the lookup here:
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
from django.db.models.fields.related import add_lazy_relation
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
self._model = model
model = property(lambda self:self._model, _set_model)
def pre_save(self, model_instance, add):
embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
if embedded_instance is None:
return None, None
model = self.embedded_model or models.Model
if not isinstance(embedded_instance, model):
raise TypeError("Expected instance of type %r, not %r" % (
type(model), type(embedded_instance)))
values = []
for field in embedded_instance._meta.fields:
value = field.pre_save(embedded_instance, add)
if field.primary_key and value is None:
# exclude unset pks ({"id" : None})
continue
values.append((field, value))
return embedded_instance, values
def get_db_prep_value(self, (embedded_instance, value_list), **kwargs):
if value_list is None:
return None
values = dict((field.column, field.get_db_prep_value(value, **kwargs))
for field, value in value_list)
if self.embedded_model is None:
values.update({'_module' : embedded_instance.__class__.__module__,
'_model' : embedded_instance.__class__.__name__})
return values
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def to_python(self, values):
if not isinstance(values, dict):
return values
module, model = values.pop('_module', None), values.pop('_model', None)
# TODO/XXX: Workaround for old Python releases. Remove this someday.
# Let's make sure keys are instances of str
values = dict([(str(k), v) for k,v in values.items()])
if module is not None:
return getattr(import_module(module), model)(**values)
return self.embedded_model(**values)
|
MounirMesselmeni/django
|
refs/heads/master
|
tests/template_backends/apps/good/templatetags/good_tags.py
|
1426
|
from django.template import Library
register = Library()
|
2gis/ansible
|
refs/heads/devel
|
v2/ansible/executor/task_result.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
emersonsoftware/ansiblefork
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/netapp_e_storage_system.py
|
45
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: netapp_e_storage_system
version_added: "2.2"
short_description: Add/remove arrays from the Web Services Proxy
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage. This value must be unique for each array.
state:
required: true
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
choices: ['present', 'absent']
controller_addresses:
required: true
description:
      - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter.
array_wwn:
required: false
description:
      - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the controller_addresses parameter.
array_password:
required: false
description:
- The management password of the array to manage, if set.
enable_trace:
required: false
default: false
description:
- Enable trace logging for SYMbol calls to the storage system.
meta_tags:
required: false
default: None
description:
- Optional meta tags to associate to this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
'''
RETURN = '''
msg: Storage system removed.
msg: Storage system added.
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
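# Illustrative call (hedged -- URL and credentials are hypothetical):
#     rc, data = request('https://proxy.example.com/devmgr/v2/storage-systems',
#                        url_username='admin', url_password='secret',
#                        validate_certs=False, ignore_errors=True)
#     # rc is the HTTP status code; data is the decoded JSON body (or the
#     # raw error payload when the proxy returns an error).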
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
(rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
method='POST', url_username=api_usr, url_password=api_pwd,
validate_certs=validate_certs)
status = None
return_resp = resp
if 'status' in resp:
status = resp['status']
if rc == 201:
status = 'neverContacted'
fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
while status == 'neverContacted':
if dt.utcnow() > fail_after_time:
raise Exception("web proxy timed out waiting for array status")
sleep(1)
(rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
headers=dict(Accept="application/json"), url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
status = system_resp['status']
return_resp = system_resp
return status, return_resp
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
ssid=dict(required=True, type='str'),
controller_addresses=dict(type='list'),
array_wwn=dict(required=False, type='str'),
array_password=dict(required=False, type='str', no_log=True),
array_status_timeout_sec=dict(default=60, type='int'),
enable_trace=dict(default=False, type='bool'),
meta_tags=dict(type='list')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['controller_addresses', 'array_wwn']],
required_if=[('state', 'present', ['controller_addresses'])]
)
p = module.params
state = p['state']
ssid = p['ssid']
controller_addresses = p['controller_addresses']
array_wwn = p['array_wwn']
array_password = p['array_password']
array_status_timeout_sec = p['array_status_timeout_sec']
validate_certs = p['validate_certs']
meta_tags = p['meta_tags']
enable_trace = p['enable_trace']
api_usr = p['api_username']
api_pwd = p['api_password']
api_url = p['api_url']
changed = False
array_exists = False
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
except:
err = get_exception()
module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, str(err)))
array_exists = True
array_detail = resp
if rc == 200:
if state == 'absent':
changed = True
array_exists = False
elif state == 'present':
current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
if set(controller_addresses) != current_addresses:
changed = True
if array_detail['wwn'] != array_wwn and array_wwn is not None:
module.fail_json(
msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
elif rc == 404:
if state == 'present':
changed = True
array_exists = False
else:
changed = False
module.exit_json(changed=changed, msg="Storage system was not present.")
if changed and not module.check_mode:
if state == 'present':
if not array_exists:
# add the array
array_add_req = dict(
id=ssid,
controllerAddresses=controller_addresses,
metaTags=meta_tags,
enableTrace=enable_trace
)
if array_wwn:
array_add_req['wwn'] = array_wwn
if array_password:
array_add_req['password'] = array_password
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
request_data = json.dumps(array_add_req)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
array_status_timeout_sec)
except:
err = get_exception()
module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, request_data, str(err)))
else: # array exists, modify...
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
post_body = dict(
controllerAddresses=controller_addresses,
removeAllTags=True,
enableTrace=enable_trace,
metaTags=meta_tags
)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
array_status_timeout_sec)
except:
err = get_exception()
module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, post_body, str(err)))
elif state == 'absent':
# delete the array
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs)
except:
err = get_exception()
module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, str(err)))
if rc == 422:
module.exit_json(changed=changed, msg="Storage system was not presnt.")
if rc == 204:
module.exit_json(changed=changed, msg="Storage system removed.")
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()
|
auready/django
|
refs/heads/master
|
django/db/backends/base/operations.py
|
9
|
import datetime
import decimal
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import timezone
from django.utils.dateparse import parse_duration
from django.utils.encoding import force_text
class BaseDatabaseOperations:
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
set_operators = {
'union': 'UNION',
'intersection': 'INTERSECT',
'difference': 'EXCEPT',
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method')
def datetime_cast_time_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to time value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that truncates the given time field field_name to a time object with
only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
elif skip_locked:
return 'FOR UPDATE SKIP LOCKED'
else:
return 'FOR UPDATE'
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain string values.
def to_string(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_string(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_string(k): to_string(v) for k, v in params.items()}
return "QUERY = %r - PARAMS = %r" % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
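    # Example for prepare_sql_script() (illustrative, subject to
    # sqlparse's exact whitespace handling):
    #     prepare_sql_script("SELECT 1; -- x\nSELECT 2;")
    # returns roughly ['SELECT 1;', 'SELECT 2;'] with the comment stripped.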
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
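# A brief illustration of the escaping (characters shown literally; `ops`
# is a placeholder for a concrete operations instance):
#
#   ops.prep_for_like_query('50%_off') -> '50\%\_off'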
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method raises a ValueError
if the value is invalid; otherwise it returns the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
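# For example, with the default adapters above (which stringify dates):
#
#   ops.adapt_unknown_value(datetime.date(2020, 1, 2)) -> '2020-01-02'
#   ops.adapt_unknown_value(42) -> 42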
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return str(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return str(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return str(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
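# For example, with the default str-based adapt_datefield_value() above:
#
#   ops.year_lookup_bounds_for_date_field(2005)
#   -> ['2005-01-01', '2005-12-31']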
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions).
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
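# For instance:
#
#   ops.combine_expression('+', ['"price"', '"tax"']) -> '"price" + "tax"'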
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
|
ymollard/apex_playground
|
refs/heads/master
|
ros/apex_playground/src/apex_playground/controller/learning.py
|
2
|
import rospy
from apex_playground.srv import *
class Learning(object):
def __init__(self):
self.services = {'produce': {'name': 'learning/produce', 'type': Produce},
'perceive': {'name': 'learning/perceive', 'type': Perceive}}
for service_name, service in self.services.items():
rospy.loginfo("Controller is waiting service {}...".format(service['name']))
rospy.wait_for_service(service['name'])
service['call'] = rospy.ServiceProxy(service['name'], service['type'])
def perceive(self, demonstration):
call = self.services['perceive']['call']
return call(PerceiveRequest(demo=demonstration)).success
def produce(self, skill_to_demonstrate=""):
call = self.services['produce']['call']
return call(ProduceRequest(skill_to_demonstrate=skill_to_demonstrate))
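# Minimal usage sketch (hypothetical; assumes rospy.init_node() has been
# called and the learning services above are advertised):
#
#   learning = Learning()
#   demo = learning.produce()
#   success = learning.perceive(demo)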
|
you21979/phantomjs
|
refs/heads/2.0
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py
|
121
|
# Copyright (C) 2012 Intel Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cmake.py."""
import unittest2 as unittest
from cmake import CMakeChecker
class CMakeCheckerTest(unittest.TestCase):
"""Tests CMakeChecker class."""
def test_init(self):
"""Test __init__() method."""
def _mock_handle_style_error(self):
pass
checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
self.assertEqual(checker._handle_style_error, _mock_handle_style_error)
def test_check(self):
"""Test check() method."""
errors = []
def _mock_handle_style_error(line_number, category, confidence,
message):
error = (line_number, category, confidence, message)
errors.append(error)
checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
lines = [
'# This file is sample input for cmake_unittest.py and includes the problems below:\n',
'IF ()',
'\tmessage("Error line with Tab")\n',
' message("Error line with endding spaces") \n',
' message( "Error line with space after (")\n',
' message("Error line with space before (" )\n',
' MESSAGE("Error line with upper case non-condtional command")\n',
' MESSage("Error line with upper case non-condtional command")\n',
' message("correct message line")\n',
'ENDif ()\n',
'\n',
'if()\n',
'endif ()\n',
'\n',
'macro ()\n',
'ENDMacro()\n',
'\n',
'function ()\n',
'endfunction()\n',
]
checker.check(lines)
self.maxDiff = None
self.assertEqual(errors, [
(3, 'whitespace/tab', 5, 'Line contains tab character.'),
(2, 'command/lowercase', 5, 'Use lowercase command "if"'),
(4, 'whitespace/trailing', 5, 'No trailing spaces'),
(5, 'whitespace/parentheses', 5, 'No space after "("'),
(6, 'whitespace/parentheses', 5, 'No space before ")"'),
(7, 'command/lowercase', 5, 'Use lowercase command "message"'),
(8, 'command/lowercase', 5, 'Use lowercase command "message"'),
(10, 'command/lowercase', 5, 'Use lowercase command "endif"'),
(12, 'whitespace/parentheses', 5, 'One space between command "if" and its parentheses, should be "if ("'),
(15, 'whitespace/parentheses', 5, 'No space between command "macro" and its parentheses, should be "macro("'),
(16, 'command/lowercase', 5, 'Use lowercase command "endmacro"'),
(18, 'whitespace/parentheses', 5, 'No space between command "function" and its parentheses, should be "function("'),
])
|
Wojtechnology/Muzit
|
refs/heads/master
|
StreetMuse/lib/python2.7/site-packages/tornado/httputil.py
|
22
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HTTP utility code shared by clients and servers.
This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import calendar
import collections
import copy
import datetime
import email.utils
import numbers
import re
import time
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict, bytes_type
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
from httplib import responses # py2
except ImportError:
from http.client import responses # py3
# responses is unused in this file, but we re-export it to other files.
# Reference it so pyflakes doesn't complain.
responses
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
try:
from ssl import SSLError
except ImportError:
# ssl is unavailable on app engine.
class SSLError(Exception):
pass
class _NormalizedHeaderCache(dict):
"""Dynamic cached mapping of header names to Http-Header-Case.
Implemented as a dict subclass so that cache hits are as fast as a
normal dict lookup, without the overhead of a python function
call.
>>> normalized_headers = _NormalizedHeaderCache(10)
>>> normalized_headers["coNtent-TYPE"]
'Content-Type'
"""
def __init__(self, size):
super(_NormalizedHeaderCache, self).__init__()
self.size = size
self.queue = collections.deque()
def __missing__(self, key):
normalized = "-".join([w.capitalize() for w in key.split("-")])
self[key] = normalized
self.queue.append(key)
if len(self.queue) > self.size:
# Limit the size of the cache. LRU would be better, but this
# simpler approach should be fine. In Python 2.7+ we could
# use OrderedDict (or in 3.2+, @functools.lru_cache).
old_key = self.queue.popleft()
del self[old_key]
return normalized
_normalized_headers = _NormalizedHeaderCache(1000)
class HTTPHeaders(dict):
"""A dictionary that maintains ``Http-Header-Case`` for all keys.
Supports multiple values per key via a pair of new methods,
`add()` and `get_list()`. The regular dictionary interface
returns a single value per key, with multiple values joined by a
comma.
>>> h = HTTPHeaders({"content-type": "text/html"})
>>> list(h.keys())
['Content-Type']
>>> h["Content-Type"]
'text/html'
>>> h.add("Set-Cookie", "A=B")
>>> h.add("Set-Cookie", "C=D")
>>> h["set-cookie"]
'A=B,C=D'
>>> h.get_list("set-cookie")
['A=B', 'C=D']
>>> for (k,v) in sorted(h.get_all()):
... print('%s: %s' % (k,v))
...
Content-Type: text/html
Set-Cookie: A=B
Set-Cookie: C=D
"""
def __init__(self, *args, **kwargs):
# Don't pass args or kwargs to dict.__init__, as it will bypass
# our __setitem__
dict.__init__(self)
self._as_list = {}
self._last_key = None
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], HTTPHeaders)):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name, value):
"""Adds a new value for the given key."""
norm_name = _normalized_headers[name]
self._last_key = norm_name
if norm_name in self:
# bypass our override of __setitem__ since it modifies _as_list
dict.__setitem__(self, norm_name,
native_str(self[norm_name]) + ',' +
native_str(value))
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = _normalized_headers[name]
return self._as_list.get(norm_name, [])
def get_all(self):
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, values in self._as_list.items():
for value in values:
yield (name, value)
def parse_line(self, line):
"""Updates the dictionary with a single header line.
>>> h = HTTPHeaders()
>>> h.parse_line("Content-Type: text/html")
>>> h.get('content-type')
'text/html'
"""
if line[0].isspace():
# continuation of a multi-line header
new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part
dict.__setitem__(self, self._last_key,
self[self._last_key] + new_part)
else:
name, value = line.split(":", 1)
self.add(name, value.strip())
@classmethod
def parse(cls, headers):
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
"""
h = cls()
for line in headers.splitlines():
if line:
h.parse_line(line)
return h
# dict implementation overrides
def __setitem__(self, name, value):
norm_name = _normalized_headers[name]
dict.__setitem__(self, norm_name, value)
self._as_list[norm_name] = [value]
def __getitem__(self, name):
return dict.__getitem__(self, _normalized_headers[name])
def __delitem__(self, name):
norm_name = _normalized_headers[name]
dict.__delitem__(self, norm_name)
del self._as_list[norm_name]
def __contains__(self, name):
norm_name = _normalized_headers[name]
return dict.__contains__(self, norm_name)
def get(self, name, default=None):
return dict.get(self, _normalized_headers[name], default)
def update(self, *args, **kwargs):
# dict.update bypasses our __setitem__
for k, v in dict(*args, **kwargs).items():
self[k] = v
def copy(self):
# default implementation returns dict(self), not the subclass
return HTTPHeaders(self)
class HTTPServerRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps arguments names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: query_arguments
Same format as ``arguments``, but contains only arguments extracted
from the query string.
.. versionadded:: 3.2
.. attribute:: body_arguments
Same format as ``arguments``, but contains only arguments extracted
from the request body.
.. versionadded:: 3.2
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
.. versionchanged:: 4.0
Moved from ``tornado.httpserver.HTTPRequest``.
"""
def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
body=None, host=None, files=None, connection=None,
start_line=None):
if start_line is not None:
method, uri, version = start_line
self.method = method
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or ""
# set remote IP and protocol
context = getattr(connection, 'context', None)
self.remote_ip = getattr(context, 'remote_ip', None)
self.protocol = getattr(context, 'protocol', "http")
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
self.query_arguments = copy.deepcopy(self.arguments)
self.body_arguments = {}
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics.
.. deprecated:: 4.0
Applications are less likely to need this information with the
introduction of `.HTTPConnection`. If you still need it, access
the ``version`` attribute directly.
"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
self._cookies.load(
native_str(self.headers["Cookie"]))
except Exception:
self._cookies = {}
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
assert isinstance(chunk, bytes_type)
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self, binary_form=False):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer must have been constructed
with cert_reqs set in ssl_options, e.g.::
server = HTTPServer(app,
ssl_options=dict(
certfile="foo.crt",
keyfile="foo.key",
cert_reqs=ssl.CERT_REQUIRED,
ca_certs="cacert.crt"))
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert(
binary_form=binary_form)
except SSLError:
return None
def _parse_body(self):
parse_body_arguments(
self.headers.get("Content-Type", ""), self.body,
self.body_arguments, self.files,
self.headers)
for k, v in self.body_arguments.items():
self.arguments.setdefault(k, []).extend(v)
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
class HTTPInputError(Exception):
"""Exception class for malformed HTTP requests or responses
from remote sources.
.. versionadded:: 4.0
"""
pass
class HTTPOutputError(Exception):
"""Exception class for errors in HTTP output.
.. versionadded:: 4.0
"""
pass
class HTTPServerConnectionDelegate(object):
"""Implement this interface to handle requests from `.HTTPServer`.
.. versionadded:: 4.0
"""
def start_request(self, server_conn, request_conn):
"""This method is called by the server when a new request has started.
:arg server_conn: is an opaque object representing the long-lived
(e.g. tcp-level) connection.
:arg request_conn: is a `.HTTPConnection` object for a single
request/response exchange.
This method should return a `.HTTPMessageDelegate`.
"""
raise NotImplementedError()
def on_close(self, server_conn):
"""This method is called when a connection has been closed.
:arg server_conn: is a server connection that has previously been
passed to ``start_request``.
"""
pass
class HTTPMessageDelegate(object):
"""Implement this interface to handle an HTTP request or response.
.. versionadded:: 4.0
"""
def headers_received(self, start_line, headers):
"""Called when the HTTP headers have been received and parsed.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
depending on whether this is a client or server message.
:arg headers: a `.HTTPHeaders` instance.
Some `.HTTPConnection` methods can only be called during
``headers_received``.
May return a `.Future`; if it does the body will not be read
until it is done.
"""
pass
def data_received(self, chunk):
"""Called when a chunk of data has been received.
May return a `.Future` for flow control.
"""
pass
def finish(self):
"""Called after the last chunk of data has been received."""
pass
def on_connection_close(self):
"""Called if the connection is closed without finishing the request.
If ``headers_received`` is called, either ``finish`` or
``on_connection_close`` will be called, but not both.
"""
pass
class HTTPConnection(object):
"""Applications use this interface to write their responses.
.. versionadded:: 4.0
"""
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Write an HTTP header block.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
:arg headers: a `.HTTPHeaders` instance.
:arg chunk: the first (optional) chunk of data. This is an optimization
so that small responses can be written in the same call as their
headers.
:arg callback: a callback to be run when the write is complete.
Returns a `.Future` if no callback is given.
"""
raise NotImplementedError()
def write(self, chunk, callback=None):
"""Writes a chunk of body data.
The callback will be run when the write is complete. If no callback
is given, returns a Future.
"""
raise NotImplementedError()
def finish(self):
"""Indicates that the last body data has been written.
"""
raise NotImplementedError()
def url_concat(url, args):
"""Concatenate url and argument dictionary regardless of whether
url has existing query parameters.
>>> url_concat("http://example.com/foo?a=b", dict(c="d"))
'http://example.com/foo?a=b&c=d'
"""
if not args:
return url
if url[-1] not in ('?', '&'):
url += '&' if ('?' in url) else '?'
return url + urlencode(args)
class HTTPFile(ObjectDict):
"""Represents a file uploaded via a form.
For backwards compatibility, its instance attributes are also
accessible as dictionary keys.
* ``filename``
* ``body``
* ``content_type``
"""
pass
def _parse_request_range(range_header):
"""Parses a Range header.
Returns either ``None`` or tuple ``(start, end)``.
Note that while the HTTP headers use inclusive byte positions,
this method returns indexes suitable for use in slices.
>>> start, end = _parse_request_range("bytes=1-2")
>>> start, end
(1, 3)
>>> [0, 1, 2, 3, 4][start:end]
[1, 2]
>>> _parse_request_range("bytes=6-")
(6, None)
>>> _parse_request_range("bytes=-6")
(-6, None)
>>> _parse_request_range("bytes=-0")
(None, 0)
>>> _parse_request_range("bytes=")
(None, None)
>>> _parse_request_range("foo=42")
>>> _parse_request_range("bytes=1-2,6-10")
Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed).
See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
"""
unit, _, value = range_header.partition("=")
unit, value = unit.strip(), value.strip()
if unit != "bytes":
return None
start_b, _, end_b = value.partition("-")
try:
start = _int_or_none(start_b)
end = _int_or_none(end_b)
except ValueError:
return None
if end is not None:
if start is None:
if end != 0:
start = -end
end = None
else:
end += 1
return (start, end)
def _get_content_range(start, end, total):
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total)
def _int_or_none(val):
val = val.strip()
if val == "":
return None
return int(val)
def parse_body_arguments(content_type, body, arguments, files, headers=None):
"""Parses a form request body.
Supports ``application/x-www-form-urlencoded`` and
``multipart/form-data``. The ``content_type`` parameter should be
a string and ``body`` should be a byte string. The ``arguments``
and ``files`` parameters are dictionaries that will be updated
with the parsed contents.
"""
if headers and 'Content-Encoding' in headers:
gen_log.warning("Unsupported Content-Encoding: %s",
headers['Content-Encoding'])
return
if content_type.startswith("application/x-www-form-urlencoded"):
try:
uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
except Exception as e:
gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
uri_arguments = {}
for name, values in uri_arguments.items():
if values:
arguments.setdefault(name, []).extend(values)
elif content_type.startswith("multipart/form-data"):
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
gen_log.warning("Invalid multipart/form-data")
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a ``multipart/form-data`` body.
The ``boundary`` and ``data`` parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b"--" + boundary + b"--")
if final_boundary_index == -1:
gen_log.warning("Invalid multipart/form-data: no final boundary")
return
parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
for part in parts:
if not part:
continue
eoh = part.find(b"\r\n\r\n")
if eoh == -1:
gen_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b"\r\n"):
gen_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
gen_log.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(HTTPFile(
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value)
def format_timestamp(ts):
"""Formats a timestamp in the format used by HTTP.
The argument may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
object.
>>> format_timestamp(1359312200)
'Sun, 27 Jan 2013 18:43:20 GMT'
"""
if isinstance(ts, numbers.Real):
pass
elif isinstance(ts, (tuple, time.struct_time)):
ts = calendar.timegm(ts)
elif isinstance(ts, datetime.datetime):
ts = calendar.timegm(ts.utctimetuple())
else:
raise TypeError("unknown timestamp type: %r" % ts)
return email.utils.formatdate(ts, usegmt=True)
RequestStartLine = collections.namedtuple(
'RequestStartLine', ['method', 'path', 'version'])
def parse_request_start_line(line):
"""Returns a (method, path, version) tuple for an HTTP 1.x request line.
The response is a `collections.namedtuple`.
>>> parse_request_start_line("GET /foo HTTP/1.1")
RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
"""
try:
method, path, version = line.split(" ")
except ValueError:
raise HTTPInputError("Malformed HTTP request line")
if not version.startswith("HTTP/"):
raise HTTPInputError(
"Malformed HTTP version in HTTP Request-Line: %r" % version)
return RequestStartLine(method, path, version)
ResponseStartLine = collections.namedtuple(
'ResponseStartLine', ['version', 'code', 'reason'])
def parse_response_start_line(line):
"""Returns a (version, code, reason) tuple for an HTTP 1.x response line.
The response is a `collections.namedtuple`.
>>> parse_response_start_line("HTTP/1.1 200 OK")
ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
"""
line = native_str(line)
match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line)
if not match:
raise HTTPInputError("Error parsing response start line")
return ResponseStartLine(match.group(1), int(match.group(2)),
match.group(3))
# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
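# A short example (values shown as native strings):
#
#   _parse_header('form-data; name="files"; filename="fname.ext"')
#   -> ('form-data', {'name': 'files', 'filename': 'fname.ext'})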
def doctests():
import doctest
return doctest.DocTestSuite()
|
vetscience/Tools
|
refs/heads/master
|
Cwl/renamereads/renameFasta.py
|
1
|
#!/usr/bin/env python
import os, sys, optparse
from base import Base
#################################################
def options():
parser = optparse.OptionParser('usage: "python %prog -i filename -f reffile" or "cat file | python %prog -i - -f reffile" or "cat file | python %prog -f reffile"')
parser.add_option('-i', '--fasta', dest='fasta', help='FASTA file to filter', metavar='FASTA', default='-')
parser.add_option('-p', '--prefix', dest='prefix', help='Prefix in renamed FASTA header', metavar='PREFIX', default='Seq')
parser.add_option('-m', '--mapped', dest='mapped', help='Mapped identifiers', metavar='MAPPED', default='mapped.ids')
#parser.add_option('-a', '--assemble', dest='assemble', action='store_true', help='Do assembly', default=False)
options, args = parser.parse_args()
if options.fasta == '':
parser.print_help()
sys.exit(1)
return options
#################################################
def main():
'''
'''
opts = options()
prefix = opts.prefix
#fasta = Fasta(opts.fasta)
base = Base()
handle = base.ropen(opts.fasta)
cnt = 1
with open(opts.mapped, "w") as handleW:
for line in handle:
if line[0] == ">":
newHeader = "%s%ds" %(prefix, cnt)
print ">%s" %newHeader
handleW.write("%s\t%s\n" %(newHeader, line.strip()[1:]) )
cnt += 1
else:
print line,
base.rclose()
#################################################
if __name__ == "__main__":
main()
|
AlkamiCode/snake-game
|
refs/heads/gh-pages
|
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
|
1284
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
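# Illustrative behavior on a non-Windows flavor (shell quoting is only
# added when the argument contains characters outside the whitelist):
#
#   Define('SIMPLE', 'linux') -> '-DSIMPLE'
#   Define('FOO="bar"', 'linux') -> "'-DFOO=\"bar\"'"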
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
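# For example:
#
#   AddArch('obj/foo.o', 'x64') -> 'obj/foo.x64.o'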
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
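# An illustrative call (paths are hypothetical):
#
#   writer.ExpandSpecial('$!PRODUCT_DIR/foo.ninja', '../out') -> '../out/foo.ninja'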
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
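      # For example (hypothetical numbers): m = 3 extra inputs and n = 100
      # sources would otherwise emit 300 input references; the collapsing
      # stamp written below reduces that to 3 + 100.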
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
            assert var is None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
    This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the asset catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory. If this is not the case, then this build step will probably
    be rerun at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
      cmd_map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
      cmd = cmd_map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
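      # For example (hypothetical targets): a static library dependency adds
      # its .a to the link line via link_deps below, while a code-generating
      # 'none' target contributes only its stamp file through implicit_deps.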
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
        # If an rspfile name contains spaces, ninja surrounds the filename
        # with quotes and then passes it to open(), creating a file with
        # quotes in its name (and when looking for the rsp file, the name
        # makes it through bash, which strips the quotes) :-/
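        # For example, a hypothetical 'My Framework.framework.rsp' is
        # written to disk as 'My_Framework.framework.rsp' by the line below.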
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
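    # For example, with a hypothetical build_to_base of '../../foo', the
    # inserted postbuild is the shell command 'cd ../../foo'.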
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be nonzero if any postbuild fails. Run all postbuilds in a
    # subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
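    # For example (hypothetical input): [('FOO', 'foo bar')] yields roughly
    # 'export FOO="foo bar";' after POSIX quoting and ninja escaping.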
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
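    # For example, a description 'echo ${source} > $out' becomes
    # 'echo ${source} > _out': ${source} is protected, bare $out is not.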
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything
  # here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
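    # For example, a hypothetical 64 GB machine gives 64 / 5 = 12 links
    # (integer division) before the GYP_LINK_CONCURRENCY_MAX cap applies.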
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
          # MemTotal is reported in kB; allow 8 GB per link because Gold is
          # quite memory-hungry.
          return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - Priority, from lowest to highest: the gcc/g++ defaults, the
  #   'make_global_settings' in gyp, then the environment variables.
  # - If there is no 'make_global_settings' for CC.host/CXX.host and no
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be
  #   set to cc/cxx.
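  # For example (hypothetical settings): with ['CC', 'gcc'] in
  # make_global_settings and CC_target=clang in the environment, clang is
  # what GetEnvironFallback picks for target compiles below.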
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoid polluting
      # the master ninja file with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
    # Environment variables may be referenced in 'make_global_settings',
    # e.g. ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; expand them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
    # TODO(scottmg) Using separate pdb names is a test to see if it works
    # around http://crbug.com/142362. It seems there's a race between the
    # creation of the .pdb by the precompiled header step for .cc files and
    # the compilation of .c files. This should be handled by mspdbsrv, but
    # it still occasionally errors out with:
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %(suffix)s which
    # is used in the final substitution below.
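    # In effect, downstream targets relink only when the SONAME or the
    # exported symbol list recorded in $lib.TOC actually changes, not on
    # every rebuild of $lib.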
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
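  # For reference, a hypothetical rendering of the non-Windows 'copy' rule
  # above as it would appear in the generated build.ninja:
  #   rule copy
  #     description = COPY $in $out
  #     command = rm -rf $out && cp -af $in $out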
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
  # Short names of targets that were skipped because they didn't contain
  # anything interesting.
  # NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
  # NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
      # Only create files for ninja fragments that actually have contents
      # (see the write-if-nonempty sketch after this function).
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique, only do this for short names
  # that haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
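# A minimal sketch (hypothetical, illustration only) of the buffering pattern
# used in GenerateOutputForConfig above: each target's ninja fragment is
# rendered into a StringIO first and only written to disk when non-empty, so
# targets that produced no rules leave no stray .ninja files behind.
def _write_if_nonempty_sketch(path, buf):
  # 'buf' is a StringIO that a NinjaWriter may or may not have written into.
  if buf.tell() > 0:
    with OpenOutput(path) as ninja_file:
      ninja_file.write(buf.getvalue())
  buf.close()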
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
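# A minimal sketch (hypothetical, illustration only) of the same SIGINT
# pattern expressed as a Pool initializer: each worker ignores SIGINT, so
# Ctrl-C reaches only the parent, which then calls pool.terminate() as in
# GenerateOutput below, e.g. multiprocessing.Pool(n, _ignore_sigint_sketch).
def _ignore_sigint_sketch():
  signal.signal(signal.SIGINT, signal.SIG_IGN)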
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
jhgoebbert/cvl-fabric-launcher
|
refs/heads/master
|
pyinstaller-2.1/tests/basic/test_module_attributes.py
|
7
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Compare attributes of the ElementTree (cElementTree) module from a frozen
# executable with the ElementTree (cElementTree) module from standard Python.
import copy
import os
import subprocess
import sys
import xml.etree.ElementTree as ET
import xml.etree.cElementTree as cET
if hasattr(sys, 'frozen'):
    # In frozen mode the current working dir is the directory containing
    # the final executable.
_pyexe_file = os.path.join('..', '..', 'python_exe.build')
else:
_pyexe_file = 'python_exe.build'
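# python_exe.build layout, as consumed below: the first line holds the path
# to the reference Python executable and the third line the PATH value to
# run it with.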
_lines = open(_pyexe_file).readlines()
_pyexe = _lines[0].strip()
_env_path = _lines[2].strip()
def exec_python(pycode):
"""
Wrap running python script in a subprocess.
Return stdout of the invoked command.
"""
# Environment variable 'PATH' has to be defined on Windows.
# Otherwise dynamic library pythonXY.dll cannot be found by
# Python executable.
env = copy.deepcopy(os.environ)
env['PATH'] = _env_path
out = subprocess.Popen([_pyexe, '-c', pycode], env=env,
stdout=subprocess.PIPE, shell=False).stdout.read()
return out.strip()
def compare(test_name, expect, frozen):
    # PyInstaller sets the attribute '__loader__'. Remove this attribute from
    # the module properties before comparing.
frozen.remove('__loader__')
frozen = str(frozen)
print(test_name)
print(' Attributes expected: ' + expect)
print(' Attributes current: ' + frozen)
print('')
    # Compare attributes of the frozen module with the unfrozen module.
if not frozen == expect:
        raise SystemExit('Frozen module does not have the same attributes as unfrozen.')
## Pure Python module.
_expect = exec_python('import xml.etree.ElementTree as ET; print dir(ET)')
_frozen = dir(ET)
compare('ElementTree', _expect, _frozen)
## C-extension Python module.
_expect = exec_python('import xml.etree.cElementTree as cET; print dir(cET)')
_frozen = dir(cET)
compare('cElementTree', _expect, _frozen)
|
RJVB/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/irixcc.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_irixcc(conf):
	v = conf.env
	cc = None
	if v['CC']:
		cc = v['CC']
	elif 'CC' in conf.environ:
		cc = conf.environ['CC']
	if not cc:
		cc = conf.find_program('cc', var='CC')
	if not cc:
		conf.fatal('irixcc was not found')
	cc = conf.cmd_to_list(cc)
	try:
		conf.cmd_and_log(cc + ['-version'])
	except Exception:
		conf.fatal('%r -version could not be executed' % cc)
	v['CC'] = cc
	v['CC_NAME'] = 'irix'
@conf
def irixcc_common_flags(conf):
	v = conf.env
	v['CC_SRC_F'] = ''
	v['CC_TGT_F'] = ['-c', '-o']
	v['CPPPATH_ST'] = '-I%s'
	v['DEFINES_ST'] = '-D%s'
	if not v['LINK_CC']:
		v['LINK_CC'] = v['CC']
	v['CCLNK_SRC_F'] = ''
	v['CCLNK_TGT_F'] = ['-o']
	v['LIB_ST'] = '-l%s'
	v['LIBPATH_ST'] = '-L%s'
	v['STLIB_ST'] = '-l%s'
	v['STLIBPATH_ST'] = '-L%s'
	v['cprogram_PATTERN'] = '%s'
	v['cshlib_PATTERN'] = 'lib%s.so'
	v['cstlib_PATTERN'] = 'lib%s.a'
def configure(conf):
conf.find_irixcc()
conf.find_cpp()
conf.find_ar()
conf.irixcc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
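# Hypothetical usage sketch (not part of the generated waf file): a project
# wscript would typically load this tool during configuration, e.g.
#
#	def configure(conf):
#		conf.load('irixcc')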
|
abradle/jython_test
|
refs/heads/master
|
src/find_props/find_props.py
|
1
|
# File to calculate properties for a molecule and add these properties back to the molecule
# The property to be calculated is passed in via a request.header string
from java import lang
lang.System.loadLibrary('GraphMolWrap')
from org.RDKit import *
from threading import Thread
import os
def num_hba(mol):
"""Function for calculating number of H-bond acceptors
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumHBA(mol)
def num_hbd(mol):
"""Function for calculating number of H-bond donors
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumHBD(mol)
def num_rings(mol):
"""Function for calculating number of rings
Takes an RDKit molecule
Returns an int"""
return RDKFuncs.calcNumRings(mol)
def mol_logp(mol, ret_val=False):
"""Function for calculating mol log p
Takes an RDKit molecule
    Returns a float"""
return RDKFuncs.calcMolLogP(mol)
# A dictionary relating function names to functions
funct_dict = {"num_hba": num_hba,
"num_hbd": num_hbd,
"num_rings": num_rings,
"mol_logp": mol_logp}
def calc_props(request):
for mol in request.body:
val = funct_dict[request.function](mol)
mol.setProp(request.function, str(val))
# The request comprises two parts:
## 1) Stream of molecules
## 2) String naming the property to calculate
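# A minimal sketch (hypothetical, illustration only) of the request object
# this script expects; in production the hosting framework is assumed to
# inject 'request' at runtime, which is why __main__ below references it
# without defining it.
class ExampleRequest(object):
    def __init__(self, mols, function):
        self.body = mols          # iterable of RDKit molecules
        self.function = function  # key into funct_dict, e.g. "num_hba"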
if __name__ == "__main__":
print "calculating properties"
calc_props(request)
|