| repo_name | ref | path | copies | content |
|---|---|---|---|---|
partofthething/home-assistant | refs/heads/dev | homeassistant/components/schluter/__init__.py | 21 | """The Schluter DITRA-HEAT integration."""
import logging
from requests import RequestException, Session
from schluter.api import Api
from schluter.authenticator import AuthenticationState, Authenticator
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_SCHLUTER_SESSION = "schluter_session"
DATA_SCHLUTER_API = "schluter_api"
SCHLUTER_CONFIG_FILE = ".schluter.conf"
API_TIMEOUT = 10
CONFIG_SCHEMA = vol.Schema(
{
vol.Required(DOMAIN): vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
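# A minimal, illustrative configuration.yaml entry accepted by the schema
# above (placeholder credentials, not values from the original source):
#
#   schluter:
#     username: user@example.com
#     password: !secret schluter_password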
def setup(hass, config):
"""Set up the Schluter component."""
_LOGGER.debug("Starting setup of schluter")
conf = config[DOMAIN]
api_http_session = Session()
api = Api(timeout=API_TIMEOUT, http_session=api_http_session)
authenticator = Authenticator(
api,
conf.get(CONF_USERNAME),
conf.get(CONF_PASSWORD),
session_id_cache_file=hass.config.path(SCHLUTER_CONFIG_FILE),
)
authentication = None
try:
authentication = authenticator.authenticate()
except RequestException as ex:
_LOGGER.error("Unable to connect to Schluter service: %s", ex)
        return False
state = authentication.state
if state == AuthenticationState.AUTHENTICATED:
hass.data[DOMAIN] = {
DATA_SCHLUTER_API: api,
DATA_SCHLUTER_SESSION: authentication.session_id,
}
discovery.load_platform(hass, "climate", DOMAIN, {}, config)
return True
if state == AuthenticationState.BAD_PASSWORD:
_LOGGER.error("Invalid password provided")
return False
if state == AuthenticationState.BAD_EMAIL:
_LOGGER.error("Invalid email provided")
return False
_LOGGER.error("Unknown set up error: %s", state)
return False
|
reage/pp | refs/heads/master | 0005.py | 1 | from PIL import Image
import os, glob
def resizeImgInFolder(folder, maxHeight=1136, maxWidth=640):
    # glob needs a wildcard pattern; the bare folder path matches only itself.
    for fullPath in glob.glob(os.path.join(folder, "*")):
        if os.path.isfile(fullPath):
            try:
                im = Image.open(fullPath)
            except OSError:
                # Not an image (or unreadable): skip it rather than silently
                # aborting the whole loop.
                continue
            width, height = im.size
            if height > maxHeight or width > maxWidth:
                heightRate = height / maxHeight
                widthRate = width / maxWidth
                finalRate = max(heightRate, widthRate)
                newHeight = int(height / finalRate)
                newWidth = int(width / finalRate)
                im = im.resize((newWidth, newHeight))
                im.save(fullPath)
    print("Done!")
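# Example usage; "./photos" is an illustrative path, not part of the original
# script:
if __name__ == "__main__":
    resizeImgInFolder("./photos")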
|
CyanogenMod/android_external_chromium_org | refs/heads/cm-12.0 | tools/perf/page_sets/typical_25.py | 8 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Typical25Page(page_module.Page):
def __init__(self, url, page_set):
super(Typical25Page, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/typical_25.json'
def RunSmoothness(self, action_runner):
action_runner.RunAction(ScrollAction())
class Typical25PageSet(page_set_module.PageSet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(Typical25PageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/typical_25.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: Alexa games #48
'http://www.nick.com/games',
# Why: Alexa sports #45
'http://www.rei.com/',
# Why: Alexa sports #50
'http://www.fifa.com/',
# Why: Alexa shopping #41
'http://www.gamestop.com/ps3',
# Why: Alexa shopping #25
'http://www.barnesandnoble.com/u/books-bestselling-books/379003057/',
# Why: Alexa news #55
('http://www.economist.com/news/science-and-technology/21573529-small-'
'models-cosmic-phenomena-are-shedding-light-real-thing-how-build'),
# Why: Alexa news #67
'http://www.theonion.com',
'http://arstechnica.com/',
# Why: Alexa home #10
'http://allrecipes.com/Recipe/Pull-Apart-Hot-Cross-Buns/Detail.aspx',
'http://www.html5rocks.com/en/',
'http://www.mlb.com/',
# pylint: disable=C0301
'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
'http://www.imdb.com/title/tt0910970/',
'http://www.flickr.com/search/?q=monkeys&f=hp',
'http://money.cnn.com/',
'http://www.nationalgeographic.com/',
'http://premierleague.com',
'http://www.osubeavers.com/',
'http://walgreens.com',
'http://colorado.edu',
('http://www.ticketmaster.com/JAY-Z-and-Justin-Timberlake-tickets/artist/'
'1837448?brand=none&tm_link=tm_homeA_rc_name2'),
# pylint: disable=C0301
'http://www.theverge.com/2013/3/5/4061684/inside-ted-the-smartest-bubble-in-the-world',
'http://www.airbnb.com/',
'http://www.ign.com/',
# Why: Alexa health #25
'http://www.fda.gov',
]
for url in urls_list:
self.AddPage(Typical25Page(url, self))
|
zero-rp/miniblink49 | refs/heads/master | v8_7_5/tools/unittests/testdata/results_processor.py | 14 | #!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Fake results processor for testing that just sums some things up.
"""
# for py2/py3 compatibility
from __future__ import print_function
import fileinput
import re
richards = 0.0
deltablue = 0.0
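# Example input (illustrative): the line "Richards1: 1.2" adds 1.2 to the
# Richards total and "DeltaBlue2: 3.4" adds 3.4 to DeltaBlue, so the script
# would print "Richards: 1.200000" and "DeltaBlue: 3.400000".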
for line in fileinput.input():
match = re.match(r'^Richards\d: (.*)$', line)
if match:
richards += float(match.group(1))
match = re.match(r'^DeltaBlue\d: (.*)$', line)
if match:
deltablue += float(match.group(1))
print('Richards: %f' % richards)
print('DeltaBlue: %f' % deltablue)
|
int19h/PTVS | refs/heads/master | Python/Product/Miniconda/Miniconda3-x64/Lib/tarfile.py | 9 | #!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
from builtins import open as bltn_open
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# OSError (winerror=1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (OSError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError",
"CompressionError", "StreamError", "ExtractError", "HeaderError",
"ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT",
"DEFAULT_FORMAT", "open"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name == "nt":
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
s = nts(s, "ascii", "strict")
n = int(s.strip() or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 or 0o377 byte indicate this
# particular encoding, the following digits-1 bytes are a big-endian
# base-256 representation. This allows values up to (256**(digits-1))-1.
# A 0o200 byte indicates a positive number, a 0o377 byte a negative
# number.
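    # Worked example (illustrative): with digits=8 the octal form covers
    # 0 <= n <= 8**7 - 1, e.g. itn(511) == b"0000777\x00". Outside that range
    # GNU base-256 is used, e.g. itn(-1, 8, GNU_FORMAT) == b"\xff" * 8
    # (the 0o377 marker followed by the two's-complement bytes of -1).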
n = int(n)
if 0 <= n < 8 ** (digits - 1):
s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
if n >= 0:
s = bytearray([0o200])
else:
s = bytearray([0o377])
n = 256 ** digits + n
for i in range(digits - 1):
s.insert(1, n & 0o377)
n >>= 8
else:
raise ValueError("overflow in number field")
return s
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
bufsize = bufsize or 16 * 1024
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst, bufsize)
return
blocks, remainder = divmod(length, bufsize)
for b in range(blocks):
buf = src.read(bufsize)
if len(buf) < bufsize:
raise exception("unexpected end of data")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise exception("unexpected end of data")
dst.write(buf)
return
def filemode(mode):
"""Deprecated in this location; use stat.filemode."""
import warnings
warnings.warn("deprecated in favor of stat.filemode",
DeprecationWarning, 2)
return stat.filemode(mode)
def _safe_print(s):
encoding = getattr(sys.stdout, 'encoding', None)
if encoding is not None:
s = s.encode(encoding, 'backslashreplace').decode(encoding)
print(s, end=' ')
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
self.exception = zlib.error
else:
self._init_write_gz()
elif comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
self.exception = OSError
else:
self.cmp = bz2.BZ2Compressor()
elif comptype == "xz":
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = lzma.LZMADecompressor()
self.exception = lzma.LZMAError
else:
self.cmp = lzma.LZMACompressor()
elif comptype != "tar":
raise CompressionError("unknown compression type %r" % comptype)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
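        # RFC 1952 header fields: magic \037\213, deflate method \010,
        # FNAME flag \010, 4-byte mtime, XFL \002 (max compression),
        # OS \377 (unknown).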
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
self.closed = True
try:
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
self.fileobj.write(struct.pack("<L", self.crc))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
finally:
if not self._extfileobj:
self.fileobj.close()
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = b"".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except self.exception:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = b"".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = b"".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
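        # Sniff magic bytes in the buffered first block: gzip (\x1f\x8b\x08),
        # bzip2 ("BZh" plus the block magic "1AY&SY"), xz (\xfd7zXZ) or the
        # legacy lzma-alone header (\x5d\x00\x00\x80); anything else is
        # treated as plain tar.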
if self.buf.startswith(b"\x1f\x8b\x08"):
return "gz"
elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
return "bz2"
elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
return "xz"
else:
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
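        # Each map entry is (is_data, logical start, logical stop, real file
        # offset or None); gaps between sparse data blocks become zero blocks.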
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
b = self.fileobj.read(length)
if len(b) != length:
raise ReadError("unexpected end of data")
buf += b
else:
buf += NUL * length
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
@property
def path(self):
return self.name
@path.setter
def path(self, name):
self.name = name
@property
def linkpath(self):
return self.linkname
@linkpath.setter
def linkpath(self, linkname):
self.linkname = linkname
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors)
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
        # that need to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
def _posix_split_name(self, name, encoding, errors):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
components = name.split("/")
for i in range(1, len(components)):
prefix = "/".join(components[:i])
name = "/".join(components[i:])
if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \
len(name.encode(encoding, errors)) <= LENGTH_NAME:
break
else:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf-8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf-8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf-8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
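            # The length prefix counts its own digits, so the total is found
            # by fixed-point iteration: e.g. a 20-byte payload becomes the
            # record "22 ...", since 20 + len("22") == 22.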
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf-8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
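        # E.g. b"21 hdrcharset=BINARY\n" is one complete record: 2 length
        # digits + 1 space + 17 bytes of "hdrcharset=BINARY" + 1 newline = 21.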
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None,
errorlevel=None, copybufsize=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
        `fileobj' is not closed when TarFile is closed.
"""
modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"}
if mode not in modes:
raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
self.mode = mode
self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if (name is None and hasattr(fileobj, "name") and
isinstance(fileobj.name, (str, bytes))):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.copybufsize = copybufsize
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in ("a", "w", "x"):
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'r:xz' open for reading with lzma compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'w:xz' open for writing with lzma compression
'x' or 'x:' create a tarfile exclusively without compression, raise
an exception if the file is already created
'x:gz' create a gzip compressed tarfile, raise an exception
if the file is already created
'x:bz2' create a bzip2 compressed tarfile, raise an exception
if the file is already created
'x:xz' create an lzma compressed tarfile, raise an exception
if the file is already created
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'r|xz' open an lzma compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
'w|xz' open an lzma compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
def not_compressed(comptype):
return cls.OPEN_METH[comptype] == 'taropen'
for comptype in sorted(cls.OPEN_METH, key=not_compressed):
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError):
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in ("a", "w", "x"):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if mode not in ("r", "a", "w", "x"):
raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
except OSError:
if fileobj is not None and mode == 'r':
raise ReadError("not a gzip file")
raise
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except OSError:
fileobj.close()
if mode == 'r':
raise ReadError("not a gzip file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
fileobj = bz2.BZ2File(fileobj or name, mode,
compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (OSError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not a bzip2 file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
"""Open lzma compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w", "x"):
raise ValueError("mode must be 'r', 'w' or 'x'")
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (lzma.LZMAError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not an lzma file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open", # bzip2 compressed tar
"xz": "xzopen" # lzma compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
self.closed = True
try:
if self.mode in ("a", "w", "x"):
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
finally:
if not self._extfileobj:
self.fileobj.close()
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object from the result of os.stat or equivalent
on an existing file. The file is either named by `name', or
specified as a file object `fileobj' with a file descriptor. If
given, `arcname' specifies an alternative name for the file in the
archive, otherwise, the name is taken from the 'name' attribute of
'fileobj', or the 'name' argument. The name should be a text
string.
"""
self._check("awx")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self # Not needed
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True, *, members=None):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced. `members' is optional and must be a subset of the
list returned by getmembers().
"""
self._check()
if members is None:
members = self
for tarinfo in members:
if verbose:
_safe_print(stat.filemode(tarinfo.mode))
_safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid))
if tarinfo.ischr() or tarinfo.isblk():
_safe_print("%10s" %
("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
else:
_safe_print("%10d" % tarinfo.size)
_safe_print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6])
_safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
if verbose:
if tarinfo.issym():
_safe_print("-> " + tarinfo.linkname)
if tarinfo.islnk():
_safe_print("link to " + tarinfo.linkname)
print()
def add(self, name, arcname=None, recursive=True, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("awx")
if arcname is None:
arcname = name
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in sorted(os.listdir(name)):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, it should be a binary file, and tarinfo.size bytes are read
from it and added to the archive. You can create TarInfo objects
directly, or by using gettarinfo().
"""
self._check("awx")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
bufsize=self.copybufsize
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
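    # A hedged usage sketch for gettarinfo()/addfile(), meant to run at module
    # level; the archive name, member name and payload are illustrative:
    #
    #   import io, tarfile
    #   with tarfile.open("sketch.tar", "w") as tf:
    #       info = tarfile.TarInfo(name="example.txt")
    #       payload = b"hello"
    #       info.size = len(payload)
    #       tf.addfile(info, io.BytesIO(payload))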
def extractall(self, path=".", members=None, *, numeric_owner=False):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers(). If `numeric_owner` is True, only
the numbers for user/group names are used and not the names.
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir(),
numeric_owner=numeric_owner)
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath, numeric_owner=numeric_owner)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True, *, numeric_owner=False):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False. If `numeric_owner`
is True, only the numbers for user/group names are used and not
the names.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs,
numeric_owner=numeric_owner)
except OSError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file or a
link, an io.BufferedReader object is returned. Otherwise, None is
returned.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
# Members with unknown types are treated as regular files.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
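    # A hedged usage sketch for extract()/extractfile(), meant to run at module
    # level; the archive and member names are illustrative:
    #
    #   import tarfile
    #   with tarfile.open("sketch.tar") as tf:
    #       tf.extract("example.txt", path="out")        # write to disk
    #       data = tf.extractfile("example.txt").read()  # read in memory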
def _extract_member(self, tarinfo, targetpath, set_attrs=True,
numeric_owner=False):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
        # forward slashes with platform-specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath, numeric_owner)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except FileExistsError:
pass
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
bufsize = self.copybufsize
with bltn_open(targetpath, "wb") as target:
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size, ReadError, bufsize)
target.seek(tarinfo.size)
target.truncate()
else:
copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath, numeric_owner):
"""Set owner of targetpath according to tarinfo. If numeric_owner
is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
is False, fall back to .gid/.uid when the search based on name
fails.
"""
if hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
g = tarinfo.gid
u = tarinfo.uid
if not numeric_owner:
try:
if grp:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
pass
try:
if pwd:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
pass
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
os.chown(targetpath, u, g)
except OSError:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except OSError:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except OSError:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Advance the file pointer.
if self.offset != self.fileobj.tell():
self.fileobj.seek(self.offset - 1)
if not self.fileobj.read(1):
raise ReadError("unexpected end of data")
# Read the next block.
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
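    # A hedged sketch of manual iteration via next(), meant to run at module
    # level; the archive name is illustrative:
    #
    #   import tarfile
    #   with tarfile.open("sketch.tar") as tf:
    #       member = tf.next()
    #       while member is not None:
    #           print(member.name)
    #           member = tf.next()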
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise OSError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise OSError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
yield from self.members
return
# Yield items using TarFile's next() method.
# When all members have been read, set TarFile as _loaded.
index = 0
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will have already exhausted the next() method.
if self.firstmember is not None:
tarinfo = self.next()
index += 1
yield tarinfo
while True:
if index < len(self.members):
tarinfo = self.members[index]
elif not self._loaded:
tarinfo = self.next()
if not tarinfo:
self._loaded = True
return
else:
return
index += 1
yield tarinfo
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
open = TarFile.open
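# A minimal usage sketch of the exported helpers; the archive path is
# illustrative:
#
#   import tarfile
#   if tarfile.is_tarfile("backup.tar.gz"):
#       with tarfile.open("backup.tar.gz", "r:*") as tf:
#           print(tf.getnames())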
def main():
import argparse
description = 'A simple command-line interface for tarfile module.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Verbose output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--list', metavar='<tarfile>',
help='Show listing of a tarfile')
group.add_argument('-e', '--extract', nargs='+',
metavar=('<tarfile>', '<output_dir>'),
help='Extract tarfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create tarfile from sources')
group.add_argument('-t', '--test', metavar='<tarfile>',
help='Test if a tarfile is valid')
args = parser.parse_args()
if args.test is not None:
src = args.test
if is_tarfile(src):
with open(src, 'r') as tar:
tar.getmembers()
print(tar.getmembers(), file=sys.stderr)
if args.verbose:
print('{!r} is a tar archive.'.format(src))
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.list is not None:
src = args.list
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.list(verbose=args.verbose)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.extract is not None:
if len(args.extract) == 1:
src = args.extract[0]
curdir = os.curdir
elif len(args.extract) == 2:
src, curdir = args.extract
else:
parser.exit(1, parser.format_help())
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.extractall(path=curdir)
if args.verbose:
if curdir == '.':
msg = '{!r} file is extracted.'.format(src)
else:
msg = ('{!r} file is extracted '
'into {!r} directory.').format(src, curdir)
print(msg)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.create is not None:
tar_name = args.create.pop(0)
_, ext = os.path.splitext(tar_name)
compressions = {
# gz
'.gz': 'gz',
'.tgz': 'gz',
# xz
'.xz': 'xz',
'.txz': 'xz',
# bz2
'.bz2': 'bz2',
'.tbz': 'bz2',
'.tbz2': 'bz2',
'.tb2': 'bz2',
}
tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
tar_files = args.create
with TarFile.open(tar_name, tar_mode) as tf:
for file_name in tar_files:
tf.add(file_name)
if args.verbose:
print('{!r} file created.'.format(tar_name))
if __name__ == '__main__':
main()
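# A hedged sketch of the command-line interface defined above; the file and
# directory names are illustrative:
#
#   python tarfile.py -l archive.tar            # list members
#   python tarfile.py -e archive.tar outdir     # extract into outdir
#   python tarfile.py -c archive.tgz src1 src2  # create, compression by extension
#   python tarfile.py -t archive.tar            # test validity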
|
sgiavasis/nipype | refs/heads/master | nipype/workflows/dmri/fsl/__init__.py | 10 | from __future__ import absolute_import
from .dti import create_bedpostx_pipeline, bedpostx_parallel
from .artifacts import (all_fmb_pipeline, all_peb_pipeline, all_fsl_pipeline,
hmc_pipeline, ecc_pipeline, sdc_fmb, sdc_peb,
remove_bias)
from .epi import (fieldmap_correction, topup_correction,
create_eddy_correct_pipeline,
create_epidewarp_pipeline, create_dmri_preprocessing)
from .tbss import (create_tbss_1_preproc, create_tbss_2_reg,
create_tbss_3_postreg, create_tbss_4_prestats,
create_tbss_all, create_tbss_non_FA)
|
miguelsc/voamos | refs/heads/master | voamos/__init__.py | 1 | # -*- coding: utf-8 -*-
import logging
logging.basicConfig()
__author__ = """Miguel Coutada"""
__email__ = 'michaelcoutada@gmail.com'
__version__ = '0.1.0'
|
Ayaz2589/Ayaz2589.github.io | refs/heads/master | node_modules/npm/node_modules/npm-lifecycle/node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large numbers
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
  # Put the sources target in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
|
bluemini/kuma | refs/heads/master | vendor/packages/translate/storage/test_catkeys.py | 26 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.storage import catkeys, test_base
class TestCatkeysUnit(test_base.TestTranslationUnit):
UnitClass = catkeys.CatkeysUnit
def test_difficult_escapes(self):
r"""Wordfast files need to perform magic with escapes.
Wordfast does not accept line breaks in its TM (even though they would be
valid in CSV) thus we turn \\n into \n and reimplement the base class test but
eliminate a few of the actual tests.
"""
unit = self.unit
specials = ['\\"', '\\ ',
'\\\n', '\\\t', '\\\\r', '\\\\"']
for special in specials:
unit.source = special
print("unit.source:", repr(unit.source) + '|')
print("special:", repr(special) + '|')
assert unit.source == special
def test_newlines(self):
"""Wordfast does not like real newlines"""
unit = self.UnitClass("One\nTwo")
assert unit.dict['source'] == "One\\nTwo"
def test_istranslated(self):
unit = self.UnitClass()
assert not unit.istranslated()
unit.source = "Test"
assert not unit.istranslated()
unit.target = "Rest"
assert unit.istranslated()
def test_note_sanity(self):
"""Override test, since the format doesn't support notes."""
pass
class TestCatkeysFile(test_base.TestTranslationStore):
StoreClass = catkeys.CatkeysFile
|
claws/AutobahnPython | refs/heads/master | autobahn/autobahn/websocket/useragent.py | 18 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("lookupWsSupport",)
import re
UA_FIREFOX = re.compile(".*Firefox/(\d*).*")
UA_CHROME = re.compile(".*Chrome/(\d*).*")
UA_CHROMEFRAME = re.compile(".*chromeframe/(\d*).*")
UA_WEBKIT = re.compile(".*AppleWebKit/([0-9+\.]*)\w*.*")
UA_WEBOS = re.compile(".*webos/([0-9+\.]*)\w*.*")
UA_HPWEBOS = re.compile(".*hpwOS/([0-9+\.]*)\w*.*")
# Chrome =============================================================
# Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11
# Chrome Frame =======================================================
# IE6 on Windows with Chrome Frame
# Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; chromeframe/11.0.660.0)
# Firefox ============================================================
# Windows 7 64 Bit
# Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0a2) Gecko/20120227 Firefox/12.0a2
# Android ============================================================
# Firefox Mobile
# Mozilla/5.0 (Android; Linux armv7l; rv:10.0.2) Gecko/20120215 Firefox/10.0.2 Fennec/10.0.2
# Chrome for Android (on ICS)
# Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7
# Android builtin browser
# Samsung Galaxy Tab 1
# Mozilla/5.0 (Linux; U; Android 2.2; de-de; GT-P1000 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1
# Samsung Galaxy S
# Mozilla/5.0 (Linux; U; Android 2.3.3; de-de; GT-I9000 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1
# Samsung Galaxy Note
# Mozilla/5.0 (Linux; U; Android 2.3.6; de-de; GT-N7000 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1
# Samsung Galaxy ACE (no Flash since ARM)
# Mozilla/5.0 (Linux; U; Android 2.2.1; de-de; GT-S5830 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1
# WebOS ==============================================================
# HP Touchpad
# Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.5; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.83 Safari/534.6 TouchPad/1.0
# => Qt-WebKit, Hixie-76, Flash
# Safari =============================================================
# iPod Touch, iOS 4.2.1
# Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2_1 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5
# => Hixie-76
# MacBook Pro, OSX 10.5.8, Safari 5.0.6
# Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3
# => Hixie-76
# RFC6455
# Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534+ (KHTML, like Gecko) Version/5.1.2 Safari/534.52.7
# Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.24+ (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10
# Hixie-76
# Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.53.11 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10
# Hixie-76
# Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3
# Opera ==============================================================
# Windows 7 32-Bit
# Opera/9.80 (Windows NT 6.1; U; de) Presto/2.10.229 Version/11.61
# Windows 7 64-Bit
# Opera/9.80 (Windows NT 6.1; WOW64; U; de) Presto/2.10.229 Version/11.62
# Samsung Galaxy S
# Opera/9.80 (Android 2.3.3; Linux; Opera Mobi/ADR-1202231246; U; de) Presto/2.10.254 Version/12.00
# Samsung Galaxy Tab 1
# Opera/9.80 (Android 2.2; Linux; Opera Tablet/ADR-1203051631; U; de) Presto/2.10.254 Version/12.00
# Samsung Galaxy ACE:
# Opera/9.80 (Android 2.2.1; Linux; Opera Mobi/ADR-1203051631; U; de) Presto/2.10.254 Version/12.00
# Nokia N8, Symbian S60 5th Ed., S60 Bell
# Opera/9.80 (S60; SymbOS; Opera Mobi/SYB-1111151949; U; de) Presto/2.9.201 Version/11.50
def _lookupWsSupport(ua):
## Internet Explorer
##
## FIXME: handle Windows Phone
##
if ua.find("MSIE") >= 0:
# IE10 has native support
if ua.find("MSIE 10") >= 0:
# native Hybi-10+
return (True, False, True)
# first, check for Google Chrome Frame
# http://www.chromium.org/developers/how-tos/chrome-frame-getting-started/understanding-chrome-frame-user-agent
if ua.find("chromeframe") >= 0:
r = UA_CHROMEFRAME.match(ua)
try:
v = int(r.groups()[0])
if v >= 14:
# native Hybi-10+
return (True, False, True)
except:
# detection problem
return (False, False, False)
# Flash fallback
if ua.find("MSIE 8") >= 0 or ua.find("MSIE 9") >= 0:
return (True, True, True)
# unsupported
return (False, False, True)
## iOS
##
if ua.find("iPhone") >= 0 or ua.find("iPad") >= 0 or ua.find("iPod") >= 0:
## native Hixie76 (as of March 2012), no Flash, no alternative browsers
return (True, False, True)
## Android
##
if ua.find("Android") >= 0:
## Firefox Mobile
##
if ua.find("Firefox") >= 0:
# Hybi-10+ for FF Mobile 8+
return (True, False, True)
## Opera Mobile
##
if ua.find("Opera") >= 0:
# Hixie76 for Opera 11+
return (True, False, True)
## Chrome for Android
##
if ua.find("CrMo") >= 0:
# http://code.google.com/chrome/mobile/docs/faq.html
return (True, False, True)
## Android builtin Browser (ooold WebKit)
##
if ua.find("AppleWebKit") >= 0:
# Though we return WS = True, and Flash = True here, when the device has no actual Flash support, that
# will get later detected in JS. This applies to i.e. ARMv6 devices like Samsung Galaxy ACE
# builtin browser, only works via Flash
return (True, True, True)
# detection problem
return (False, False, False)
## webOS
##
if ua.find("hpwOS") >= 0 or ua.find("webos") >= 0:
try:
if ua.find("hpwOS") >= 0:
vv = [int(x) for x in UA_HPWEBOS.match(ua).groups()[0].split('.')]
if vv[0] >= 3:
return (True, False, True)
elif ua.find("webos") >= 0:
vv = [int(x) for x in UA_WEBOS.match(ua).groups()[0].split('.')]
if vv[0] >= 2:
return (True, False, True)
except:
# detection problem
return (False, False, False)
else:
# unsupported
return (False, False, True)
## Opera
##
if ua.find("Opera") >= 0:
# Opera 11+ has Hixie76 (needs to be manually activated though)
return (True, False, True)
## Firefox
##
if ua.find("Firefox") >= 0:
r = UA_FIREFOX.match(ua)
try:
v = int(r.groups()[0])
if v >= 7:
# native Hybi-10+
return (True, False, True)
elif v >= 3:
# works with Flash bridge
return (True, True, True)
else:
# unsupported
return (False, False, True)
except:
# detection problem
return (False, False, False)
## Safari
##
if ua.find("Safari") >= 0 and not ua.find("Chrome") >= 0:
# rely on at least Hixie76
return (True, False, True)
## Chrome
##
if ua.find("Chrome") >= 0:
r = UA_CHROME.match(ua)
try:
v = int(r.groups()[0])
if v >= 14:
# native Hybi-10+
return (True, False, True)
elif v >= 4:
# works with Flash bridge
return (True, True, True)
else:
# unsupported
return (False, False, True)
except:
# detection problem
return (False, False, False)
# detection problem
return (False, False, False)
UA_DETECT_WS_SUPPORT_DB = {}
def lookupWsSupport(ua, debug = True):
"""
Lookup if browser supports WebSocket (Hixie76, Hybi10+, RFC6455) natively,
and if not, whether the `web-socket-js <https://github.com/gimite/web-socket-js>`_
Flash bridge works to polyfill that.
Returns a tuple of booleans `(ws_supported, needs_flash, detected)` where
* `ws_supported`: WebSocket is supported
* `needs_flash`: Flash Bridge is needed for support
   * `detected`: the code has explicitly mapped the support/nosupport
:param ua: The browser user agent string as sent in the HTTP header, e.g. provided as `flask.request.user_agent.string` in Flask.
:type ua: str
:returns: tuple -- A tuple `(ws_supported, needs_flash, detected)`.
"""
ws = _lookupWsSupport(ua)
if debug:
if not UA_DETECT_WS_SUPPORT_DB.has_key(ua):
UA_DETECT_WS_SUPPORT_DB[ua] = ws
if not ws[2]:
msg = "UNDETECTED"
elif ws[0]:
msg = "SUPPORTED"
elif not ws[0]:
msg = "UNSUPPORTED"
else:
msg = "ERROR"
print("DETECT_WS_SUPPORT: %s %s %s %s %s" % (ua, ws[0], ws[1], ws[2], msg))
return ws
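# A hedged usage sketch; the user agent string is illustrative (in Flask it
# would come from flask.request.user_agent.string):
#
#   ua = ("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 "
#         "(KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11")
#   ws_supported, needs_flash, detected = lookupWsSupport(ua, debug=False)
#   # Chrome 17 >= 14, so this yields (True, False, True)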
|
qsic/qsic3 | refs/heads/master | parsers/improvteams/tests.py | 1 | import logging
import os.path
import sys
import unittest
import urllib
from django.core.files import File
from project_settings.settings.base import PROJECT_ROOT
from parsers.improvteams.parser import BaseItParser
from parsers.improvteams.parser import ItPerformerParser
from parsers.improvteams.parser import ItTeamParser
logger = logging.getLogger(__name__)
"""The file at this location holds the HTML that will be tested
against to see if the format of teams pages has changed.
"""
CONTROL_TEAM_URI_PLAIN = local_data_path('it_team_page_amanda_and_jenice.html')
"""The file at this URI will be tested against the control team page to
see if any changes in the format of the HTML.
"""
#TEST_TEAM_URI_PLAIN = 'http://bearcountry.improvteams.com/'
TEST_TEAM_URI_PLAIN = 'http://amandajenice.improvteams.com/'
"""Similar to above however this will be tested against a different
Improvteams.com Team page format. The format here has a main photo
that consumes most of the page.
"""
CONTROL_TEAM_URI_PLUS = local_data_path('it_team_page_boat.html')
TEST_TEAM_URI_PLUS = 'http://boat.improvteams.com/'
test_relations = {
'Paul': {
'control': 'it_performer_page_paullogston.html',
'test': 'http://newyork.improvteams.com/performers/2849/paullogston'
},
}
def local_data_path(file_name):
"""
Return absolute path of file file_name
located in local/test_data
"""
return os.path.join(PROJECT_ROOT, 'local', 'test_data', file_name)
def get_control_html_from_lfs(path):
with open(path) as fp:
return fp.read()
class BaseItParserUTs(unittest.TestCase):
"""Common Unit Tests to all Parser test objects"""
pass
class ParserUTsMixin(object):
"""Unit tests common to all parser objects"""
def test__000_fetch_html_returns_200(self):
"""Fetch returns status code of 200"""
parser = BaseItParser(self.__class__.test_uri)
self.assertEqual(parser.response_status, 200)
class ItPerformerParserUTs(unittest.TestCase, ParserUTsMixin):
"""Performer Parser Unit Tests"""
test_relation = test_relations['Paul']
test_uri = test_relation['test']
control_uri = test_relation['control']
@classmethod
def setUpClass(cls):
cls.test_parser = ItPerformerParser(cls.test_uri)
cls.control_parser = ItPerformerParser(None)
        cls.control_parser.html = get_control_html_from_lfs(
            local_data_path(cls.control_uri))
cls.control_parser.parse_soup()
def test__000_it_id_parsed_from_it_url(self):
self.assertEqual(self.test_parser.it_id, 2849)
def test__001_bio_parsed_as_expected(self):
"""Checks for changes in the structure of bio at Improvteams.com"""
selector = '#main .profile .profile_right .bio'
control_bio = self.control_parser.soup.select(selector)[0].string
test_bio = self.test_parser.soup.select(selector)[0].string
self.assertEqual(test_bio, control_bio)
def test__002_name_parsed_as_expected(self):
"""Check for changes in structure of name"""
self.assertEqual(self.test_parser.first_name,
self.control_parser.first_name)
self.assertEqual(self.test_parser.last_name,
self.control_parser.last_name)
self.assertEqual(self.test_parser.first_name, 'Paul')
self.assertEqual(self.test_parser.last_name, 'Logston')
class ItTeamPlainParserUTs(unittest.TestCase, ParserUTsMixin):
"""Team Plain Parser Unit Tests"""
control_uri = CONTROL_TEAM_URI_PLAIN
test_uri = TEST_TEAM_URI_PLAIN
team_name = 'Amanda & Jenic'
@classmethod
def setUpClass(cls):
cls.test_parser = ItTeamParser(cls.test_uri)
cls.control_parser = ItTeamParser(None)
cls.control_parser.html = get_control_html_from_lfs(cls.control_uri)
cls.control_parser.parse_soup()
def test__000_team_name_parsed_as_expected(self):
"""Check for changes in structure of HTML effecting team name"""
self.assertEqual(self.test_parser.team_name,
self.control_parser.team_name)
self.assertEqual(self.test_parser.team_name,
self.team_name)
def test__001_team_photo_uri_parsed_as_expected(self):
"""Check for changes in structure of HTML effecting team photo uri"""
self.assertEqual(self.test_parser.team_photo_uri,
self.control_parser.team_photo_uri)
def test__002_team_bio_parsed_as_expected(self):
"""Check for changes in structure of HTML effecting team bio"""
self.assertEqual(self.test_parser.team_bio,
self.control_parser.team_bio)
def test__003_performer_uri_list_parsed_as_expected(self):
"""Check for changes in structure of HTML effecting performer list"""
self.assertEqual(self.test_parser.performer_uri_list,
self.control_parser.performer_uri_list)
class ItTeamPlusParserUTs(ItTeamPlainParserUTs):
"""Team Plus Parser Unit Tests"""
control_uri = CONTROL_TEAM_URI_PLUS
test_uri = TEST_TEAM_URI_PLUS
team_name = 'Boat' |
vigsterkr/marathonspawner | refs/heads/master | marathonspawner/__init__.py | 1 | from ._version import __version__
from .marathonspawner import MarathonSpawner
|
tecan/xchat-rt | refs/heads/master | plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/plugins/AutoMode/test.py | 21 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class AutoModeTestCase(PluginTestCase):
plugins = ('AutoMode',)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
masom/Puck | refs/heads/master | server/models/firewalls.py | 1 | '''
Puck: FreeBSD virtualization guest configuration server
Copyright (C) 2012 The Hotel Communication Network inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from libs.model import ModelCollection, Model, TableDefinition
from collections import OrderedDict
class Firewall(Model):
def __init__(self, id=None, name ="", data = ""):
self.data = data
self.name = name
self.id = id
class Firewalls(ModelCollection):
_model = Firewall
def _generate_table_definition(self):
columns = OrderedDict([
('id', "TEXT"),
('name', "TEXT"),
('data', "TEXT")
])
return TableDefinition('firewalls', columns=columns)
|
jairtrejo/hyde | refs/heads/master | hydeengine/content_processors.py | 58 | from django.conf import settings
class PassthroughProcessor:
@staticmethod
def process(resource):
resource.prerendered = True
|
juhuntenburg/pipelines | refs/heads/master | src/connexel_experiments/create_all_subjects_matrix.py | 2 | import numpy as np
import nibabel as nb
from scipy import stats
from nipy.modalities.fmri.glm import GeneralLinearModel
beh_data = np.recfromcsv('/home/raid3/gorgolewski/Downloads/DataForChris_10_30_12.csv')
#corr_mmaps = []
#for name in beh_data['sub_id_database_brain']:
# filename = "/scr/adenauer1/workingdir/rs_analysis_test/main_workflow/_subject_id_%s/_fwhm_5/corr_matrix/corr_matrix.int16"%name
# mmap = np.memmap(filename, dtype='int16', mode='r')
# corr_mmaps.append(mmap)
filename = "/SCR/tmp/memory_z_map.float64"
z_map = np.memmap(filename, dtype='float64', mode='r')
initial_mask_file = "/SCR/MNI152_T1_4mm_brain_mask.nii.gz"
#submask_file = "/SCR/MNI152_T1_4mm_strucseg_periph.nii.gz"
submask_file = "/SCR/memory_4mm.nii.gz"
out_file = "/SCR/all_subjects.int16"
mask_nii = nb.load(initial_mask_file)
initial_mask = mask_nii.get_data() > 0
mask_nii = nb.load(submask_file)
submask = mask_nii.get_data()
submask = submask[initial_mask] > 0
print "%d vs. %d"%(initial_mask.sum(), submask.sum())
#big_map = np.memmap(out_file, dtype='int16', mode='w+', shape=(len(beh_data['sub_id_database_brain']), submask.sum()*(submask.sum()-1)/2))
filename = ""
sub_z_map = np.memmap(filename, dtype='float64', mode='w+', shape=(submask.sum()*(submask.sum()-1)/2))
old_counter = 0
new_counter = 0
for i in xrange(0,initial_mask.sum()):
for j in xrange(i+1,initial_mask.sum()):
if submask[j] and submask[i]:
#for s in range(len(beh_data['sub_id_database_brain'])):
# big_map[s,new_counter] = corr_mmaps[s][old_counter]
sub_z_map[new_counter] = z_map[old_counter]
new_counter += 1
old_counter += 1
|
chuan9/chromium-crosswalk | refs/heads/master | tools/cr/cr/commands/shell.py | 103 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the shell command."""
import os
import tempfile
import cr
class ShellCommand(cr.Command):
"""The implementation of the shell command.
  The shell command is the escape hatch that lets the user run any program in the
same environment that cr would use if it were running it.
"""
def __init__(self):
super(ShellCommand, self).__init__()
self.help = 'Launch a shell'
self.description = ("""
If no arguments are present, this launches an interactive system
shell (ie bash) with the environment modified to that used for the
build systems.
If any arguments are present, they are used as a command line to run
in that shell.
This allows you to run commands that are not yet available natively
in cr.
""")
def AddArguments(self, subparsers):
parser = super(ShellCommand, self).AddArguments(subparsers)
self.ConsumeArgs(parser, 'the shell')
return parser
def Run(self):
if cr.context.remains:
cr.Host.Shell(*cr.context.remains)
return
# If we get here, we are trying to launch an interactive shell
shell = os.environ.get('SHELL', None)
if shell is None:
print 'Don\'t know how to run a shell on this system'
elif shell.endswith('bash'):
ps1 = '[CR] ' + os.environ.get('PS1', '')
with tempfile.NamedTemporaryFile() as rcfile:
rcfile.write('source ~/.bashrc\nPS1="'+ps1+'"')
rcfile.flush()
cr.Host.Execute(shell, '--rcfile', rcfile.name)
else:
cr.Host.Execute(shell)
|
hjfreyer/marry-fuck-kill | refs/heads/master | backend/mapreduce/lib/pipeline/util.py | 5 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the Google App Engine Pipeline API."""
__all__ = ["for_name",
"JsonEncoder",
"JsonDecoder"]
#pylint: disable=g-bad-name
import datetime
import inspect
import logging
import os
# Relative imports
from mapreduce.lib import simplejson
# pylint: disable=protected-access
def _get_task_target():
"""Get the default target for a pipeline task.
Current version id format is: user_defined_version.minor_version_number
Current module id is just the module's name. It could be "default"
Returns:
A complete target name is of format version.module. If module is the
default module, just version. None if target can not be determined.
"""
# Break circular dependency.
# pylint: disable=g-import-not-at-top
import pipeline
if pipeline._TEST_MODE:
return None
  # Further protect against test cases that don't set env vars
  # properly.
if ("CURRENT_VERSION_ID" not in os.environ or
"CURRENT_MODULE_ID" not in os.environ):
logging.warning("Running Pipeline in non TEST_MODE but important "
"env vars are not set.")
return None
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
module = os.environ["CURRENT_MODULE_ID"]
if module == "default":
return version
return "%s.%s" % (version, module)
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
Fully qualified can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find
Returns:
class object.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError, e:
# module_name is not actually a module. Try for_name for it to figure
# out what's this.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
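# A hedged usage sketch for for_name(); the dotted path is illustrative:
#
#   import os
#   join = for_name("os.path.join")
#   assert join is os.path.join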
def is_generator_function(obj):
"""Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
    true if the object is a generator function.
"""
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
class JsonEncoder(simplejson.JSONEncoder):
"""Pipeline customized json encoder."""
TYPE_ID = "__pipeline_json_type"
def default(self, o):
"""Inherit docs."""
if type(o) in _TYPE_TO_ENCODER:
encoder = _TYPE_TO_ENCODER[type(o)]
json_struct = encoder(o)
json_struct[self.TYPE_ID] = type(o).__name__
return json_struct
return super(JsonEncoder, self).default(o)
class JsonDecoder(simplejson.JSONDecoder):
"""Pipeline customized json decoder."""
def __init__(self, **kwargs):
if "object_hook" not in kwargs:
kwargs["object_hook"] = self._dict_to_obj
super(JsonDecoder, self).__init__(**kwargs)
def _dict_to_obj(self, d):
"""Converts a dictionary of json object to a Python object."""
if JsonEncoder.TYPE_ID not in d:
return d
type_name = d.pop(JsonEncoder.TYPE_ID)
if type_name in _TYPE_NAME_TO_DECODER:
decoder = _TYPE_NAME_TO_DECODER[type_name]
return decoder(d)
else:
raise TypeError("Invalid type %s.", type_name)
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def _json_encode_datetime(o):
"""Json encode a datetime object.
Args:
o: a datetime object.
Returns:
A dict of json primitives.
"""
return {"isostr": o.strftime(_DATETIME_FORMAT)}
def _json_decode_datetime(d):
"""Converts a dict of json primitives to a datetime object."""
return datetime.datetime.strptime(d["isostr"], _DATETIME_FORMAT)
def _register_json_primitive(object_type, encoder, decoder):
"""Extend what Pipeline can serialize.
Args:
object_type: type of the object.
encoder: a function that takes in an object and returns
a dict of json primitives.
decoder: inverse function of encoder.
"""
global _TYPE_TO_ENCODER
global _TYPE_NAME_TO_DECODER
if object_type not in _TYPE_TO_ENCODER:
_TYPE_TO_ENCODER[object_type] = encoder
_TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}
_register_json_primitive(datetime.datetime,
_json_encode_datetime,
_json_decode_datetime)
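# Illustrative round-trip sketch (hypothetical addition, never called): a
# datetime value survives JsonEncoder/JsonDecoder because of the primitive
# registered above.
def _example_json_roundtrip():
  """Encodes and decodes a datetime via the customized codecs."""
  original = datetime.datetime(2013, 1, 2, 3, 4, 5)
  encoded = JsonEncoder().encode({"when": original})
  decoded = JsonDecoder().decode(encoded)
  assert decoded["when"] == original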
|
Alkemic/webpage | refs/heads/master | module/portfolio/__init__.py | 9 | __author__ = 'alkemic'
|
mhnatiuk/phd_sociology_of_religion | refs/heads/master | scrapper/build/pyasn1/test/codec/ber/__init__.py | 3653 | # This file is necessary to make this directory a package.
|
grupozeety/CDerpnext | refs/heads/bk_master | erpnext/stock/doctype/landed_cost_taxes_and_charges/landed_cost_taxes_and_charges.py | 121 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class LandedCostTaxesandCharges(Document):
pass |
openshift/openshift-tools | refs/heads/prod | ansible/roles/lib_oa_openshift/src/lib/rolebinding.py | 84 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class RoleBindingConfig(object):
''' Handle rolebinding config '''
# pylint: disable=too-many-arguments
def __init__(self,
name,
namespace,
kubeconfig,
group_names=None,
role_ref=None,
subjects=None,
usernames=None):
''' constructor for handling rolebinding options '''
self.kubeconfig = kubeconfig
self.name = name
self.namespace = namespace
self.group_names = group_names
self.role_ref = role_ref
self.subjects = subjects
self.usernames = usernames
self.data = {}
self.create_dict()
def create_dict(self):
''' create a default rolebinding as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'RoleBinding'
        self.data['groupNames'] = self.group_names
        # the metadata dict must exist before its keys are filled in,
        # otherwise the two assignments below raise KeyError
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace
self.data['roleRef'] = self.role_ref
self.data['subjects'] = self.subjects
self.data['userNames'] = self.usernames
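# Illustrative sketch (hypothetical names and paths, never called): building
# a config and inspecting the generated resource dict.
def _example_rolebinding_config():
    ''' show the dict produced by RoleBindingConfig.create_dict '''
    config = RoleBindingConfig(name='edit-binding',
                               namespace='default',
                               kubeconfig='/etc/origin/master/admin.kubeconfig',
                               role_ref={'name': 'edit'},
                               usernames=['alice'])
    assert config.data['kind'] == 'RoleBinding'
    assert config.data['metadata']['name'] == 'edit-binding'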
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class RoleBinding(Yedit):
''' Class to model a rolebinding openshift object'''
group_names_path = "groupNames"
role_ref_path = "roleRef"
subjects_path = "subjects"
user_names_path = "userNames"
kind = 'RoleBinding'
def __init__(self, content):
'''RoleBinding constructor'''
super(RoleBinding, self).__init__(content=content)
self._subjects = None
self._role_ref = None
self._group_names = None
self._user_names = None
@property
def subjects(self):
''' subjects property '''
if self._subjects is None:
self._subjects = self.get_subjects()
return self._subjects
@subjects.setter
def subjects(self, data):
''' subjects property setter'''
self._subjects = data
@property
def role_ref(self):
''' role_ref property '''
if self._role_ref is None:
self._role_ref = self.get_role_ref()
return self._role_ref
@role_ref.setter
def role_ref(self, data):
''' role_ref property setter'''
self._role_ref = data
@property
def group_names(self):
''' group_names property '''
if self._group_names is None:
self._group_names = self.get_group_names()
return self._group_names
@group_names.setter
def group_names(self, data):
''' group_names property setter'''
self._group_names = data
@property
def user_names(self):
''' user_names property '''
if self._user_names is None:
self._user_names = self.get_user_names()
return self._user_names
@user_names.setter
def user_names(self, data):
''' user_names property setter'''
self._user_names = data
def get_group_names(self):
''' return groupNames '''
return self.get(RoleBinding.group_names_path) or []
def get_user_names(self):
''' return usernames '''
return self.get(RoleBinding.user_names_path) or []
def get_role_ref(self):
''' return role_ref '''
return self.get(RoleBinding.role_ref_path) or {}
def get_subjects(self):
''' return subjects '''
return self.get(RoleBinding.subjects_path) or []
#### ADD #####
def add_subject(self, inc_subject):
''' add a subject '''
if self.subjects:
# pylint: disable=no-member
self.subjects.append(inc_subject)
else:
self.put(RoleBinding.subjects_path, [inc_subject])
return True
def add_role_ref(self, inc_role_ref):
''' add a role_ref '''
if not self.role_ref:
self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
return True
return False
def add_group_names(self, inc_group_names):
''' add a group_names '''
if self.group_names:
# pylint: disable=no-member
self.group_names.append(inc_group_names)
else:
self.put(RoleBinding.group_names_path, [inc_group_names])
return True
def add_user_name(self, inc_user_name):
''' add a username '''
if self.user_names:
# pylint: disable=no-member
self.user_names.append(inc_user_name)
else:
self.put(RoleBinding.user_names_path, [inc_user_name])
return True
#### /ADD #####
#### Remove #####
def remove_subject(self, inc_subject):
''' remove a subject '''
try:
# pylint: disable=no-member
self.subjects.remove(inc_subject)
except ValueError as _:
return False
return True
def remove_role_ref(self, inc_role_ref):
''' remove a role_ref '''
if self.role_ref and self.role_ref['name'] == inc_role_ref:
del self.role_ref['name']
return True
return False
def remove_group_name(self, inc_group_name):
''' remove a groupname '''
try:
# pylint: disable=no-member
self.group_names.remove(inc_group_name)
except ValueError as _:
return False
return True
def remove_user_name(self, inc_user_name):
''' remove a username '''
try:
# pylint: disable=no-member
self.user_names.remove(inc_user_name)
except ValueError as _:
return False
return True
#### /REMOVE #####
#### UPDATE #####
def update_subject(self, inc_subject):
''' update a subject '''
try:
# pylint: disable=no-member
index = self.subjects.index(inc_subject)
except ValueError as _:
return self.add_subject(inc_subject)
self.subjects[index] = inc_subject
return True
def update_group_name(self, inc_group_name):
''' update a groupname '''
try:
# pylint: disable=no-member
index = self.group_names.index(inc_group_name)
except ValueError as _:
return self.add_group_names(inc_group_name)
self.group_names[index] = inc_group_name
return True
def update_user_name(self, inc_user_name):
''' update a username '''
try:
# pylint: disable=no-member
index = self.user_names.index(inc_user_name)
except ValueError as _:
return self.add_user_name(inc_user_name)
self.user_names[index] = inc_user_name
return True
def update_role_ref(self, inc_role_ref):
''' update a role_ref '''
self.role_ref['name'] = inc_role_ref
return True
#### /UPDATE #####
#### FIND ####
def find_subject(self, inc_subject):
''' find a subject '''
index = None
try:
# pylint: disable=no-member
index = self.subjects.index(inc_subject)
except ValueError as _:
return index
return index
def find_group_name(self, inc_group_name):
''' find a group_name '''
index = None
try:
# pylint: disable=no-member
index = self.group_names.index(inc_group_name)
except ValueError as _:
return index
return index
def find_user_name(self, inc_user_name):
''' find a user_name '''
index = None
try:
# pylint: disable=no-member
index = self.user_names.index(inc_user_name)
except ValueError as _:
return index
return index
def find_role_ref(self, inc_role_ref):
''' find a user_name '''
if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
return self.role_ref
return None
|
retomerz/intellij-community | refs/heads/master | python/testData/inspections/UnnecessaryBackslashInArguments.py | 73 | def foo(a, b,<warning descr="Unnecessary backslash in expression."><caret>\</warning>
c,<warning descr="Unnecessary backslash in expression.">\</warning>
d):
pass |
mjsevilla/facts-about-sloths | refs/heads/master | functions/node_modules/firebase-admin/node_modules/grpc/third_party/boringssl/third_party/googletest/test/gtest_catch_exceptions_test.py | 2139 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test workflow in the
      expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
|
indx/indx-core | refs/heads/master | lib/indxclient/indxclient.py | 2 | # Copyright (C) 2011-2013 University of Southampton
# Copyright (C) 2011-2013 Daniel Alexander Smith
# Copyright (C) 2011-2013 Max Van Kleek
# Copyright (C) 2011-2013 Nigel R. Shadbolt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import json
import urllib
import urllib2
import cookielib
import uuid
import pprint
import cjson
import base64
import traceback
from indx.crypto import auth_keys
import Crypto.Random.OSRNG.posix
import Crypto.PublicKey.RSA
import Crypto.Hash.SHA512
from twisted.internet import reactor, threads
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from autobahn.twisted.websocket import connectWS, WebSocketClientFactory, WebSocketClientProtocol
# Decorator function to ensure that the IndxClient object has a token when the function requires one
def require_token(function):
def wrapper(self, *args, **kwargs):
self._debug("require_token, token is: {0}".format(self.token))
if self.token is None:
logging.error("require_token, throwing exception")
raise Exception("Non-null token required for this call.")
logging.debug("require_token, self: {0}, *args: {1}, **kwargs: {2}".format(self, args, kwargs))
return function(self, *args, **kwargs)
return wrapper
def value_truncate(data, max_length = 255):
""" Truncate a string before logging it (e.g. for file data). """
if data is None:
return data
if len(data) > max_length:
return data[:max_length] + "...[truncated, original length {0}]".format(len(data))
else:
return data
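# Illustrative sketch (hypothetical addition, never called): long values are
# cut for logging while short ones pass through unchanged.
def _example_value_truncate():
    """ Show value_truncate on short and long inputs. """
    assert value_truncate("short") == "short"
    truncated = value_truncate("x" * 300)
    assert truncated.startswith("x" * 255)
    assert "original length 300" in truncated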
class IndxClient:
""" Authenticates and accesses an INDX. """
def __init__(self, address, box, appid, token = None, client = None, keystore = None):
""" Connect to an INDX and authenticate. """
self.address = address
self.box = box
self.token = token
self.appid = appid
self.keystore = keystore # if you want to access websockets connections as a client (usually only the server does this)
self.params = {"app": self.appid}
if self.token is not None:
self.params["token"] = self.token
if self.box is not None:
self.params["box"] = self.box # used in requests
if client is None:
self.client = IndxHTTPClient(self.params)
else:
self.client = client
self.client.params = self.params
""" Ensure self.server always ends in a / """
if self.address[-1:] != "/":
self.address += "/"
self.base = "{0}{1}".format(self.address, self.box)
def _log(self, loglevel, message):
""" Write a log message including the server and box information. """
logger = logging.getLogger("indxclient")
return logger.log(loglevel, u"%s\t%s\t%s\t%s", self.address, self.box, self.token, message)
def _debug(self, message):
return self._log(logging.DEBUG, message)
def _error(self, message):
return self._log(logging.ERROR, message)
# Utility Functions
def _gen_bnode_id(self):
""" Generate an ID for a bnode. """
return "_:{0}".format(uuid.uuid4()) # uuid4 to avoid collisions - uuid1 can (and did) cause collisions
def _prepare_objects(self, objects, create_ids = False):
""" Take raw JSON object and expand them into the INDX internal format. """
logging.debug("IndxClient _prepare_objects: {0}".format(objects))
objects_new = []
if type(objects) != type([]):
objects = [objects]
for obj in objects:
if "@id" not in obj:
if create_ids:
obj["@id"] = "{0}".format(uuid.uuid4())
else:
raise Exception("@id required in all objects.")
obj_new = {}
for predicate, sub_objs in obj.items():
if predicate[0] == "@" or predicate[0] == "_":
obj_new[predicate] = sub_objs
continue # ignore internal non-data predicates
if sub_objs is None:
continue
if type(sub_objs) != type([]):
sub_objs = [sub_objs]
for object in sub_objs:
if type(object) != type({}):
if type(object) != type(u"") and type(object) != type(""):
object = unicode(object)
object = {"@value": object} # turn single value into a literal
# check if 'object' is an object value or if it is a subobject
if "@value" in object or "@id" in object:
# this object is a value
if predicate not in obj_new:
obj_new[predicate] = []
obj_new[predicate].append(object)
else:
# this is a subobject, so rescursively process it
new_id = self._gen_bnode_id()
object["@id"] = new_id
sub_obj = self._prepare_objects(object)
if len(sub_obj) > 0 and sub_obj[0] is not None:
if predicate not in obj_new:
obj_new[predicate] = []
obj_new[predicate].append({"@id": new_id}) # link to new object
objects_new.extend(sub_obj) # add new object to object list
if len(obj_new.keys()) > 0:
objects_new.append(obj_new)
return objects_new
@staticmethod
def requires_token(call):
requires = [
'get_object_ids',
'update_raw',
'update_json',
'update',
'delete',
'get_latest',
'get_by_ids',
'query',
'set_acl',
'set_acl_public',
'get_acls',
'generate_new_key',
'diff',
'add_file',
'delete_file',
'get_file',
'list_files',
'link_remote_box',
'get_version',
]
return call.func_name in requires
# API access functions
def create_box(self):
""" Create a box. """
self._debug("Called API: create_box")
url = "{0}admin/create_box".format(self.address)
values = {"name": self.box}
return self.client.post(url, values)
def delete_box(self):
""" Delete a box. """
self._debug("Called API: delete_box")
url = "{0}admin/delete_box".format(self.address)
values = {"name": self.box}
return self.client.post(url, values)
def list_boxes(self):
""" List the boxes on the INDX server. """
self._debug("Called API: list_boxes")
url = "{0}admin/list_boxes".format(self.address)
return self.client.get(url)
def create_root_box(self, box):
""" Create a new root box for a user on the INDX server. """
self._debug("Called API: create_root_box with box {0}".format(box))
url = "{0}admin/create_root_box".format(self.address)
values = {"box": box}
return self.client.get(url, values)
def create_user(self, username, password):
""" Create a new user. """
self._debug("Called API: create_user with username: {0}".format(username))
url = "{0}admin/create_user".format(self.address)
values = {"username": username, "password": password}
return self.client.post(url, values)
@require_token
def get_object_ids(self):
""" Get the IDs of every object in this box. """
self._debug("Called API: get_object_ids")
url = "{0}/get_object_ids".format(self.base)
return self.client.get(url)
@require_token
def apply_diff(self, diff):
""" Update objects in a box, in diff JSON format.
diff -- A diff in JSON format
"""
self._debug("Called API: apply_diff with diff: {0}".format(diff))
url = "{0}/apply_diff".format(self.base)
values = {"data": json.loads(diff)}
return self.client.put(url, values)
@require_token
def update_raw(self, version, objects):
""" Update objects in a box, in INDX format.
version -- The current version of the box
objects -- List of objects to create/update
"""
self._debug("Called API: update_raw with version: {0}, objects: {1}".format(version, objects))
values = {"data": json.loads(objects), "version": version}
return self.client.put(self.base, values)
@require_token
def update(self, version, objects):
""" Update objects in a box, from any JSON format.
version -- The current version of the box
objects -- List of objects to create/update
"""
self._debug("Called API: update with version: {0}, objects: {1}".format(version, objects))
prepared_objects = self._prepare_objects(objects)
self._debug("update: prepared_objects: {0}".format(pprint.pformat(prepared_objects, indent=2, width=80)))
values = {"data": json.dumps(prepared_objects), "version": version}
return self.client.put(self.base, values)
@require_token
def update_json(self, version, objects):
""" Update objects in a box, from any JSON format, without IDs.
version -- The current version of the box
objects -- List of objects to create/update
"""
self._debug("Called API: update with version: {0}, objects: {1}".format(version, objects))
prepared_objects = self._prepare_objects(objects, create_ids = True)
self._debug("update: prepared_objects: {0}".format(pprint.pformat(prepared_objects, indent=2, width=80)))
values = {"data": json.dumps(prepared_objects), "version": version}
return self.client.put(self.base, values)
@require_token
def delete(self, version, object_id_list):
""" Test to delete objects from a box.
version -- The current version of the box
object_id_list -- List of object IDs to delete
"""
self._debug("Called API: delete with version: {0}, object_id_list: {1}".format(version, object_id_list))
values = {"data": json.dumps(object_id_list), "version": version}
return self.client.delete(self.base, values)
@require_token
def get_version(self):
""" Get the latest version number. """
self._debug("Called API: get_version")
url = "{0}/get_version".format(self.base)
return self.client.get(url)
def get_latest(self):
""" Get the latest version of every object in this box. """
self._debug("Called API: get_latest")
return self.client.get(self.base)
@require_token
def get_by_ids(self, object_id_list):
""" Get the latest version of specific objects in this box.
object_id_list -- A list of object IDs to retrieve
"""
self._debug("Called API: get_by_ids with object_ids_list: {0}".format(object_id_list))
id_tuples = map(lambda i: ("id", i), object_id_list)
return self.client.get(self.base, id_tuples)
@require_token
def query(self, query, depth = None):
""" Query a box with a filtering query
query -- The query to send, as a dict, e.g. {"@id": 2983} or {"firstname": "dan"}
depth -- How deep into the object graph/hierarchy to return full objects
"""
self._debug("Called API: query with query: {0}".format(query))
params = {'q': query}
if depth is not None:
params['depth'] = depth
url = "{0}/query".format(self.base)
return self.client.get(url, params)
@require_token
def set_acl(self, acl, target_username):
""" Set an ACL on a database for a target username.
acl -- The ACL to set, must have "read", "write" and "control" fields, all boolean, e.g. {"read": true, "write": true, "control": false}
target_username -- username of the user whose access will be set/change
"""
self._debug("Called API: set_acl with acl: {0}, target_username: {1}".format(acl, target_username))
url = "{0}/set_acl".format(self.base)
return self.client.get(url, {'acl': acl, 'target_username': target_username})
@require_token
def set_acl_public(self, acl):
""" Set an ACL on a database for the public (not authed) user.
acl -- The ACL to set, must have "read", "write" and "control" fields, all boolean, e.g. {"read": true, "write": true, "control": false}
"""
self._debug("Called API: set_acl_public with acl: {0}".format(acl))
url = "{0}/set_acl".format(self.base)
return self.client.get(url, {'acl': acl, 'unauth_user': True})
@require_token
def get_acls(self):
""" Get ACLs for a database (requires "control" permissions)"""
self._debug("Called API: get_acls")
url = "{0}/get_acls".format(self.base)
return self.client.get(url)
@require_token
def generate_new_key(self, local_key, encpk2, serverid):
""" Generate new key and store it in the keystore, send our public (not private) key to the remote server. Return the public and public-hash parts. (Not the private part.) """
self._debug("Called API: generate_new_key")
url = "{0}/generate_new_key".format(self.base)
if not (type(encpk2) == type("") or type(encpk2) == type(u"")):
encpk2 = json.dumps(encpk2)
values = {"public": local_key['public'], "public-hash": local_key['public-hash'], "encpk2": encpk2, "serverid": serverid} # don't send private to anyone ever
return self.client.get(url, values)
@require_token
def diff(self, return_objs, from_version, to_version = None):
""" Get the difference between two versions of the objects in the box.
return_objs -- How to return the results, either "diff", "objects", "ids".
"diff" means that a diff (added, changed, removed) will be returned
"objects" means that the "to" version of full objects that have been changed will be returned
"ids" means that the ids of objects that have changed will be returned
from_version -- The base version of the diff
to_version -- The end version of the diff (optional, defaults to latest version)
"""
self._debug("Called API: diff with return_objs: {0}, from_version: {1}, to_version: {2}".format(return_objs, from_version, to_version))
url = "{0}/diff".format(self.base)
params = {'from_version': from_version, "return_objs": return_objs}
if to_version is not None:
params['to_version'] = to_version
return self.client.get(url, params)
@require_token
def add_file(self, version, file_id, file_data, contenttype):
""" Add a file to the database.
version -- The latest version of the box
file_id -- The file ID
file_data -- The actual file data to upload
contenttype -- The Content-Type of the file
"""
self._debug("Called API: add_file with version: {0}, file_id: {1}, contenttype: {2}, file_data: {3}".format(version, file_id, contenttype, value_truncate(file_data)))
url = "{0}/files".format(self.base)
values = {"id": file_id, "version": version}
return self.client.req_file(url, values, "PUT", file_data, contenttype)
@require_token
def delete_file(self, version, file_id):
""" Delete a file from the database.
version -- The latest version of the box
file_id -- The file ID to delete
"""
self._debug("Called API: delete with version: {0}, file_id: {1}".format(version, file_id))
url = "{0}/files".format(self.base)
values = {"id": file_id, "version": version}
return self.client.get(url, values, method = "DELETE")
@require_token
def get_file(self, file_id):
""" Get the latest version of a file from the database.
file_id -- The file ID to retrieve
"""
self._debug("Called API: get_file with file_id: {0}".format(file_id))
url = "{0}/files".format(self.base)
params = {'id': file_id}
return self.client.get(url, params, raw = True)
@require_token
def list_files(self):
""" Get a list of the files in the latest version of the box. """
self._debug("Called API: list_files")
url = "{0}/files".format(self.base)
return self.client.get(url)
@require_token
def link_remote_box(self, remote_address, remote_box, remote_token):
""" Link a remote box with a local box. """
self._debug("Called API: link_remote_box, on remote_address '{0}', remote_box '{1}', remote_token '{2}'".format(remote_address, remote_box, remote_token))
url = "{0}/link_remote_box".format(self.base)
return self.client.get(url, {'remote_address': remote_address, 'remote_box': remote_box, 'remote_token': remote_token})
@require_token
def listen_diff(self, observer, raw_observer = False):
""" Listen to this box using a websocket. Call the observer when there's an update. """
address = self.address + "ws"
if address[0:6] == "https:":
address = "wss" + address[5:]
elif address[0:5] == "http:":
address = "ws" + address[4:]
else:
raise Exception("IndxClient: Unknown scheme to URL: {0}".format(address))
def filter(message):
if raw_observer:
observer(message)
else:
try:
if message['action'] == 'diff' and message['operation'] == 'update':
observer(message)
else:
logging.debug("Receive a non diff-update message to liste_diff observer - ignoring it.")
except Exception as e:
logging.error("IndxClient listen_diff, error trying to filter incoming message: {0}".format(e))
wsclient = IndxWebSocketClient(address, filter, token = self.token)
return wsclient
# no require token
def connect_ws(self, private_key, key_hash, observer, remote_encpk2, webserver):
address = self.address + "ws"
if address[0:6] == "https:":
address = "wss" + address[5:]
elif address[0:5] == "http:":
address = "ws" + address[4:]
else:
raise Exception("IndxClient: Unknown scheme to URL: {0}".format(address))
wsclient = IndxWebSocketClient(address, observer, self.keystore, webserver = webserver, keyauth = {"key_hash": key_hash, "private_key": private_key, "encpk2": remote_encpk2})
return wsclient
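# Illustrative end-to-end sketch (hypothetical server URL, box name and
# credentials; defined but never called). The usual client flow is to
# authenticate with IndxClientAuth (defined later in this module), request a
# token for a box, then build an IndxClient with that token and the shared
# HTTP client. All calls return Twisted Deferreds, matching the conventions
# used throughout this module.
def _example_client_flow():
    """ Authenticate, fetch a token and read the latest box contents. """
    auth = IndxClientAuth("http://localhost:8211/", "example-app")
    def authed_cb(response):
        def token_cb(token):
            client = IndxClient("http://localhost:8211/", "example-box",
                                "example-app", token=token, client=auth.client)
            client.get_latest().addCallbacks(
                lambda resp: logging.info("latest: {0}".format(resp)),
                lambda failure: logging.error("get_latest failed: {0}".format(failure)))
        auth.get_token("example-box").addCallbacks(token_cb, logging.error)
    auth.auth_plain("username", "password").addCallbacks(authed_cb, logging.error)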
class IndxWebSocketClient:
def __init__(self, address, observer, keystore, webserver = None, token = None, keyauth = None, appid = None):
self.address = address
self.token = token
self.keyauth = keyauth
self.keystore = keystore
self.webserver = webserver # for looking up account when connecting back etc.
indx_observer = observer
appid = appid or "IndxWebSocketClient"
if token is None and keyauth is None:
raise Exception("Token or Keyauth must not be None.")
logging.debug("IndxWebSocketClient opening to {0}".format(self.address))
class IndxClientProtocol(WebSocketClientProtocol):
def onMessage(self, payload, isBinary):
try:
logging.debug("IndxClientProtocol onMessage, payload {0}".format(payload))
#data = cjson.decode(payload, all_unicode=True)
data = json.loads(payload)
if data.get("sessionid"):
# when server starts, do keys auth
self.sessionid = data.get("sessionid")
if keyauth:
return self.do_key_auth()
self.on_response(data)
except Exception as e:
logging.error("IndxWebSocketClient Exception: {0}".format(e))
logging.error(traceback.format_exc())
logging.error("IndxWebSocketClient can't decode JSON, ignoring message: {0}".format(payload))
def onOpen(self):
if token is not None:
msg = {"action": "auth", "token": token}
self.on_response = self.respond_to_auth
                    self.sendMessage(json.dumps(msg))
elif keyauth is not None:
self.on_response = lambda x: logging.debug("Waiting for sessionid before we can auth.")
# manage state by setting a response function each time
def send_to_observer(self, data):
# manage connections coming back from server first..
# TODO imple
if data.get("action") == "diff" and data.get("operation") == "start":
def store_cb(store):
logging.debug("WebSocketsHandler listen_diff, store_cb: {0}".format(store))
def observer_local(diff):
""" Receive an update from the server. """
logging.debug("WebSocketsHandler listen_diff observer notified: {0}".format(diff))
                            self.sendMessage(json.dumps({"action": "diff", "operation": "update", "data": diff}))
store.listen(observer_local) # no callbacks, nothing to do
# self.token is set by 'login_keys' below
                    self.token.get_store().addCallbacks(store_cb, lambda failure: self.sendMessage(json.dumps({"success": False, "error": "500 Internal Server Error"})))
elif data.get("action") == "login_keys":
try:
signature, key_hash, algo, method, appid, encpk2 = data['signature'], data['key_hash'], data['algo'], data['method'], data['appid'], data['encpk2']
except Exception as e:
logging.error("IndxClient/ASync login_keys error getting all parameters.")
                        return self.sendMessage(json.dumps({"success": False, "error": "400 Bad Request"}))
def win(resp, data):
# authenticated now - state of this isn't saved though, we get a token immediately instead
username, password, boxid = resp
origin = "/ws" # TODO double-check this
# get token, return that
def got_acct(acct):
if acct == False:
return self.send401()
db_user, db_pass = acct
def token_cb(new_token):
self.token = new_token
def store_cb(store):
# success, send token back to user
msg = {"action": "response", "success": True, "token": new_token.id, "respond_to": "login_keys"}
if data.get("requestid"):
msg['requestid'] = data.get("requestid")
                                    return self.sendMessage(json.dumps(msg))
                                new_token.get_store().addCallbacks(store_cb, lambda failure: self.sendMessage(json.dumps({"success": False, "error": "500 Internal Server Error"})))
                            # TODO extract IP from 'address' above
                            webserver.tokens.new(username,password,boxid,appid,origin,"::1",webserver.server_id).addCallbacks(token_cb, lambda failure: self.sendMessage(json.dumps({"success": False, "error": "500 Internal Server Error"})))
                        webserver.database.lookup_best_acct(boxid, username, password).addCallbacks(got_acct, lambda conn: self.sendMessage(json.dumps({"success": False, "error": "500 Internal Server Error"})))
                    def fail(empty):
                        self.sendMessage(json.dumps({"success": False, "error": "401 Unauthorized"}))
auth_keys(keystore, signature, key_hash, algo, method, self.sessionid, encpk2).addCallbacks(lambda resp: win(resp, data), fail)
else:
# otherwise send diffs back to indx
indx_observer(data)
def respond_to_auth(self, data):
if data['success']:
if data.get("token"):
self.token = data.get("token")
self.on_response = self.send_to_observer
msg = {"action": "diff", "operation": "start"}
                    self.sendMessage(json.dumps(msg))
else:
logging.error("IndxWebSocketClient WebSocket auth failure.")
def do_key_auth(self):
try:
SSH_MSG_USERAUTH_REQUEST = "50"
method = "publickey"
algo = "SHA512"
key_hash, private_key, encpk2 = keyauth['key_hash'], keyauth['private_key'], keyauth['encpk2']
if not (type(encpk2) == type("") or type(encpk2) == type(u"")):
encpk2 = json.dumps(encpk2)
ordered_signature_text = '{0}\t{1}\t"{2}"\t{3}\t{4}'.format(SSH_MSG_USERAUTH_REQUEST, self.sessionid, method, algo, key_hash)
signature = rsa_sign(private_key, ordered_signature_text)
values = {"action": "login_keys", "signature": signature, "key_hash": key_hash, "algo": algo, "method": method, "appid": appid, "encpk2": encpk2}
self.on_response = self.respond_to_auth
                    self.sendMessage(json.dumps(values))
except Exception as e:
logging.error(Failure(e))
self.factory = WebSocketClientFactory(self.address)
self.factory.protocol = IndxClientProtocol
connectWS(self.factory)
class IndxHTTPClient:
""" An HTTP requests client with cookie jar. """
def __init__(self, params):
self.params = params
""" Set up a cookies-enabled opener locally. """
self.cj = cookielib.LWPCookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
def get_session_identifier(self, address):
""" Get the identifier for the INDX session (initiates a session if necessary). """
return_d = Deferred()
def check_cookies():
for cookie in self.cj:
logging.debug("COOKIE: {0}".format(cookie))
if cookie.name == "TWISTED_SESSION":
return cookie.value
return None
existing = check_cookies()
if existing is not None:
return_d.callback(existing)
else:
def whoami_cb(response):
session_id = check_cookies()
if session_id is not None:
return_d.callback(session_id)
else:
return_d.errback(Failure(Exception("No session ID was available ")))
# do a request to start a session and get a cookie
self.get("{0}auth/whoami".format(address)).addCallbacks(whoami_cb, return_d.errback)
return return_d
def get(self, url, values = None, raw = False, method = "GET"):
""" Do a GET, decode the result JSON and return it. """
logging.debug("GET request with url: {0}, values: {1}".format(url, values))
url += "?" + self._encode(values)
return self._req(method, url, raw = raw)
def put(self, url, values, content_type="application/json"):
""" Do a PUT, decode the result JSON and return it. """
logging.debug("PUT request with url: {0}, values: {1}".format(url, values))
return self._req_body(url, values, "PUT", content_type)
def post(self, url, values, content_type="application/json"):
""" Do a POST, decode the result JSON and return it. """
logging.debug("POST request with url: {0}, values: {1}".format(url, values))
return self._req_body(url, values, "POST", content_type)
def delete(self, url, values, content_type="application/json"):
""" Do a DELETE, decode the result JSON and return it. """
logging.debug("DELETE request with url: {0}, values: {1}".format(url, values))
return self._req_body(url, values, "DELETE", content_type)
def req_file(self, url, values, method, body, content_type):
""" Do an HTTP request with arguments in the URL, and file data as the body. """
url += "?" + self._encode(values)
headers = [("Content-Type", content_type)]
logging.debug("File request with url: {0}, values: {1}, method: {2}, headers: {3}, body: {4}".format(url, values, method, headers, value_truncate(body)))
return self._req(method, url, body = body, headers = headers)
def _req(self, method, url, body = None, raw = False, headers = []):
""" HTTP request. Uses the global cookie jar. """
logging.debug("HTTP Request of url: {0}, method: {1}, raw: {2}, headers: {3}, body: {4}".format(url, method, raw, headers, value_truncate(body)))
return_d = Deferred()
def do_req():
req = urllib2.Request(url, body)
for header in headers:
req.add_header(header[0], header[1])
req.get_method = lambda: method
response = self.opener.open(req)
the_page = response.read()
logging.debug("HTTP Request: response headers: {0}".format(response.info().headers))
if raw:
logging.debug("HTTP Request, returning raw results")
return the_page
else:
logging.debug("HTTP Request, raw results: {0}".format(value_truncate(the_page)))
status = json.loads(the_page)
logging.debug("HTTP Request, returning JSON decoded results: {0}".format(status))
return status
threads.deferToThread(lambda empty: do_req(), None).addCallbacks(return_d.callback, return_d.errback)
return return_d
def _encode(self, values):
""" Encode some values, either a dict or a list of tuples. """
logging.debug("Encode called with values: {0}".format(values))
# encode values separately because values may be a list of tuples
params = urllib.urlencode(self.params)
if values is None or len(values) == 0:
logging.debug("Encode is returning basic params: {0}".format(params))
return params
encoded = params + "&" + urllib.urlencode(values)
logging.debug("Encode is returning encoded values: {0}".format(encoded))
return encoded
def _req_body(self, url, values, method, content_type):
""" Do an HTTP request with arguments in the body (POST/PUT/DELETE etc), using the specified method.
"""
headers = [("Content-Type", content_type)]
logging.debug("Body request with url: {0}, values: {1}, method: {2}, headers: {3}".format(url, values, method, headers))
return self._req(method, url, body = self._encode(values), headers = headers)
class IndxClientAuth:
""" Authenticate to INDX servers, and get tokens. """
def __init__(self, address, appid, client = None):
self.address = address
self.appid = appid
self.params = {"app": self.appid}
self.is_authed = False
""" Ensure self.server always ends in a / """
if self.address[-1:] != "/":
self.address += "/"
if client is None:
self.client = IndxHTTPClient(self.params)
else:
self.client = client
# Logging Functions
def _log(self, loglevel, message):
""" Write a log message including the server and box information. """
logger = logging.getLogger("indxclientauth")
return logger.log(loglevel, u"%s\t%s", self.address, message)
def _debug(self, message):
return self._log(logging.DEBUG, message)
def _error(self, message):
return self._log(logging.ERROR, message)
# Authentication Functions
def get_token(self, boxid):
""" Get a token for this box. """
return_d = Deferred()
try:
if not self.is_authed:
return_d.errback(Failure(Exception("Must authenticate before getting token.")))
return return_d
url = "{0}auth/get_token".format(self.address)
values = {"box": boxid, "app": self.appid}
self._debug("Getting token")
def responded_cb(status):
if status['code'] != 200:
errmsg = "Getting a token failed"
self._error(errmsg)
raise Exception(errmsg)
self._debug("Getting a token was successful: {0}".format(status['token']))
return_d.callback(status['token'])
self.client.post(url, values).addCallbacks(responded_cb, return_d.errback)
except Exception as e:
return_d.errback(Failure(e))
return return_d
def auth_plain(self, username, password):
""" Plain authentication. """
return_d = Deferred()
try:
self.is_authed = False
url = "{0}auth/login".format(self.address)
values = {"username": username, "password": password}
self._debug("Calling auth_plain")
# TODO change client.post etc to be async using twisted web clients
def responded_cb(status):
if status['code'] != 200:
errmsg = "Authentication failed"
self._error(errmsg)
raise Exception(errmsg)
self._debug("Authentication successful")
self.is_authed = True
return_d.callback(status)
self.client.post(url, values).addCallbacks(responded_cb, return_d.errback)
except Exception as e:
return_d.errback(Failure(e))
return return_d
def auth_keys(self, private_key, key_hash, remote_encpk2):
""" Key based authentication, similar to RFC4252. """
return_d = Deferred()
try:
SSH_MSG_USERAUTH_REQUEST = "50"
method = "publickey"
algo = "SHA512"
self.is_authed = False
if not (type(remote_encpk2) == type("") or type(remote_encpk2) == type(u"")):
remote_encpk2 = json.dumps(remote_encpk2)
def session_id_cb(sessionid):
ordered_signature_text = '{0}\t{1}\t"{2}"\t{3}\t{4}'.format(SSH_MSG_USERAUTH_REQUEST, sessionid, method, algo, key_hash)
signature = rsa_sign(private_key, ordered_signature_text)
url = "{0}auth/login_keys".format(self.address)
values = {"signature": signature, "key_hash": key_hash, "algo": algo, "method": method, "encpk2": remote_encpk2}
self._debug("Calling auth_keys")
def responded_cb(status):
if status['code'] != 200:
errmsg = "Authentication failed"
self._error(errmsg)
raise Exception(errmsg)
self._debug("Authentication successful")
self.is_authed = True
return_d.callback(status)
# TODO change client.post etc to be async using twisted web clients
self.client.post(url, values).addCallbacks(responded_cb, return_d.errback)
self.client.get_session_identifier(self.address).addCallbacks(session_id_cb, return_d.errback)
except Exception as e:
return_d.errback(Failure(e))
return return_d
# PKI functions from indx.crypto (copied to remove the dependency on INDX.)
def rsa_sign(private_key, plaintext):
""" Hash and sign a plaintext using a private key. Verify using rsa_verify with the public key. """
hsh = sha512_hash(plaintext)
PRNG = Crypto.Random.OSRNG.posix.new().read
signature = private_key.sign(hsh, PRNG)
return signature[0]
def sha512_hash(src):
h = Crypto.Hash.SHA512.new()
h.update(src)
return h.hexdigest()
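# Illustrative sketch (hypothetical addition, never called): signing a
# challenge string with a freshly generated RSA key. The 2048-bit key size is
# an assumption for demonstration only; the receiving side is expected to
# verify with the matching rsa_verify from indx.crypto using the public key.
def _example_rsa_sign():
    """ Generate a throwaway key and sign a string with rsa_sign. """
    prng = Crypto.Random.OSRNG.posix.new().read
    private_key = Crypto.PublicKey.RSA.generate(2048, prng)
    signature = rsa_sign(private_key, '50\tsession-id\t"publickey"\tSHA512\tdeadbeef')
    assert isinstance(signature, long)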
|
h4ck3rm1k3/pacman | refs/heads/master | test/pacman/tests/database011.py | 28 | # TODO: these are labeled as database packages because they sure seem to me to
# be database-type operations. In their current implementation however they are
# calling -U and -R rather than -D. Obviously the tests will need to be updated
# if this changes.
self.description = "Install a package with --dbonly, no files touched"
p = pmpkg("dummy")
p.files = ["bin/dummy",
"usr/man/man1/dummy.1"]
self.addpkg(p)
self.args = "-U --dbonly %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
for f in p.files:
self.addrule("!FILE_EXIST=%s" % f)
|
pigeonflight/strider-plone | refs/heads/master | docker/appengine/lib/django-1.3/django/contrib/localflavor/uk/uk_regions.py | 347 | """
Sources:
English regions: http://www.statistics.gov.uk/geography/downloads/31_10_01_REGION_names_and_codes_12_00.xls
Northern Ireland regions: http://en.wikipedia.org/wiki/List_of_Irish_counties_by_area
Welsh regions: http://en.wikipedia.org/wiki/Preserved_counties_of_Wales
Scottish regions: http://en.wikipedia.org/wiki/Regions_and_districts_of_Scotland
"""
from django.utils.translation import ugettext_lazy as _
ENGLAND_REGION_CHOICES = (
("Bedfordshire", _("Bedfordshire")),
("Buckinghamshire", _("Buckinghamshire")),
("Cambridgeshire", ("Cambridgeshire")),
("Cheshire", _("Cheshire")),
("Cornwall and Isles of Scilly", _("Cornwall and Isles of Scilly")),
("Cumbria", _("Cumbria")),
("Derbyshire", _("Derbyshire")),
("Devon", _("Devon")),
("Dorset", _("Dorset")),
("Durham", _("Durham")),
("East Sussex", _("East Sussex")),
("Essex", _("Essex")),
("Gloucestershire", _("Gloucestershire")),
("Greater London", _("Greater London")),
("Greater Manchester", _("Greater Manchester")),
("Hampshire", _("Hampshire")),
("Hertfordshire", _("Hertfordshire")),
("Kent", _("Kent")),
("Lancashire", _("Lancashire")),
("Leicestershire", _("Leicestershire")),
("Lincolnshire", _("Lincolnshire")),
("Merseyside", _("Merseyside")),
("Norfolk", _("Norfolk")),
("North Yorkshire", _("North Yorkshire")),
("Northamptonshire", _("Northamptonshire")),
("Northumberland", _("Northumberland")),
("Nottinghamshire", _("Nottinghamshire")),
("Oxfordshire", _("Oxfordshire")),
("Shropshire", _("Shropshire")),
("Somerset", _("Somerset")),
("South Yorkshire", _("South Yorkshire")),
("Staffordshire", _("Staffordshire")),
("Suffolk", _("Suffolk")),
("Surrey", _("Surrey")),
("Tyne and Wear", _("Tyne and Wear")),
("Warwickshire", _("Warwickshire")),
("West Midlands", _("West Midlands")),
("West Sussex", _("West Sussex")),
("West Yorkshire", _("West Yorkshire")),
("Wiltshire", _("Wiltshire")),
("Worcestershire", _("Worcestershire")),
)
NORTHERN_IRELAND_REGION_CHOICES = (
("County Antrim", _("County Antrim")),
("County Armagh", _("County Armagh")),
("County Down", _("County Down")),
("County Fermanagh", _("County Fermanagh")),
("County Londonderry", _("County Londonderry")),
("County Tyrone", _("County Tyrone")),
)
WALES_REGION_CHOICES = (
("Clwyd", _("Clwyd")),
("Dyfed", _("Dyfed")),
("Gwent", _("Gwent")),
("Gwynedd", _("Gwynedd")),
("Mid Glamorgan", _("Mid Glamorgan")),
("Powys", _("Powys")),
("South Glamorgan", _("South Glamorgan")),
("West Glamorgan", _("West Glamorgan")),
)
SCOTTISH_REGION_CHOICES = (
("Borders", _("Borders")),
("Central Scotland", _("Central Scotland")),
("Dumfries and Galloway", _("Dumfries and Galloway")),
("Fife", _("Fife")),
("Grampian", _("Grampian")),
("Highland", _("Highland")),
("Lothian", _("Lothian")),
("Orkney Islands", _("Orkney Islands")),
("Shetland Islands", _("Shetland Islands")),
("Strathclyde", _("Strathclyde")),
("Tayside", _("Tayside")),
("Western Isles", _("Western Isles")),
)
UK_NATIONS_CHOICES = (
("England", _("England")),
("Northern Ireland", _("Northern Ireland")),
("Scotland", _("Scotland")),
("Wales", _("Wales")),
)
UK_REGION_CHOICES = ENGLAND_REGION_CHOICES + NORTHERN_IRELAND_REGION_CHOICES + WALES_REGION_CHOICES + SCOTTISH_REGION_CHOICES
|
robdennis/sideboard | refs/heads/master | tests/plugins/different_versions/rdflib3_0_0/env/lib/python2.7/site-packages/rdflib/plugins/parsers/rdfa/__init__.py | 3 | """
From a Python file, expecting an RDF/XML pretty printed output::
import rdflib.graph as g
graph = g.Graph()
graph.parse('filename.html', format='rdfa')
print graph.serialize(format='pretty-xml')
For details on RDFa, the reader should consult the `RDFa syntax document
<http://www.w3.org/TR/rdfa-syntax>`_.
This is an adapted version of pyRdfa (`W3C RDFa Distiller page
<http://www.w3.org/2007/08/pyRdfa/>`_) by Ivan Herman
"""
import sys
import urllib
import xml.dom.minidom
from rdflib.graph import Graph
from rdflib.namespace import Namespace
from rdflib.term import BNode, URIRef
from rdflib.parser import Parser
from rdflib.plugins.parsers.rdfa.state import ExecutionContext
from rdflib.plugins.parsers.rdfa.parse import parse_one_node
from rdflib.plugins.parsers.rdfa.options import (Options, _add_to_comment_graph,
DIST_NS, ERROR, GENERIC_XML, XHTML_RDFA, HTML5_RDFA)
from rdflib.plugins.parsers.rdfa.transform.headabout import head_about_transform
# These are part of the RDFa spec.
BUILT_IN_TRANSFORMERS = [
head_about_transform
]
# Exception handling. Essentially, all the different exceptions are re-packaged
# into separate exception class, to allow for an easier management on the user
# level
class RDFaError(Exception):
"""Just a wrapper around the local exceptions. It does not add any new
functionality to the Exception class."""
pass
# For some doctype and element name combinations an automatic switch to an
# input mode is done
_HOST_LANG = {
("http://www.w3.org/1999/xhtml", "html"): XHTML_RDFA,
("http://www.w3.org/2000/svg", "svg"): GENERIC_XML
}
class RDFaParser(Parser):
def parse(self, source, sink,
warnings=False, space_preserve=True,
transformers=None, xhtml=True, lax=True):
if transformers is None:
transformers = []
options = Options(warnings, space_preserve, transformers, xhtml, lax)
baseURI = source.getPublicId()
stream = source.getByteStream()
dom = _try_process_source(stream, options)
_process_DOM(dom, baseURI, sink, options)
def _process_DOM(dom, base, graph, options=None):
"""
Core processing. The transformers ("pre-processing") is done on the DOM
tree, the state is initialized, and the "real" RDFa parsing is done.
The result is put into the provided Graph.
The real work is done in the parser function :obj:`parse_one_node`.
:param dom: XML DOM Tree node (for the top level)
:param base: URI for the default "base" value (usually the URI of the file to be processed)
:param options: :obj:`Options` for the distiller
:raise RDFaError: when called via CGI, this encapsulates the possible exceptions raised by the RDFLib serializer or the processing itself
"""
html = dom.documentElement
# Perform the built-in and external transformations on the HTML tree. This is,
# in simulated form, the hGRDDL approach of Ben Adida.
for trans in options.transformers + BUILT_IN_TRANSFORMERS:
trans(html, options)
# Collect the initial state. This takes care of things
# like base, top level namespace settings, etc.
# Ensure the proper initialization.
state = ExecutionContext(html, graph, base=base, options=options)
# The top level subject starts with the current document; this
# is used by the recursion
subject = URIRef(state.base)
# Parse the whole thing recursively and fill the graph.
parse_one_node(html, graph, subject, state, [])
if options.comment_graph.graph != None:
# Add the content of the comment graph to the output.
graph.bind("dist", DIST_NS)
for t in options.comment_graph.graph:
graph.add(t)
def _try_process_source(stream, options):
"""
Tries to parse input as xhtml, xml (e.g. svg) or html(5), modifying options
while figuring out input..
Returns a DOM tree.
"""
parse = xml.dom.minidom.parse
try:
dom = parse(stream)
# Try to second-guess the input type
# This is _not_ really kosher, but the minidom is not really namespace aware...
# In practice the goal is to have the system recognize svg content automatically
# First see if there is a default namespace defined for the document:
top = dom.documentElement
if top.hasAttribute("xmlns"):
key = (top.getAttribute("xmlns"), top.nodeName)
if key in _HOST_LANG:
options.host_language = _HOST_LANG[key]
except:
# XML Parsing error in the input
type, value, traceback = sys.exc_info()
if options.host_language == GENERIC_XML or options.lax == False:
raise RDFaError('Parsing error in input file: "%s"' % value)
# XML Parsing error in the input
msg = "XHTML Parsing error in input file: %s. Falling back on the HTML5 parser" % value
if options != None and options.warnings:
options.comment_graph.add_warning(msg)
# in Ivan's original code he reopened the stream if it was from urllib
if isinstance(stream, urllib.addinfourl):
stream = urllib.urlopen(stream.url)
# Now try to see if and HTML5 parser is an alternative...
try:
from html5lib import HTMLParser, treebuilders
except ImportError:
# no alternative to the XHTML error, because HTML5 parser not available...
msg2 = 'XHTML Parsing error in input file: %s. Though parsing is lax, HTML5 parser not available. Try installing html5lib <http://code.google.com/p/html5lib>' % value
raise RDFaError(msg2)
parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
parse = parser.parse
try:
dom = parse(stream)
# The host language has changed
options.host_language = HTML5_RDFA
except:
# Well, even the HTML5 parser could not do anything with this...
(type, value, traceback) = sys.exc_info()
msg2 = 'Parsing error in input file as HTML5: "%s"' % value
msg3 = msg + '\n' + msg2
            raise RDFaError(msg3)
return dom
|
kcyu1993/ML_course_kyu | refs/heads/master | labs/ex02/template/helpers.py | 1 | # -*- coding: utf-8 -*-
"""some helper functions."""
import numpy as np
def load_data(sub_sample=True, add_outlier=False):
"""Load data and convert it to the metrics system."""
path_dataset = "height_weight_genders.csv"
data = np.genfromtxt(
path_dataset, delimiter=",", skip_header=1, usecols=[1, 2])
height = data[:, 0]
weight = data[:, 1]
gender = np.genfromtxt(
path_dataset, delimiter=",", skip_header=1, usecols=[0],
converters={0: lambda x: 0 if b"Male" in x else 1})
# Convert to metric system
height *= 0.025
weight *= 0.454
# sub-sample
if sub_sample:
height = height[::50]
weight = weight[::50]
if add_outlier:
# outlier experiment
height = np.concatenate([height, [1.1, 1.2]])
weight = np.concatenate([weight, [51.5/0.454, 55.3/0.454]])
return height, weight, gender
def standardize(x):
"""Standardize the original data set."""
mean_x = np.mean(x)
x = x - mean_x
std_x = np.std(x)
x = x / std_x
return x, mean_x, std_x
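# Illustrative sketch (hypothetical addition, never called): after
# standardization the sample has zero mean and unit standard deviation.
def _example_standardize():
    """Show standardize on a small array."""
    x = np.array([1.0, 2.0, 3.0, 4.0])
    std_x, mean_x, sigma = standardize(x)
    assert abs(std_x.mean()) < 1e-12
    assert abs(std_x.std() - 1.0) < 1e-12
    assert mean_x == 2.5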
def build_model_data(height, weight):
"""Form (y,tX) to get regression data in matrix form."""
y = weight
x = height
num_samples = len(y)
tx = np.c_[np.ones(num_samples), x]
return y, tx
def batch_iter(y, tx, batch_size, shuffle=True):
"""
Generate a minibatch iterator for a dataset.
Takes as input two iterables (here the output desired values 'y' and the input data 'tx')
Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.
Data can be randomly shuffled so that ordering in the original data does not bias the minibatches.
Example of use :
for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
<DO-SOMETHING>
"""
data_size = len(y)
num_batches = int(np.ceil(data_size / batch_size))
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_y = y[shuffle_indices]
shuffled_tx = tx[shuffle_indices]
else:
shuffled_y = y
shuffled_tx = tx
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
if start_index != end_index:
yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]
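# Hedged usage sketch (not in the original helpers): one epoch of minibatch
# gradient descent for least squares, wired through batch_iter above. The
# gradient assumes the MSE cost L(w) = (1 / 2N) * ||y - tx.dot(w)||^2.
def sgd_epoch_example(y, tx, w, gamma=0.1, batch_size=32):
    """Run one epoch of minibatch SGD and return the updated weights."""
    for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size):
        err = minibatch_y - minibatch_tx.dot(w)
        grad = -minibatch_tx.T.dot(err) / len(minibatch_y)
        w = w - gamma * grad
    return w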
|
knights-lab/NINJA-SHOGUN | refs/heads/master | shogun/scripts/old/kegg_rarefaction_csv.py | 1 | #!/usr/bin/env python
"""
Copyright 2015-2017 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
from __future__ import print_function
import argparse
import numpy as np
import csv
from multiprocessing import Pool, cpu_count
def make_arg_parser():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i', '--input', help='The input file.', required=True)
parser.add_argument('-o', '--output', help='If nothing is given, then stdout, else write to file')
return parser
def count_lines(filename):
lines = 0
buffer = bytearray(2048)
with open(filename, 'rb') as f:
while f.readinto(buffer) > 0:
lines += buffer.count(b'\n')
return lines
def write_shuffles(args):
    size, num_lines, inf_path, outf_path = args
    # np.random.shuffle shuffles in place and returns None, so shuffle the
    # index array first, then take the first `size` indices and sort them.
    indices = np.arange(num_lines)
    np.random.shuffle(indices)
    chosen = np.sort(indices[:size])
    with open(inf_path, 'r') as inf:
        csv_inf = csv.reader(inf)
        with open('%d.%s' % (size, outf_path), 'a') as outf:
            csv_outf = csv.writer(outf)
            for i, line in enumerate(csv_inf):
                ind = chosen.searchsorted(i)
                if ind < chosen.size and chosen[ind] == i:
                    line[0] = '.'.join((line[0], str(size)))
                    csv_outf.writerow(line)
def main():
parser = make_arg_parser()
args = parser.parse_args()
# Hard-coded line count for the full table; count_lines(args.input) above
# could compute it exactly at the cost of one pass over the file.
num_lines = 100000000
sizes = [np.power(10, i+1) for i in range(7)]
pool = Pool(processes=cpu_count())
pool.map(write_shuffles, zip(sizes, [num_lines]*7, [args.input]*7, [args.output]*7))
if __name__ == '__main__':
main()
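# Hedged usage note (not in the original script): the expected invocation,
# producing one rarefied CSV per depth in `sizes`, e.g.
#
#     python kegg_rarefaction_csv.py -i kegg_counts.csv -o rarefied.csv
#
# The input file name here is hypothetical; -i and -o are the flags defined
# in make_arg_parser above.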
|
patcon/open-cabinet | refs/heads/master | venv/lib/python2.7/site-packages/django/db/migrations/executor.py | 103 | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
pass # Nothing to do for an empty plan
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in reverse order they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
# The old state is kept as-is; we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
We do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, so as to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but we still want to correctly maintain the applied state
of the squash migration.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
table = model._meta.db_table
db_field = model._meta.get_field(operation.name).column
fields = self.connection.introspection.get_table_description(self.connection.cursor(), table)
if db_field not in (f.name for f in fields):
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
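# Hedged usage sketch (not part of Django): the typical way callers drive
# MigrationExecutor -- construct it on a connection and migrate to the
# latest nodes in the migration graph.
def _example_migrate_to_latest(connection, progress_callback=None):
    """Apply every unapplied migration on the given connection."""
    executor = MigrationExecutor(connection, progress_callback)
    targets = executor.loader.graph.leaf_nodes()
    executor.migrate(targets)
    return targets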
|
akrylysov/yozuch | refs/heads/master | yozuch/builder.py | 1 | """
Blog builder.
"""
import functools
import os
import sys
import jinja2
from yozuch import logger, validator
from yozuch.context import Context
from yozuch.utils import emptydir, import_module, import_module_member, is_external_url
def build(project_dir, config_overrides=None, validate=False, output_dir=None):
if not os.path.isfile(os.path.join(project_dir, 'config.py')):
logger.fatal('Unable to find config.py in {}. Wrong project directory?'.format(project_dir))
sys.exit(1)
context = Context(config_overrides, project_dir, output_dir)
_load_theme(context)
_register_plugins(context)
_load_sources(context)
env = _create_template_env(context.theme_path, context.templates_path, context.pages_dir)
emptydir(context.output_path)
generators = list(_init_generators(context))
env.globals['url_for'] = functools.partial(_url_for, context.references)
env.globals['url_exists_for'] = functools.partial(_url_exists_for, context.references)
_generate_content(context, generators, env, context.output_path)
if validate:
validator.validate(context.config, context.output_path)
logger.info('Done!')
return context.output_path
def _load_theme(context):
context.config.update_from_directory(context.theme_path, replace_duplicates=False)
asset_loader = ('yozuch.loaders.asset.AssetLoader', {'path': os.path.join(context.theme_path, 'assets')})
context.config['LOADERS'].insert(0, asset_loader)
def _load_sources(context):
for name, kwargs in context.config['LOADERS']:
cls = import_module_member(name)
if cls is None:
raise LookupError('Unable to find loader with name "{}"'.format(name))
path = kwargs.pop('path', None)
loader = cls(**kwargs)
if path is None:
path = os.path.join(context.project_path, loader.name)
if not os.path.isdir(path):
continue
sources = list(loader.load(context, path))
if sources:
logger.info('Loaded {} {} from {}'.format(len(sources), loader.name, path))
context.site.setdefault(loader.name, []).extend(sources)
def _register_plugins(context):
for name in context.config['PLUGINS']:
mod = import_module(name)
if mod is None:
raise LookupError('Unable to find plugin with name "{}"'.format(name))
mod.register(context)
def _create_template_env(theme_dir, templates_dir, pages_dir):
loaders = []
if os.path.isdir(templates_dir):
loaders.append(jinja2.FileSystemLoader(templates_dir))
if os.path.isdir(pages_dir):
loaders.append(jinja2.PrefixLoader({'!pages': jinja2.FileSystemLoader(pages_dir)}))
theme_loader = jinja2.FileSystemLoader(os.path.join(theme_dir, 'templates'))
loaders.append(jinja2.PrefixLoader({'!theme': theme_loader}))
loaders.append(theme_loader)
loaders.append(jinja2.PackageLoader('yozuch', os.path.join('themes', 'base', 'templates')))
env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders))
return env
def _init_generators(context):
theme_default_templates = context.config['THEME_DEFAULT_TEMPLATES']
for route in context.config['VIEWS']:
url_template, name, generator, kwargs = route
if 'template' not in kwargs and name in theme_default_templates:
kwargs['template'] = theme_default_templates[name]
cls = import_module_member(generator)
if cls is not None:
yield cls(url_template, name, **kwargs)
else:
raise LookupError('Unable to find generator with name "{}" for url "{}".'.format(generator, url_template))
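# Hedged illustration (generator path, URL and template name are hypothetical):
# the 4-tuple route format consumed by _init_generators above -- URL template,
# route name, dotted generator path, and keyword arguments such as a template.
_EXAMPLE_VIEW = ('/archive/', 'archive',
                 'yozuch.generators.ArchiveGenerator', {'template': 'archive.html'})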
def _url_for(references, name_or_url):
if name_or_url in references:
return references[name_or_url]
if name_or_url.startswith('/') or is_external_url(name_or_url):
return name_or_url
raise LookupError('Unable to resolve URL for {}'.format(name_or_url))
def _url_exists_for(references, name_or_url):
return name_or_url in references
def _generate_content(context, generators, env, output_dir):
for gen in generators:
for entry in gen.generate(context):
if entry.url in context.entries:
logger.warning('URL {} has already been registered by {}'.format(entry.url, context.entries[entry.url]))
context.entries[entry.url] = entry
if entry.id is None:
continue
if entry.id in context.references:
logger.warning('Entry ID {} has already been registered'.format(entry.id))
context.references[entry.id] = entry.url
for entry in context.entries.values():
entry.publish(context)
logger.info('Writing content...')
for entry in context.entries.values():
entry.write(context, env, output_dir)
|
tgrochow/avango | refs/heads/master | attic/avango-menu/python/avango/menu/_RadioButtonGroup.py | 6 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2010 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.script
from avango.script import field_has_changed
import Preferences
class RadioButtonGroup(avango.script.Script):
CheckedRadioButton = avango.script.SFObject()
RadioButtons = avango.script.MFObject()
def __init__(self):
    self.super(RadioButtonGroup).__init__()
    self.checked_changed = False
    self.buttons_changed = False
    self.CheckedRadioButton.value = None
@field_has_changed(CheckedRadioButton)
def checked_radio_button_changed(self):
self.checked_changed = True
@field_has_changed(RadioButtons)
def radio_buttons_changed(self):
self.buttons_changed = True
def evaluate(self):
if self.checked_changed:
self.update_checked()
self.checked_changed = False
def update_checked(self):
for button in self.RadioButtons.value:
if button == self.CheckedRadioButton.value:
continue
button.CheckState.value = False
self.checked_changed = False
def cleanup(self):
self.disconnect_all_fields()
self.CheckedRadioButton.value = None
self.RadioButtons.value = []
def __del__(self):
if Preferences.print_destruction_of_menu_objects:
print "RadioButtonGroup deleted"
|
defionscode/ansible-modules-extras | refs/heads/devel | cloud/misc/proxmox.py | 15 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxmox
short_description: management of instances in Proxmox VE cluster
description:
- allows you to create/delete/stop instances in Proxmox VE cluster
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
version_added: "2.0"
options:
api_host:
description:
- the host of the Proxmox VE cluster
required: true
api_user:
description:
- the user to authenticate with
required: true
api_password:
description:
- the password to authenticate with
- you can use PROXMOX_PASSWORD environment variable
default: null
required: false
vmid:
description:
- the instance id
default: null
required: true
validate_certs:
description:
- enable / disable https certificate verification
default: false
required: false
type: boolean
node:
description:
- Proxmox VE node on which the new VM will be created
- required only for C(state=present)
- for other states it will be autodiscovered
default: null
required: false
password:
description:
- the instance root password
- required only for C(state=present)
default: null
required: false
hostname:
description:
- the instance hostname
- required only for C(state=present)
default: null
required: false
ostemplate:
description:
- the template for VM creating
- required only for C(state=present)
default: null
required: false
disk:
description:
- hard disk size in GB for instance
default: 3
required: false
cpus:
description:
- numbers of allocated cpus for instance
default: 1
required: false
memory:
description:
- memory size in MB for instance
default: 512
required: false
swap:
description:
- swap memory size in MB for instance
default: 0
required: false
netif:
description:
- specifies network interfaces for the container
default: null
required: false
type: string
ip_address:
description:
- specifies the address the container will be assigned
default: null
required: false
type: string
onboot:
description:
- specifies whether a VM will be started during system bootup
default: false
required: false
type: boolean
storage:
description:
- target storage
default: 'local'
required: false
type: string
cpuunits:
description:
- CPU weight for a VM
default: 1000
required: false
type: integer
nameserver:
description:
- sets DNS server IP address for a container
default: null
required: false
type: string
searchdomain:
description:
- sets DNS search domain for a container
default: null
required: false
type: string
timeout:
description:
- timeout for operations
default: 30
required: false
type: integer
force:
description:
- force operations
- can be used only with states C(present), C(stopped), C(restarted)
- with C(state=present) the force option allows an existing container to be overwritten
- with states C(stopped), C(restarted) it allows the instance to be force-stopped
default: false
required: false
type: boolean
state:
description:
- Indicate desired state of the instance
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
default: present
notes:
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
requirements: [ "proxmoxer", "requests" ]
author: "Sergei Antipov @UnderGreen"
'''
EXAMPLES = '''
# Create new container with minimal options
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options with force(it will rewrite existing container)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Start container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
# Stop container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
# Stop container with force
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
# Restart container (a stopped or mounted container can't be restarted)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
# Remove container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
'''
import os
import time
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
VZ_TYPE=None
def get_instance(proxmox, vmid):
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
def content_check(proxmox, node, ostemplate, storage):
return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ]
def node_check(proxmox, node):
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
proxmox_node = proxmox.nodes(node)
kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
if VZ_TYPE =='lxc':
kwargs['cpulimit']=cpus
kwargs['rootfs']=disk
else:
kwargs['cpus']=cpus
kwargs['disk']=disk
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
while timeout:
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s'
% proxmox_node.tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def start_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s'
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def stop_instance(module, proxmox, vm, vmid, timeout, force):
if force:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
else:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s'
                 % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def umount_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for the VM to be unmounted. Last line in task before timeout: %s'
                 % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
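# Hedged refactoring sketch (not used by this module): the polling loop that
# the helpers above repeat, factored into one place. `what` is a short label
# such as 'the VM to stop' for the timeout message.
def wait_for_task(module, proxmox_node, taskid, timeout, what):
    while timeout:
        status = proxmox_node.tasks(taskid).status.get()
        if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
            return True
        timeout = timeout - 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for %s. Last line in task before timeout: %s'
                             % (what, proxmox_node.tasks(taskid).log.get()[:1]))
        time.sleep(1)
    return False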
def main():
module = AnsibleModule(
argument_spec = dict(
api_host = dict(required=True),
api_user = dict(required=True),
api_password = dict(no_log=True),
vmid = dict(required=True),
validate_certs = dict(type='bool', default='no'),
node = dict(),
password = dict(no_log=True),
hostname = dict(),
ostemplate = dict(),
disk = dict(type='int', default=3),
cpus = dict(type='int', default=1),
memory = dict(type='int', default=512),
swap = dict(type='int', default=0),
netif = dict(),
ip_address = dict(),
onboot = dict(type='bool', default='no'),
storage = dict(default='local'),
cpuunits = dict(type='int', default=1000),
nameserver = dict(),
searchdomain = dict(),
timeout = dict(type='int', default=30),
force = dict(type='bool', default='no'),
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
)
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
state = module.params['state']
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
node = module.params['node']
disk = module.params['disk']
cpus = module.params['cpus']
memory = module.params['memory']
swap = module.params['swap']
storage = module.params['storage']
timeout = module.params['timeout']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
except KeyError, e:
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
global VZ_TYPE
VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
except Exception, e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
if state == 'present':
try:
if get_instance(proxmox, vmid) and not module.params['force']:
    module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
# A bare tuple is always truthy, so each required field is tested explicitly.
elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
    module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
elif not node_check(proxmox, node):
module.fail_json(msg="node '%s' not exists in cluster" % node)
elif not content_check(proxmox, node, module.params['ostemplate'], storage):
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
% (module.params['ostemplate'], node, storage))
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
password = module.params['password'],
hostname = module.params['hostname'],
ostemplate = module.params['ostemplate'],
netif = module.params['netif'],
ip_address = module.params['ip_address'],
onboot = int(module.params['onboot']),
cpuunits = module.params['cpuunits'],
nameserver = module.params['nameserver'],
searchdomain = module.params['searchdomain'],
force = int(module.params['force']))
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
except Exception, e:
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
elif state == 'started':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
if start_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s started" % vmid)
except Exception, e:
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'stopped':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
if module.params['force']:
if umount_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
else:
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
"You can use force option to umount it.") % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
except Exception, e:
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'restarted':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
start_instance(module, proxmox, vm, vmid, timeout) ):
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
except Exception, e:
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'absent':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
module.exit_json(changed=True, msg="VM %s removed" % vmid)
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for the VM to be removed. Last line in task before timeout: %s'
                 % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
except Exception, e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
# import module snippets
from ansible.module_utils.basic import *
main()
|
jscott413/maidsinharlem | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/filedb/structfile.py | 96 | # Copyright 2009 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from array import array
from copy import copy
from struct import calcsize
from whoosh.compat import BytesIO, bytes_type
from whoosh.compat import dump as dump_pickle
from whoosh.compat import load as load_pickle
from whoosh.compat import array_frombytes, array_tobytes
from whoosh.system import _INT_SIZE, _SHORT_SIZE, _FLOAT_SIZE, _LONG_SIZE
from whoosh.system import IS_LITTLE
from whoosh.system import pack_byte, unpack_byte, pack_sbyte, unpack_sbyte
from whoosh.system import pack_ushort, unpack_ushort
from whoosh.system import pack_ushort_le, unpack_ushort_le
from whoosh.system import pack_int, unpack_int, pack_uint, unpack_uint
from whoosh.system import pack_uint_le, unpack_uint_le
from whoosh.system import pack_long, unpack_long, pack_ulong, unpack_ulong
from whoosh.system import pack_float, unpack_float
from whoosh.util.varints import varint, read_varint
from whoosh.util.varints import signed_varint, decode_signed_varint
_SIZEMAP = dict((typecode, calcsize(typecode)) for typecode in "bBiIhHqQf")
_ORDERMAP = {"little": "<", "big": ">"}
_types = (("sbyte", "b"), ("ushort", "H"), ("int", "i"),
("long", "q"), ("float", "f"))
# Main class
class StructFile(object):
"""Returns a "structured file" object that wraps the given file object and
provides numerous additional methods for writing structured data, such as
"write_varint" and "write_long".
"""
def __init__(self, fileobj, name=None, onclose=None):
self.file = fileobj
self._name = name
self.onclose = onclose
self.is_closed = False
self.is_real = hasattr(fileobj, "fileno")
if self.is_real:
self.fileno = fileobj.fileno
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._name)
def __str__(self):
return self._name
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __iter__(self):
return iter(self.file)
def raw_file(self):
return self.file
def read(self, *args, **kwargs):
return self.file.read(*args, **kwargs)
def readline(self, *args, **kwargs):
return self.file.readline(*args, **kwargs)
def write(self, *args, **kwargs):
return self.file.write(*args, **kwargs)
def tell(self, *args, **kwargs):
return self.file.tell(*args, **kwargs)
def seek(self, *args, **kwargs):
return self.file.seek(*args, **kwargs)
def truncate(self, *args, **kwargs):
return self.file.truncate(*args, **kwargs)
def flush(self):
"""Flushes the buffer of the wrapped file. This is a no-op if the
wrapped file does not have a flush method.
"""
if hasattr(self.file, "flush"):
self.file.flush()
def close(self):
"""Closes the wrapped file.
"""
if self.is_closed:
raise Exception("This file is already closed")
if self.onclose:
self.onclose(self)
if hasattr(self.file, "close"):
self.file.close()
self.is_closed = True
def subset(self, offset, length, name=None):
from whoosh.filedb.compound import SubFile
name = name or self._name
return StructFile(SubFile(self.file, offset, length), name=name)
def write_string(self, s):
"""Writes a string to the wrapped file. This method writes the length
of the string first, so you can read the string back without having to
know how long it was.
"""
self.write_varint(len(s))
self.write(s)
def write_string2(self, s):
self.write(pack_ushort(len(s)) + s)
def write_string4(self, s):
self.write(pack_int(len(s)) + s)
def read_string(self):
"""Reads a string from the wrapped file.
"""
return self.read(self.read_varint())
def read_string2(self):
l = self.read_ushort()
return self.read(l)
def read_string4(self):
l = self.read_int()
return self.read(l)
def get_string2(self, pos):
l = self.get_ushort(pos)
base = pos + _SHORT_SIZE
return self.get(base, l), base + l
def get_string4(self, pos):
l = self.get_int(pos)
base = pos + _INT_SIZE
return self.get(base, l), base + l
def skip_string(self):
l = self.read_varint()
self.seek(l, 1)
def write_varint(self, i):
"""Writes a variable-length unsigned integer to the wrapped file.
"""
self.write(varint(i))
def write_svarint(self, i):
"""Writes a variable-length signed integer to the wrapped file.
"""
self.write(signed_varint(i))
def read_varint(self):
"""Reads a variable-length encoded unsigned integer from the wrapped
file.
"""
return read_varint(self.read)
def read_svarint(self):
"""Reads a variable-length encoded signed integer from the wrapped
file.
"""
return decode_signed_varint(read_varint(self.read))
def write_tagint(self, i):
"""Writes a sometimes-compressed unsigned integer to the wrapped file.
This is similar to the varint methods but uses a less compressed but
faster format.
"""
# Store numbers 0-253 in one byte. Byte 254 means "an unsigned 16-bit
# int follows." Byte 255 means "An unsigned 32-bit int follows."
if i <= 253:
self.write(chr(i))
elif i <= 65535:
self.write("\xFE" + pack_ushort(i))
else:
self.write("\xFF" + pack_uint(i))
def read_tagint(self):
"""Reads a sometimes-compressed unsigned integer from the wrapped file.
This is similar to the varint methods but uses a less compressed but
faster format.
"""
tb = ord(self.read(1))
if tb == 254:
return self.read_ushort()
elif tb == 255:
return self.read_uint()
else:
return tb
def write_byte(self, n):
"""Writes a single byte to the wrapped file, shortcut for
``file.write(chr(n))``.
"""
self.write(pack_byte(n))
def read_byte(self):
return ord(self.read(1))
def write_pickle(self, obj, protocol=-1):
"""Writes a pickled representation of obj to the wrapped file.
"""
dump_pickle(obj, self.file, protocol)
def read_pickle(self):
"""Reads a pickled object from the wrapped file.
"""
return load_pickle(self.file)
def write_sbyte(self, n):
self.write(pack_sbyte(n))
def write_int(self, n):
self.write(pack_int(n))
def write_uint(self, n):
self.write(pack_uint(n))
def write_uint_le(self, n):
self.write(pack_uint_le(n))
def write_ushort(self, n):
self.write(pack_ushort(n))
def write_ushort_le(self, n):
self.write(pack_ushort_le(n))
def write_long(self, n):
self.write(pack_long(n))
def write_ulong(self, n):
self.write(pack_ulong(n))
def write_float(self, n):
self.write(pack_float(n))
def write_array(self, arry):
if IS_LITTLE:
arry = copy(arry)
arry.byteswap()
if self.is_real:
arry.tofile(self.file)
else:
self.write(array_tobytes(arry))
def read_sbyte(self):
return unpack_sbyte(self.read(1))[0]
def read_int(self):
return unpack_int(self.read(_INT_SIZE))[0]
def read_uint(self):
return unpack_uint(self.read(_INT_SIZE))[0]
def read_uint_le(self):
return unpack_uint_le(self.read(_INT_SIZE))[0]
def read_ushort(self):
return unpack_ushort(self.read(_SHORT_SIZE))[0]
def read_ushort_le(self):
return unpack_ushort_le(self.read(_SHORT_SIZE))[0]
def read_long(self):
return unpack_long(self.read(_LONG_SIZE))[0]
def read_ulong(self):
return unpack_ulong(self.read(_LONG_SIZE))[0]
def read_float(self):
return unpack_float(self.read(_FLOAT_SIZE))[0]
def read_array(self, typecode, length):
a = array(typecode)
if self.is_real:
a.fromfile(self.file, length)
else:
array_frombytes(a, self.read(length * _SIZEMAP[typecode]))
if IS_LITTLE:
a.byteswap()
return a
def get(self, position, length):
self.seek(position)
return self.read(length)
def get_byte(self, position):
return unpack_byte(self.get(position, 1))[0]
def get_sbyte(self, position):
return unpack_sbyte(self.get(position, 1))[0]
def get_int(self, position):
return unpack_int(self.get(position, _INT_SIZE))[0]
def get_uint(self, position):
return unpack_uint(self.get(position, _INT_SIZE))[0]
def get_ushort(self, position):
return unpack_ushort(self.get(position, _SHORT_SIZE))[0]
def get_long(self, position):
return unpack_long(self.get(position, _LONG_SIZE))[0]
def get_ulong(self, position):
return unpack_ulong(self.get(position, _LONG_SIZE))[0]
def get_float(self, position):
return unpack_float(self.get(position, _FLOAT_SIZE))[0]
def get_array(self, position, typecode, length):
self.seek(position)
return self.read_array(typecode, length)
class BufferFile(StructFile):
def __init__(self, buf, name=None, onclose=None):
self._buf = buf
self._name = name
self.file = BytesIO(buf)
self.onclose = onclose
self.is_real = False
self.is_closed = False
def subset(self, position, length, name=None):
name = name or self._name
return BufferFile(self.get(position, length), name=name)
def get(self, position, length):
return bytes_type(self._buf[position:position + length])
def get_array(self, position, typecode, length):
a = array(typecode)
array_frombytes(a, self.get(position, length * _SIZEMAP[typecode]))
if IS_LITTLE:
a.byteswap()
return a
class ChecksumFile(StructFile):
def __init__(self, *args, **kwargs):
StructFile.__init__(self, *args, **kwargs)
self._check = 0
self._crc32 = __import__("zlib").crc32
def __iter__(self):
for line in self.file:
self._check = self._crc32(line, self._check)
yield line
def seek(self, *args):
raise Exception("Cannot seek on a ChecksumFile")
def read(self, *args, **kwargs):
b = self.file.read(*args, **kwargs)
self._check = self._crc32(b, self._check)
return b
def write(self, b):
self._check = self._crc32(b, self._check)
self.file.write(b)
def checksum(self):
return self._check & 0xffffffff
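# Hedged usage sketch (not part of Whoosh): write a length-prefixed byte
# string through a ChecksumFile and return the running CRC32 checksum.
def _example_checksummed_write(path, payload):
    sf = ChecksumFile(open(path, "wb"), name=path)
    sf.write_string(payload)
    check = sf.checksum()
    sf.close()
    return check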
|
turbomanage/training-data-analyst | refs/heads/master | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/__init__.py | 7 | # http://www.python.org/dev/peps/pep-0396/
__version__ = '0.2.7'
|
readevalprint/mezzanine | refs/heads/master | mezzanine/core/management/commands/runserver.py | 6 |
import platform
import sys
import django
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.color import supports_color
from django.db import connection
from django.http import Http404
from django.utils.termcolors import colorize
from django.views.static import serve
import mezzanine
class MezzStaticFilesHandler(StaticFilesHandler):
def _should_handle(self, path):
return path.startswith((settings.STATIC_URL, settings.MEDIA_URL))
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
if response.status_code == 404:
locations = (
(settings.STATIC_URL, settings.STATIC_ROOT),
(settings.MEDIA_URL, settings.MEDIA_ROOT),
)
for url, root in locations:
if request.path.startswith(url):
path = request.path.replace(url, "", 1)
try:
return serve(request, path, document_root=root)
except Http404:
# Just return the original 404 response.
pass
return response
def banner():
# Database name - this is just the ``vendor`` attribute of
# the connection backend, with some exceptions where we
# replace it with something else, such as microsoft -> sql server.
conn = connection
db_name = {
"microsoft": "sql server",
}.get(conn.vendor, conn.vendor)
db_name = "%s%s" % (db_name[:1].upper(),
db_name.replace("sql", "SQL").replace("db", "DB")[1:])
# Database version - vendor names mapped to functions that
# retrieve the version, which should be a sequence of things
# to join with dots.
db_version_func = {
"postgresql": lambda: (
conn.pg_version // 10000,
conn.pg_version // 100 % 100,
conn.pg_version % 100,
),
"mysql": lambda: conn.mysql_version,
"sqlite": lambda: conn.Database.sqlite_version_info,
# The remaining backends haven't actually been tested,
# and so their version logic has been gleaned from glancing
# at the code for each backend.
"oracle": lambda: [conn.oracle_version],
"microsoft": lambda: [conn._DatabaseWrapper__get_dbms_version()],
"firebird": lambda: conn.server_version.split(" ")[-1].split("."),
}.get(conn.vendor, lambda: [])
db_version = ".".join(map(str, db_version_func()))
# The raw banner split into lines.
lines = ("""
.....
_d^^^^^^^^^b_
.d'' ``b.
.p' `q.
.d' `b.
.d' `b. * Mezzanine %(mezzanine_version)s
:: :: * Django %(django_version)s
:: M E Z Z A N I N E :: * Python %(python_version)s
:: :: * %(db_name)s %(db_version)s
`p. .q' * %(os_name)s %(os_version)s
`p. .q'
`b. .d'
`q.. ..p'
^q........p^
''''
""" % {
"mezzanine_version": mezzanine.__version__,
"django_version": django.get_version(),
"python_version": sys.version.split(" ", 1)[0],
"db_name": db_name,
"db_version": db_version,
"os_name": platform.system(),
"os_version": platform.release(),
}).splitlines()[2:]
if not supports_color():
return "\n".join(lines)
# Pairs of function / colorize args for coloring the banner.
# These are each of the states moving from left to right on
# a single line of the banner. The function represents whether
# the current char in a line should trigger the next state.
color_states = [
(lambda c: c != " ", {}),
(lambda c: c == " ", {"fg": "red"}),
(lambda c: c != " " and not c.isupper(),
{"fg": "white", "bg": "red", "opts": ["bold"]}),
(lambda c: c == " ", {"fg": "red"}),
(lambda c: c == "*", {}),
(lambda c: c != "*", {"fg": "red"}),
(lambda c: False, {}),
]
# Colorize the banner.
for i, line in enumerate(lines):
chars = []
color_state = 0
for char in line:
color_state += color_states[color_state][0](char)
chars.append(colorize(char, **color_states[color_state][1]))
lines[i] = "".join(chars)
return "\n".join(lines)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--nobanner', action="store_false", dest='show_banner',
default=True,
help='Tells Mezzanine not to show a banner at startup.',
)
def inner_run(self, *args, **kwargs):
if kwargs["show_banner"]:
# Show Mezzanine's own cool banner in the terminal. There
# aren't really any exceptions to catch here, but we do
# so blanketly since such a trivial thing like the banner
# shouldn't be able to crash the development server.
try:
self.stdout.write(banner())
except:
pass
super(Command, self).inner_run(*args, **kwargs)
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
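# Hedged usage note (not part of the original file): the banner rendered above
# can be suppressed with the flag registered in add_arguments, e.g.
#
#     python manage.py runserver --nobanner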
|
proxysh/Safejumper-for-Desktop | refs/heads/master | buildmac/Resources/env/lib/python2.7/site-packages/twisted/web/xmlrpc.py | 15 | # -*- test-case-name: twisted.web.test.test_xmlrpc -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A generic resource for publishing objects via XML-RPC.
Maintainer: Itamar Shtull-Trauring
@var Fault: See L{xmlrpclib.Fault}
@type Fault: L{xmlrpclib.Fault}
"""
from __future__ import division, absolute_import
from twisted.python.compat import _PY3, intToBytes, nativeString, urllib_parse
from twisted.python.compat import unicode
# System Imports
import base64
if _PY3:
import xmlrpc.client as xmlrpclib
else:
import xmlrpclib
# Sibling Imports
from twisted.web import resource, server, http
from twisted.internet import defer, protocol, reactor
from twisted.python import log, reflect, failure
# These are deprecated, use the class level definitions
NOT_FOUND = 8001
FAILURE = 8002
# Useful so people don't need to import xmlrpclib directly
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime
def withRequest(f):
"""
Decorator to cause the request to be passed as the first argument
to the method.
If an I{xmlrpc_} method is wrapped with C{withRequest}, the
request object is passed as the first argument to that method.
For example::
@withRequest
def xmlrpc_echo(self, request, s):
return s
@since: 10.2
"""
f.withRequest = True
return f
class NoSuchFunction(Fault):
"""
There is no function by the given name.
"""
class Handler:
"""
Handle a XML-RPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
For example, let's say we want to authenticate against twisted.cred,
run a LDAP query and then pass its result to a database query, all
as a result of a single XML-RPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the XML-RPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
self.result.errback(
NotImplementedError("Implement run() in subclasses"))
class XMLRPC(resource.Resource):
"""
A resource that implements XML-RPC.
You probably want to connect this to '/RPC2'.
Methods published can return XML-RPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'xmlrpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
@ivar allowNone: Permit XML translating of Python constant None.
@type allowNone: C{bool}
@ivar useDateTime: Present C{datetime} values as C{datetime.datetime}
objects?
@type useDateTime: C{bool}
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
allowedMethods = (b'POST',)
def __init__(self, allowNone=False, useDateTime=False):
resource.Resource.__init__(self)
self.subHandlers = {}
self.allowNone = allowNone
self.useDateTime = useDateTime
def __setattr__(self, name, value):
self.__dict__[name] = value
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return list(self.subHandlers.keys())
def render_POST(self, request):
request.content.seek(0, 0)
request.setHeader(b"content-type", b"text/xml; charset=utf-8")
try:
args, functionPath = xmlrpclib.loads(request.content.read(),
use_datetime=self.useDateTime)
except Exception as e:
f = Fault(self.FAILURE, "Can't deserialize input: %s" % (e,))
self._cbRender(f, request)
else:
try:
function = self.lookupProcedure(functionPath)
except Fault as f:
self._cbRender(f, request)
else:
# Use this list to track whether the response has failed or not.
# This will be used later on to decide if the result of the
# Deferred should be written out and Request.finish called.
responseFailed = []
request.notifyFinish().addErrback(responseFailed.append)
if getattr(function, 'withRequest', False):
d = defer.maybeDeferred(function, request, *args)
else:
d = defer.maybeDeferred(function, *args)
d.addErrback(self._ebRender)
d.addCallback(self._cbRender, request, responseFailed)
return server.NOT_DONE_YET
def _cbRender(self, result, request, responseFailed=None):
if responseFailed:
return
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
try:
content = xmlrpclib.dumps(
result, methodresponse=True,
allow_none=self.allowNone)
except Exception as e:
f = Fault(self.FAILURE, "Can't serialize output: %s" % (e,))
content = xmlrpclib.dumps(f, methodresponse=True,
allow_none=self.allowNone)
if isinstance(content, unicode):
content = content.encode('utf8')
request.setHeader(
b"content-length", intToBytes(len(content)))
request.write(content)
except:
log.err()
request.finish()
def _ebRender(self, failure):
if isinstance(failure.value, Fault):
return failure.value
log.err(failure)
return Fault(self.FAILURE, "error")
def lookupProcedure(self, procedurePath):
"""
Given a string naming a procedure, return a callable object for that
procedure or raise NoSuchFunction.
The returned object will be called, and should return the result of the
procedure, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The base
implementation is that, given C{'foo'}, C{self.xmlrpc_foo} will be returned.
If C{procedurePath} contains C{self.separator}, the sub-handler for the
initial prefix is used to search for the remaining path.
If you override C{lookupProcedure}, you may also want to override
C{listProcedures} to accurately report the procedures supported by your
resource, so that clients using the I{system.listMethods} procedure
receive accurate results.
@since: 11.1
"""
if procedurePath.find(self.separator) != -1:
prefix, procedurePath = procedurePath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise NoSuchFunction(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return handler.lookupProcedure(procedurePath)
f = getattr(self, "xmlrpc_%s" % procedurePath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND,
"procedure %s not found" % procedurePath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND,
"procedure %s not callable" % procedurePath)
else:
return f
def listProcedures(self):
"""
Return a list of the names of all xmlrpc procedures.
@since: 11.1
"""
return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_')
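# A minimal usage sketch, assuming a reactor-driven setup; the names
# _ExampleXMLRPC and xmlrpc_add below are hypothetical, not part of this
# module's API.
def _exampleServe():
    class _ExampleXMLRPC(XMLRPC):
        def xmlrpc_add(self, a, b):
            """Return the sum of the two arguments."""
            return a + b
    root = _ExampleXMLRPC()
    root.putSubHandler('math', _ExampleXMLRPC())  # also reachable as math.add
    reactor.listenTCP(7080, server.Site(root))
    reactor.run()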
class XMLRPCIntrospection(XMLRPC):
"""
Implement the XML-RPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be XML-RPC types, not Python types.
"""
def __init__(self, parent):
"""
Implement Introspection support for an XMLRPC server.
@param parent: the XMLRPC server to add Introspection support to.
@type parent: L{XMLRPC}
"""
XMLRPC.__init__(self)
self._xmlrpc_parent = parent
def xmlrpc_listMethods(self):
"""
Return a list of the method names implemented by this server.
"""
functions = []
todo = [(self._xmlrpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([prefix + name for name in obj.listProcedures()])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
xmlrpc_listMethods.signature = [['array']]
def xmlrpc_methodHelp(self, method):
"""
Return a documentation string describing the use of the given method.
"""
method = self._xmlrpc_parent.lookupProcedure(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
xmlrpc_methodHelp.signature = [['string', 'string']]
def xmlrpc_methodSignature(self, method):
"""
Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._xmlrpc_parent.lookupProcedure(method)
return getattr(method, 'signature', None) or ''
xmlrpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(xmlrpc):
"""
Add Introspection support to an XMLRPC server.
    @param xmlrpc: the XMLRPC server to add Introspection support to.
    @type xmlrpc: L{XMLRPC}
"""
xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc))
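# Introspection wiring sketch: after addIntrospection(root), clients can
# invoke system.listMethods, system.methodHelp and system.methodSignature.
def _exampleIntrospection():
    root = XMLRPC()
    addIntrospection(root)
    return root.getSubHandler('system').xmlrpc_listMethods()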
class QueryProtocol(http.HTTPClient):
def connectionMade(self):
self._response = None
self.sendCommand(b'POST', self.factory.path)
self.sendHeader(b'User-Agent', b'Twisted/XMLRPClib')
self.sendHeader(b'Host', self.factory.host)
self.sendHeader(b'Content-type', b'text/xml; charset=utf-8')
payload = self.factory.payload
self.sendHeader(b'Content-length', intToBytes(len(payload)))
if self.factory.user:
auth = b':'.join([self.factory.user, self.factory.password])
authHeader = b''.join([b'Basic ', base64.b64encode(auth)])
self.sendHeader(b'Authorization', authHeader)
self.endHeaders()
self.transport.write(payload)
def handleStatus(self, version, status, message):
if status != b'200':
self.factory.badStatus(status, message)
def handleResponse(self, contents):
"""
Handle the XML-RPC response received from the server.
Specifically, disconnect from the server and store the XML-RPC
response so that it can be properly handled when the disconnect is
finished.
"""
self.transport.loseConnection()
self._response = contents
def connectionLost(self, reason):
"""
The connection to the server has been lost.
        If we have a full response from the server, then parse it and fire a
        Deferred with the return value or C{Fault} that the server gave us.
"""
http.HTTPClient.connectionLost(self, reason)
if self._response is not None:
response, self._response = self._response, None
self.factory.parseResponse(response)
payloadTemplate = """<?xml version="1.0"?>
<methodCall>
<methodName>%s</methodName>
%s
</methodCall>
"""
class _QueryFactory(protocol.ClientFactory):
"""
XML-RPC Client Factory
@ivar path: The path portion of the URL to which to post method calls.
@type path: L{bytes}
@ivar host: The value to use for the Host HTTP header.
@type host: L{bytes}
@ivar user: The username with which to authenticate with the server
when making calls.
@type user: L{bytes} or L{None}
@ivar password: The password with which to authenticate with the server
when making calls.
@type password: L{bytes} or L{None}
    @ivar useDateTime: Accept datetime values as datetime.datetime objects;
        this is also passed to the underlying xmlrpclib implementation.
        Defaults to C{False}.
@type useDateTime: C{bool}
"""
deferred = None
protocol = QueryProtocol
def __init__(self, path, host, method, user=None, password=None,
allowNone=False, args=(), canceller=None, useDateTime=False):
"""
@param method: The name of the method to call.
@type method: C{str}
@param allowNone: allow the use of None values in parameters. It's
passed to the underlying xmlrpclib implementation. Defaults to
C{False}.
@type allowNone: C{bool} or L{None}
@param args: the arguments to pass to the method.
@type args: C{tuple}
@param canceller: A 1-argument callable passed to the deferred as the
canceller callback.
@type canceller: callable or L{None}
"""
self.path, self.host = path, host
self.user, self.password = user, password
self.payload = payloadTemplate % (method,
xmlrpclib.dumps(args, allow_none=allowNone))
if isinstance(self.payload, unicode):
self.payload = self.payload.encode('utf8')
self.deferred = defer.Deferred(canceller)
self.useDateTime = useDateTime
def parseResponse(self, contents):
if not self.deferred:
return
try:
response = xmlrpclib.loads(contents,
use_datetime=self.useDateTime)[0][0]
        except Exception:
deferred, self.deferred = self.deferred, None
deferred.errback(failure.Failure())
else:
deferred, self.deferred = self.deferred, None
deferred.callback(response)
def clientConnectionLost(self, _, reason):
if self.deferred is not None:
deferred, self.deferred = self.deferred, None
deferred.errback(reason)
clientConnectionFailed = clientConnectionLost
def badStatus(self, status, message):
deferred, self.deferred = self.deferred, None
deferred.errback(ValueError(status, message))
class Proxy:
"""
A Proxy for making remote XML-RPC calls.
Pass the URL of the remote XML-RPC server to the constructor.
Use C{proxy.callRemote('foobar', *args)} to call remote method
'foobar' with *args.
@ivar user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type user: L{bytes} or L{None}
@ivar password: The password with which to authenticate with the server
when making calls. If specified, overrides any password information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type password: L{bytes} or L{None}
@ivar allowNone: allow the use of None values in parameters. It's
passed to the underlying L{xmlrpclib} implementation. Defaults to
C{False}.
@type allowNone: C{bool} or L{None}
    @ivar useDateTime: Accept datetime values as datetime.datetime objects;
        this is also passed to the underlying L{xmlrpclib} implementation.
        Defaults to C{False}.
@type useDateTime: C{bool}
@ivar connectTimeout: Number of seconds to wait before assuming the
connection has failed.
@type connectTimeout: C{float}
@ivar _reactor: The reactor used to create connections.
@type _reactor: Object providing L{twisted.internet.interfaces.IReactorTCP}
@ivar queryFactory: Object returning a factory for XML-RPC protocol. Mainly
useful for tests.
"""
queryFactory = _QueryFactory
def __init__(self, url, user=None, password=None, allowNone=False,
useDateTime=False, connectTimeout=30.0, reactor=reactor):
"""
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type url: L{bytes}
"""
scheme, netloc, path, params, query, fragment = urllib_parse.urlparse(
url)
netlocParts = netloc.split(b'@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(b':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
            except IndexError:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(b':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
        except (IndexError, ValueError):
self.port = None
self.path = path
if self.path in [b'', None]:
self.path = b'/'
self.secure = (scheme == b'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
self.allowNone = allowNone
self.useDateTime = useDateTime
self.connectTimeout = connectTimeout
self._reactor = reactor
def callRemote(self, method, *args):
"""
Call remote XML-RPC C{method} with given arguments.
@return: a L{defer.Deferred} that will fire with the method response,
or a failure if the method failed. Generally, the failure type will
be L{Fault}, but you can also have an C{IndexError} on some buggy
servers giving empty responses.
If the deferred is cancelled before the request completes, the
connection is closed and the deferred will fire with a
L{defer.CancelledError}.
"""
def cancel(d):
factory.deferred = None
connector.disconnect()
factory = self.queryFactory(
self.path, self.host, method, self.user,
self.password, self.allowNone, args, cancel, self.useDateTime)
if self.secure:
from twisted.internet import ssl
connector = self._reactor.connectSSL(
nativeString(self.host), self.port or 443,
factory, ssl.ClientContextFactory(),
timeout=self.connectTimeout)
else:
connector = self._reactor.connectTCP(
nativeString(self.host), self.port or 80, factory,
timeout=self.connectTimeout)
return factory.deferred
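# Client-side sketch (hypothetical endpoint and method name): callRemote
# returns a Deferred, so results arrive via callbacks.
def _exampleProxyCall():
    proxy = Proxy(b'http://localhost:7080/RPC2')
    d = proxy.callRemote('add', 3, 5)
    d.addCallbacks(log.msg, log.err)
    return d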
__all__ = [
"XMLRPC", "Handler", "NoSuchFunction", "Proxy",
"Fault", "Binary", "Boolean", "DateTime"]
|
anryko/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_firewall_rule.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
type: str
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
type: str
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
type: str
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
type: str
description:
description:
- The rule description.
type: str
irule:
description:
- Specifies an iRule that is applied to the firewall rule.
- An iRule can be started when the firewall rule matches traffic.
type: str
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
type: str
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
      - You can specify the following source items. An IPv4 or IPv6 address, an IPv4
        or IPv6 address range, geographic location, VLAN, address list, port,
        port range, or port list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
vlan:
description:
- Specifies VLANs to which the rule applies.
- The VLAN source refers to the packet's source.
type: str
type: list
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
      - You can specify the following destination items. An IPv4 or IPv6 address,
        an IPv4 or IPv6 address range, geographic location, address list, port,
        port range, or port list.
      - You can specify a mix of different types of items for the destination address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
type: list
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
        is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
        specific settings. This includes C(logging), C(action), C(source),
        C(destination), C(irule) and C(protocol).
- This parameter is only used when C(parent_policy) is specified, otherwise it is ignored.
type: str
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
type: str
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
type: str
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new policy rule that uses an existing rule list
bigip_firewall_rule:
name: foo
parent_policy: foo_policy
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the rule.
returned: changed
type: str
sample: FooRule
parent_policy:
description: The policy which contains the rule to be managed.
returned: changed
type: str
sample: FooPolicy
parent_rule_list:
description: The rule list which contains the rule to be managed.
returned: changed
type: str
sample: FooRuleList
action:
description: The action for the firewall rule.
returned: changed
type: str
sample: drop
status:
description: The activity state of the rule or rule list.
returned: changed
type: str
sample: scheduled
schedule:
description: The schedule for the firewall rule.
returned: changed
type: str
sample: Foo_schedule
description:
description: The rule description.
returned: changed
type: str
sample: MyRule
irule:
description: The iRule that is applied to the firewall rule.
returned: changed
type: str
sample: _sys_auth_radius
protocol:
description: The protocol to which the rule applies.
returned: changed
type: str
sample: any
source:
  description: The packet sources to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
vlan:
description: Source VLANs for the packets.
returned: changed
type: str
sample: vlan1
sample: hash/dictionary of values
destination:
description: The packet destinations to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
sample: hash/dictionary of values
logging:
description: Enable or Disable logging for the firewall rule.
returned: changed
type: bool
sample: yes
rule_list:
description: An existing rule list to use in the parent policy.
returned: changed
type: str
sample: rule-list-1
icmp_message:
  description: The ICMP or ICMPv6 message C(type) and C(code) that the rule uses.
returned: changed
type: complex
contains:
type:
description: The type of ICMP message.
returned: changed
type: str
sample: 0
code:
description: The code returned in response to the specified ICMP message type.
returned: changed
type: str
sample: 1
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
'ruleList': 'rule_list'
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
'ruleList',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
'rule_list',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
'rule_list',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
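    # For example, an API value of '6' is presented as 'tcp'; values not in
    # this map (e.g. '47') pass through unchanged in ApiParameters.protocol.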
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
            result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
        for x in self._values['icmp_message']:
            # Avoid shadowing the built-in type() while normalizing values.
            msg_type = x.get('type', '255')
            msg_code = x.get('code', '255')
            if msg_type is None or msg_type == 'any':
                msg_type = '255'
            if msg_code is None or msg_code == 'any':
                msg_code = '255'
            if msg_type == '255' and msg_code == '255':
                result.append("255")
            elif msg_type == '255' and msg_code != '255':
                raise F5ModuleError(
                    "A type of 'any' (255) requires a code of 'any'."
                )
            elif msg_code == '255':
                result.append(msg_type)
            else:
                result.append('{0}:{1}'.format(msg_type, msg_code))
result = list(set(result))
return result
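    # Illustrative normalization performed above (a sketch):
    #   [{'type': 'any', 'code': 'any'}] -> ['255']
    #   [{'type': '0', 'code': None}]    -> ['0']
    #   [{'type': '3', 'code': '1'}]     -> ['3:1']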
@property
def rule_list(self):
if self._values['rule_list'] is None:
return None
if self._values['parent_policy'] is not None:
return fq_name(self.partition, self._values['rule_list'])
return None
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
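    # Shape sketch: the tuple list built by ModuleParameters, e.g.
    #   [('address', '1.2.3.4'), ('vlan', '/Common/vlan1')]
    # becomes the REST payload structure
    #   {'addresses': [{'name': '1.2.3.4'}], 'vlans': ['/Common/vlan1'],
    #    'addressLists': [], 'geo': [], 'ports': [], 'portLists': []}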
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
        if v['ports']:
            # UsableChanges stores ports as {'name': ...} dicts.
            result += [('port', str(x['name'])) for x in v['ports']]
        if v['portLists']:
            # ...and port lists as plain name strings.
            result += [('port_list', x) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
        if v['ports']:
            # UsableChanges stores ports as {'name': ...} dicts.
            result += [('port', str(x['name'])) for x in v['ports']]
        if v['portLists']:
            # ...and port lists as plain name strings.
            result += [('port_list', x) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
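    # Dispatch sketch: compare('irule') hits the property below, while
    # compare('schedule') falls back to __default, which reports want's
    # value only when it differs from have's (or have lacks the attribute).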
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
name = self.want.name
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
name.replace('/', '_')
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self.set_reasonable_creation_defaults()
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def set_reasonable_creation_defaults(self):
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
def create_on_device(self):
params = self.changes.api_params()
name = self.want.name
params['name'] = name.replace('/', '_')
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
name = self.want.name
        # A rule under a policy lives at the same URI whether or not it
        # references a rule list, so one branch covers both cases.
        if self.want.parent_policy:
            uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.parent_policy),
                name.replace('/', '_')
            )
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
name = self.want.name
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
name.replace('/', '_')
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
name.replace('/', '_')
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
            self.want.name.replace('/', '_')
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
            self.want.name.replace('/', '_')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
IONISx/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/social_facebook/preferences/urls.py | 128 | """
URLs for users sharing preferences
"""
from django.conf.urls import patterns, url
from .views import UserSharing
urlpatterns = patterns(
'mobile_api.social_facebook.preferences.views',
url(
r'^preferences/$',
UserSharing.as_view(),
name='preferences'
),
)
|
adelton/django | refs/heads/master | django/contrib/contenttypes/migrations/0002_remove_content_type_name.py | 582 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
for ct in ContentType.objects.all():
try:
ct.name = apps.get_model(ct.app_label, ct.model)._meta.object_name
except LookupError:
ct.name = ct.model
ct.save()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='contenttype',
options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
),
migrations.AlterField(
model_name='contenttype',
name='name',
field=models.CharField(max_length=100, null=True),
),
migrations.RunPython(
migrations.RunPython.noop,
add_legacy_name,
hints={'model_name': 'contenttype'},
),
migrations.RemoveField(
model_name='contenttype',
name='name',
),
]
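# Reversal sketch (an inference from the operations above): migrating
# forward drops ContentType.name entirely, while migrating backward
# re-adds the nullable column and add_legacy_name() repopulates it from
# each model class's object_name, falling back to ct.model when the model
# no longer exists.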
|
percy-g2/Novathor_xperia_u8500 | refs/heads/master | 6.1.1.B.0.253/external/webkit/Tools/Scripts/webkitpy/tool/commands/queuestest.py | 15 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive import ScriptError
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
from webkitpy.tool.mocktool import MockTool
class MockQueueEngine(object):
def __init__(self, name, queue, wakeup_event):
pass
def run(self):
pass
class QueuesTest(unittest.TestCase):
# This is _patch1 in mocktool.py
mock_work_item = MockTool().bugs.fetch_attachment(197)
def assert_outputs(self, func, func_name, args, expected_stdout, expected_stderr, expected_exceptions):
exception = None
if expected_exceptions and func_name in expected_exceptions:
exception = expected_exceptions[func_name]
OutputCapture().assert_outputs(self,
func,
args=args,
expected_stdout=expected_stdout.get(func_name, ""),
expected_stderr=expected_stderr.get(func_name, ""),
expected_exception=exception)
def _default_begin_work_queue_stderr(self, name, checkout_dir):
string_replacements = {"name": name, 'checkout_dir': checkout_dir}
return "CAUTION: %(name)s will discard all local changes in \"%(checkout_dir)s\"\nRunning WebKit %(name)s.\nMOCK: update_status: %(name)s Starting Queue\n" % string_replacements
def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None):
if not tool:
tool = MockTool()
        # This is a hack so callers don't have to set up a custom MockFileSystem just to test the commit-queue:
        # the commit-queue tries to read the layout test results and will hit a KeyError in MockFileSystem if we don't do this.
tool.filesystem.write_text_file('/mock/results.html', "")
if not expected_stdout:
expected_stdout = {}
if not expected_stderr:
expected_stderr = {}
if not args:
args = []
if not options:
options = Mock()
options.port = None
if not work_item:
work_item = self.mock_work_item
tool.user.prompt = lambda message: "yes"
queue.execute(options, args, tool, engine=MockQueueEngine)
self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.should_proceed_with_work_item, "should_proceed_with_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, expected_exceptions)
# Should we have a different function for testing StepSequenceErrorHandlers?
if isinstance(queue, StepSequenceErrorHandler):
self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand")], expected_stdout, expected_stderr, expected_exceptions)
|
Vvucinic/Wander | refs/heads/master | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/admin/templatetags/admin_static.py | 539 | from django.apps import apps
from django.template import Library
register = Library()
_static = None
@register.simple_tag
def static(path):
global _static
if _static is None:
if apps.is_installed('django.contrib.staticfiles'):
from django.contrib.staticfiles.templatetags.staticfiles import static as _static
else:
from django.templatetags.static import static as _static
return _static(path)
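# Template usage sketch:
#   {% load admin_static %}
#   <link rel="stylesheet" href="{% static 'admin/css/base.css' %}">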
|
Argon-Zhou/django | refs/heads/master | tests/basic/tests.py | 117 | from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Area man programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now-unsupported `__cmp__` and assumed that
both compared objects were Field instances, raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# finally, test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Area woman programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Area man programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Area man programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
# To avoid dict-ordering related errors, check only one lookup
# per assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Area woman programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Area',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
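# A sketch of what the assertion above relies on (illustrative only):
# _get_queryset_methods returns a {name: method} mapping, so e.g.
#
#   methods = BaseManager._get_queryset_methods(QuerySet)
#   assert 'filter' in methods and 'bulk_create' in methods
#
# would be expected to hold for every name in QUERYSET_PROXY_METHODS.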
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. Note that we patch Article's manager,
# not ArticleSelectOnSave's, because proxy models use their parent
# model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from datetimes, which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query is
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(SimpleTestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.remote_field instead.'
)
|
leoliujie/odoo | refs/heads/8.0 | addons/web_tests/tests/__init__.py | 385 | # -*- coding: utf-8 -*-
import test_ui
|
OptiPop/external_chromium_org | refs/heads/opti-5.1 | tools/memory_inspector/memory_inspector/frontends/command_line.py | 83 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command line frontend for Memory Inspector"""
import json
import memory_inspector
import optparse
import os
import time
from memory_inspector import constants
from memory_inspector.classification import mmap_classifier
from memory_inspector.core import backends
from memory_inspector.data import serialization
def main():
COMMANDS = ['devices', 'ps', 'stats', 'mmaps', 'classified_mmaps']
usage = ('%prog [options] ' + ' | '.join(COMMANDS))
parser = optparse.OptionParser(usage=usage)
parser.add_option('-b', '--backend', help='Backend name '
'(e.g., Android)', type='string', default='Android')
parser.add_option('-s', '--device_id', help='Device '
'id (e.g., Android serial)', type='string')
parser.add_option('-p', '--process_id', help='Target process id',
type='int')
parser.add_option('-m', '--filter_process_name', help='Process '
'name to match', type='string')
parser.add_option('-r', '--mmap_rule',
help='mmap rule', type='string',
default=os.path.join(constants.CLASSIFICATION_RULES_PATH,
'default', 'mmap-android.py'))
(options, args) = parser.parse_args()
memory_inspector.RegisterAllBackends()
if not args or args[0] not in COMMANDS:
parser.print_help()
return -1
if args[0] == 'devices':
_ListDevices(options.backend)
return 0
number_of_devices = 0
if options.device_id:
device_id = options.device_id
number_of_devices = 1
else:
for device in backends.ListDevices():
if device.backend.name == options.backend:
number_of_devices += 1
device_id = device.id
if number_of_devices == 0:
print "No devices connected"
return -1
if number_of_devices > 1:
print ('More than 1 device connected. You need to provide'
' --device_id')
return -1
device = backends.GetDevice(options.backend, device_id)
if not device:
print 'Device', device_id, 'does not exist'
return -1
device.Initialize()
if args[0] == 'ps':
if not options.filter_process_name:
print 'Listing all processes'
else:
print ('Listing processes matching '
+ options.filter_process_name.lower())
print ''
print '%-10s : %-50s : %12s %12s %12s' % (
'Process ID', 'Process Name', 'RUN_TIME', 'THREADS',
'MEM_RSS_KB')
print ''
for process in device.ListProcesses():
if (not options.filter_process_name or
options.filter_process_name.lower() in process.name.lower()):
stats = process.GetStats()
run_time_min, run_time_sec = divmod(stats.run_time, 60)
print '%10s : %-50s : %6s m %2s s %8s %12s' % (
process.pid, _Truncate(process.name, 50), run_time_min,
run_time_sec, stats.threads, stats.vm_rss)
return 0
if not options.process_id:
print 'You need to provide --process_id'
return -1
process = device.GetProcess(options.process_id)
if not process:
print 'Cannot find process [%d] on device %s' % (
options.process_id, device.id)
return -1
elif args[0] == 'stats':
_ListProcessStats(process)
return 0
elif args[0] == 'mmaps':
_ListProcessMmaps(process)
return 0
elif args[0] == 'classified_mmaps':
_ListProcessClassifiedMmaps(process, options.mmap_rule)
return 0
def _ListDevices(backend_name):
print 'Device list:'
print ''
for device in backends.ListDevices():
if device.backend.name == backend_name:
print '%-16s : %s' % (device.id, device.name)
def _ListProcessStats(process):
"""Prints process stats periodically
"""
print 'Stats for process: [%d] %s' % (process.pid, process.name)
print '%-10s : %-50s : %12s %12s %13s %12s %14s' % (
'Process ID', 'Process Name', 'RUN_TIME', 'THREADS',
'CPU_USAGE', 'MEM_RSS_KB', 'PAGE_FAULTS')
print ''
while True:
stats = process.GetStats()
run_time_min, run_time_sec = divmod(stats.run_time, 60)
print '%10s : %-50s : %6s m %2s s %8s %12s %13s %11s' % (
process.pid, _Truncate(process.name, 50), run_time_min, run_time_sec,
stats.threads, stats.cpu_usage, stats.vm_rss, stats.page_faults)
time.sleep(1)
def _ListProcessMmaps(process):
"""Prints process memory maps
"""
print 'Memory Maps for process: [%d] %s' % (process.pid, process.name)
print '%-10s %-10s %6s %12s %12s %13s %13s %-40s' % (
'START', 'END', 'FLAGS', 'PRIV.DIRTY', 'PRIV.CLEAN',
'SHARED DIRTY', 'SHARED CLEAN', 'MAPPED_FILE')
print '%38s %12s %12s %13s' % ('(kb)', '(kb)', '(kb)', '(kb)')
print ''
maps = process.DumpMemoryMaps()
for entry in maps.entries:
print '%-10x %-10x %6s %12s %12s %13s %13s %-40s' % (
entry.start, entry.end, entry.prot_flags,
entry.priv_dirty_bytes / 1024, entry.priv_clean_bytes / 1024,
entry.shared_dirty_bytes / 1024,
entry.shared_clean_bytes / 1024, entry.mapped_file)
def _ListProcessClassifiedMmaps(process, mmap_rule):
"""Prints process classified memory maps
"""
maps = process.DumpMemoryMaps()
if not os.path.exists(mmap_rule):
print 'File', mmap_rule, 'not found'
return
with open(mmap_rule) as f:
rules = mmap_classifier.LoadRules(f.read())
classified_results_tree = mmap_classifier.Classify(maps, rules)
print json.dumps(classified_results_tree, cls=serialization.Encoder)
def _Truncate(name, max_length):
if len(name) <= max_length:
return name
return '%s...' % name[0:(max_length - 3)]
|
roninkenji/libpebble | refs/heads/master | pebble/VersionGenerated.py | 2 | SDK_VERSION = "2.0-BETA2" |
spallavolu/scikit-learn | refs/heads/master | sklearn/datasets/svmlight_format.py | 79 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains pairwise preference constraints (known
as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel case, a list
of tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains pairwise preference constraints (known
as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
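# A minimal usage sketch for the Notes section above (file names are
# hypothetical): loading train and test in one call infers a single
# shared n_features, so the column counts line up.
#
#   X_train, y_train, X_test, y_test = load_svmlight_files(
#       ("train.svmlight", "test.svmlight"))
#   assert X_train.shape[1] == X_test.shape[1]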
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be integers or floats, or array-like
objects of integers or floats for multilabel classification.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
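# A round-trip sketch under hypothetical names: keeping zero_based
# consistent on both ends avoids off-by-one column shifts.
#
#   dump_svmlight_file(X, y, "data.svmlight", zero_based=True)
#   X2, y2 = load_svmlight_file("data.svmlight", zero_based=True)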
|
biomodels/BIOMD0000000347 | refs/heads/master | BIOMD0000000347/model.py | 1 | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000347.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) |
ayepezv/GAD_ERP | refs/heads/master | addons/sale_timesheet/models/procurement.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
task_id = fields.Many2one('project.task', 'Task', copy=False)
def _is_procurement_task(self):
return self.product_id.type == 'service' and self.product_id.track_service == 'task'
@api.multi
def _assign(self):
self.ensure_one()
res = super(ProcurementOrder, self)._assign()
if not res:
# if there isn't any specific procurement.rule defined for the product, we may want to create a task
return self._is_procurement_task()
return res
@api.multi
def _run(self):
self.ensure_one()
if self._is_procurement_task() and not self.task_id:
# create a task for the procurement
return self._create_service_task()
return super(ProcurementOrder, self)._run()
def _convert_qty_company_hours(self):
company_time_uom_id = self.env.user.company_id.project_time_mode_id
if self.product_uom.id != company_time_uom_id.id and self.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = self.product_uom._compute_quantity(self.product_qty, company_time_uom_id)
else:
planned_hours = self.product_qty
return planned_hours
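# Example (numbers hypothetical): with product_qty = 480 expressed in a
# minutes UoM and a company time UoM of hours, _compute_quantity would
# yield planned_hours == 8.0; if the UoM categories differ, the raw
# product_qty is used unchanged.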
def _get_project(self):
Project = self.env['project.project']
project = self.product_id.project_id
if not project and self.sale_line_id:
# find the project corresponding to the analytic account of the sales order
account = self.sale_line_id.order_id.project_id
if not account:
self.sale_line_id.order_id._create_analytic_account()
account = self.sale_line_id.order_id.project_id
project = Project.search([('analytic_account_id', '=', account.id)], limit=1)
if not project:
project_id = account.project_create({'name': account.name, 'use_tasks': True})
project = Project.browse(project_id)
return project
def _create_service_task(self):
project = self._get_project()
planned_hours = self._convert_qty_company_hours()
task = self.env['project.task'].create({
'name': '%s:%s' % (self.origin or '', self.product_id.name),
'date_deadline': self.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': self.sale_line_id.order_id.partner_id.id or self.partner_dest_id.id,
'user_id': self.product_id.product_manager.id,
'procurement_id': self.id,
'description': self.name + '\n',
'project_id': project.id,
'company_id': self.company_id.id,
})
self.write({'task_id': task.id})
self.message_post(body=_("Task created"))
if self.sale_line_id.order_id:
self.sale_line_id.order_id.message_post(body=_("Task created"))
return task
|
TeamEOS/external_chromium_org | refs/heads/lp5.0 | third_party/bintrees/bintrees/walker.py | 156 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: tree walker
# Created: 07.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
from operator import attrgetter, lt, gt
class Walker(object):
__slots__ = ['_node', '_stack', '_tree']
def __init__(self, tree):
self._tree = tree
self._node = tree.root
self._stack = []
def reset(self):
self._stack = []
self._node = self._tree.root
@property
def key(self):
return self._node.key
@property
def value(self):
return self._node.value
@property
def item(self):
return (self._node.key, self._node.value)
@property
def is_valid(self):
return self._node is not None
def goto(self, key):
self._node = self._tree.root
while self._node is not None:
if key == self._node.key:
return True
elif key < self._node.key:
self.go_left()
else:
self.go_right()
return False
def push(self):
self._stack.append(self._node)
def pop(self):
self._node = self._stack.pop()
def stack_is_empty(self):
return len(self._stack) == 0
def goto_leaf(self):
""" get a leaf node """
while self._node is not None:
if self.has_left():
self.go_left()
elif self.has_right():
self.go_right()
else:
return
def has_child(self, direction):
if direction == 0:
return self._node.left is not None
else:
return self._node.right is not None
def down(self, direction):
if direction == 0:
self._node = self._node.left
else:
self._node = self._node.right
def go_left(self):
self._node = self._node.left
def go_right(self):
self._node = self._node.right
def has_left(self):
return self._node.left is not None
def has_right(self):
return self._node.right is not None
def _next_item(self, key, left, right, less_than):
node = self._tree.root
succ = None
while node is not None:
if key == node.key:
break
elif less_than(key, node.key):
if (succ is None) or less_than(node.key, succ.key):
succ = node
node = left(node)
else:
node = right(node)
if node is None: # stay at dead end
raise KeyError(str(key))
# found node of key
if right(node) is not None:
# find smallest node of right subtree
node = right(node)
while left(node) is not None:
node = left(node)
if succ is None:
succ = node
elif less_than(node.key, succ.key):
succ = node
elif succ is None: # given key is biggest in tree
raise KeyError(str(key))
return (succ.key, succ.value)
def succ_item(self, key):
""" Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist.
"""
return self._next_item(
key,
left=attrgetter("left"),
right=attrgetter("right"),
less_than=lt,
)
def prev_item(self, key):
""" Get predecessor (k,v) pair of key, raises KeyError if key is min key
or key does not exist.
"""
return self._next_item(
key,
left=attrgetter("right"),
right=attrgetter("left"),
less_than=gt,
)
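# Design note: succ_item and prev_item share _next_item; swapping the
# left/right accessors and flipping the comparison turns the successor
# search into the predecessor search over the mirrored tree.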
def _iteritems(self, left=attrgetter("left"), right=attrgetter("right")):
""" optimized forward iterator (reduced method calls) """
if self._tree.is_empty():
return
node = self._tree.root
stack = self._stack
go_left = True
while True:
if left(node) is not None and go_left:
stack.append(node)
node = left(node)
else:
yield (node.key, node.value)
if right(node) is not None:
node = right(node)
go_left = True
else:
if len(stack) == 0:
return # all done
node = stack.pop()
go_left = False
def iter_items_forward(self):
for item in self._iteritems(
left=attrgetter("left"),
right=attrgetter("right"),
):
yield item
def iter_items_backward(self):
for item in self._iteritems(
left=attrgetter("right"),
right=attrgetter("left"),
):
yield item
def floor_item(self, key):
""" Get the element (k,v) pair associated with the greatest key less
than or equal to the given key, raises KeyError if there is no such key.
"""
node = self._tree.root
prev = None
while node is not None:
if key == node.key:
return node.key, node.value
elif key < node.key:
node = node.left
else:
if (prev is None) or (node.key > prev.key):
prev = node
node = node.right
# node must be None here
if prev:
return prev.key, prev.value
raise KeyError(str(key))
def ceiling_item(self, key):
""" Get the element (k,v) pair associated with the smallest key greater
than or equal to the given key, raises KeyError if there is no such key.
"""
node = self._tree.root
succ = None
while node is not None:
if key == node.key:
return node.key, node.value
elif key > node.key:
node = node.right
else:
if (succ is None) or (node.key < succ.key):
succ = node
node = node.left
# node must be None here
if succ:
return succ.key, succ.value
raise KeyError(str(key))
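# Usage sketch (keys are hypothetical): for a tree holding 10, 20, 30,
# floor_item(25) returns the (20, value) pair, ceiling_item(25) returns
# (30, value), and both raise KeyError beyond the extremes.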
|
uber/fbthrift | refs/heads/master | thrift/test/py/JSONGenerateTest.py | 14 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import re
import shutil
import unittest
import subprocess
class TestJSONGenerate(unittest.TestCase):
unsupportedThriftFiles = [
'DebugProtoTest']
thriftFiles = [
'ThriftTest',
'OptionalRequiredTest',
'ManyTypedefs',
'EnumTest',
'DocTest',
'AnnotationTest']
namespaces = {
'ThriftTest': 'thrift.test',
'OptionalRequiredTest': 'thrift.test.optional',
'DocTest': 'thrift.test.doc',
}
@classmethod
def tearDownClass(cls):
if os.path.exists('gen-json'):
shutil.rmtree('gen-json')
@classmethod
def setUpClass(cls):
if os.path.exists('gen-json'):
shutil.rmtree('gen-json')
def getGenPath(self, thriftFile):
output_path = 'gen-json/'
output_path += self.namespaces.get(thriftFile,
thriftFile).replace('.', '/')
output_path += '.json'
return output_path
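# For example, getGenPath('ThriftTest') yields
# 'gen-json/thrift/test.json' via the namespace map, while an unmapped
# name such as 'EnumTest' yields 'gen-json/EnumTest.json'.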
def testGen(self):
for thriftFile in self.thriftFiles + self.unsupportedThriftFiles:
path = 'thrift/test/' + thriftFile + '.thrift'
self.assertTrue(os.path.exists(path))
proc = subprocess.Popen(
['_bin/thrift/compiler/thrift', '-gen', 'json', path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = proc.communicate()[0]
proc.wait()
self.assertTrue(
os.path.exists(self.getGenPath(thriftFile)), output)
for JSONFile in self.thriftFiles:
with open(self.getGenPath(JSONFile)) as jsonData:
data = json.load(jsonData)
for JSONFile in self.unsupportedThriftFiles:
path = 'gen-json/' + JSONFile + '.json'
with open(path) as jsonData:
self.assertRaises(TypeError, json.loads, jsonData)
if __name__ == '__main__':
unittest.main()
|
Predictor2/predictor | refs/heads/master | web/__init__.py | 355 | # -*- coding: utf-8 -*-
__version__ = '0.1.0'
__version_info__ = tuple([int(num) if num.isdigit() else num for num in __version__.replace('-', '.', 1).split('.')])
|
hcsturix74/django | refs/heads/master | django/contrib/gis/gdal/prototypes/srs.py | 471 | from ctypes import POINTER, c_char_p, c_int, c_void_p
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, srs_output, string_output,
void_output,
)
# Shortcut generation for routines with known parameters.
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
the OSRSpatialReference object and return a double value.
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
"""
Creates a ctypes function prototype for OSR units functions, e.g.,
OSRGetAngularUnits, OSRGetLinearUnits.
"""
return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'),
[c_void_p, POINTER(c_char_p), c_int], offset=-2, decoding='ascii'
)
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2, decoding='ascii')
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int], decoding='ascii')
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p], decoding='ascii')
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p], decoding='ascii')
# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
|
wangjeaf/CSSCheckStyle | refs/heads/master | tests/unit/config/ConfigCommandLine.py | 1 | from helper import *
def doTest():
_default()
def _default():
config = parseCkStyleCmdArgs(realpath('ckstyle.ini'), [], [], True)
equal(config.errorLevel, 0, 'errorLevel is 0')
equal(config.recursive, False, 'recursive is False')
equal(config.printFlag, False, 'print flag is False')
equal(config.include, 'all', 'include is all')
equal(config.exclude, 'none', 'exclude is none')
equal(config.extension, '.ckstyle.txt', 'extension is ok')
equal(config.fixedExtension, '.fixed.css', 'fixed extension is ok')
equal(config.safeMode, False, 'safemode is false default')
equal(config.noBak, False, 'no bak is false default')
equal(len(config.ignoreRuleSets), 1, 'one ruleset to be ignored')
equal(config.compressConfig.extension, '.min.css', 'extension is .min.css')
equal(config.compressConfig.combineFile, False, 'combine file is False')
equal(config.compressConfig.browsers, None, 'browsers is None')
equal(config.compressConfig.noBak, False, 'no bak is false default')
config = parseCkStyleCmdArgs(realpath('ckstyle.ini'), [("--errorLevel", "2"), ("--include", "abcde"), ("--exclude", "fghi"), ("-p", True), ("-r", True)], [], True)
equal(config.errorLevel, 2, 'errorLevel is 2')
equal(config.recursive, True, 'recursive is True')
equal(config.printFlag, True, 'print flag is True')
equal(config.include, 'abcde', 'include is abcde')
equal(config.exclude, 'fghi', 'exclude is fghi')
config = parseCompressCmdArgs(realpath('ckstyle.ini'), [("--errorLevel", "2"), ("--include", "abcde"), ("--exclude", "fghi"), ("-p", True), ("-r", True), ('--compressExtension', '.xxx.min.css'), ('--browsers', 'ie6,ie7,std'), ('--combineFile', 'true'), ("--safeMode", True), ("--noBak", True)], [], True)
equal(config.errorLevel, 2, 'errorLevel is 2')
equal(config.recursive, True, 'recursive is True')
equal(config.printFlag, True, 'print flag is True')
equal(config.include, 'abcde', 'include is abcde')
equal(config.exclude, 'fghi', 'exclude is fghi')
equal(config.safeMode, True, 'safemode is true')
equal(config.noBak, False, 'noBak in config is false')
equal(config.compressConfig.noBak, True, 'noBak in config.compressConfig is true')
equal(config.compressConfig.extension, '.xxx.min.css', 'extension changed')
equal(config.compressConfig.combineFile, True, 'combine file is true')
equal(config.compressConfig.browsers.has_key('ie6'), True, 'browsers is true')
config = parseFixStyleCmdArgs(realpath('ckstyle.ini'), [("--errorLevel", "2"), ("--include", "abcde"), ("--exclude", "fghi"), ("-p", True), ("-r", True), ('--fixedExtension', '.xxx.fixed.css'), ("--singleLine", True), ("--safeMode", True), ("--noBak", True)], [], True)
equal(config.errorLevel, 2, 'errorLevel is 2')
equal(config.recursive, True, 'recursive is True')
equal(config.printFlag, True, 'print flag is True')
equal(config.include, 'abcde', 'include is abcde')
equal(config.exclude, 'fghi', 'exclude is fghi')
equal(config.safeMode, True, 'safemode is true')
equal(config.noBak, True, 'noBak in fixstyle is true')
equal(config.fixToSingleLine, True, 'fix to single line is true')
equal(config.fixedExtension, '.xxx.fixed.css', 'fixed extension changed')
|
zdary/intellij-community | refs/heads/master | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorAddingEscapingToCurlyBraces.py | 11 | '{{{%d}}' % 42 |
shriyanka/daemo-forum | refs/heads/master | spirit/urls.py | 11 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include, url
import spirit.topic.views
import spirit.admin.urls
import spirit.user.urls
import spirit.search.urls
import spirit.category.urls
import spirit.topic.urls
import spirit.comment.urls
patterns = [
url(r'^$', spirit.topic.views.index_active, name='index'),
url(r'^st/admin/', include(spirit.admin.urls, namespace='admin')),
url(r'^user/', include(spirit.user.urls, namespace='user')),
url(r'^search/', include(spirit.search.urls, namespace='search')),
url(r'^category/', include(spirit.category.urls, namespace='category')),
url(r'^topic/', include(spirit.topic.urls, namespace='topic')),
url(r'^comment/', include(spirit.comment.urls, namespace='comment')),
]
urlpatterns = [
url(r'^', include(patterns, namespace='spirit', app_name='spirit')),
]
|
mxOBS/deb-pkg_trusty_chromium-browser | refs/heads/master | mojo/public/tools/bindings/pylib/mojom_tests/support/find_files.py | 99 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
from os import walk
from os.path import join
import sys
def FindFiles(top, pattern, **kwargs):
"""Finds files under |top| matching the glob pattern |pattern|, returning a
list of paths."""
matches = []
for dirpath, _, filenames in walk(top, **kwargs):
for filename in fnmatch.filter(filenames, pattern):
matches.append(join(dirpath, filename))
return matches
def main(argv):
if len(argv) != 3:
print "usage: %s path pattern" % argv[0]
return 1
for filename in FindFiles(argv[1], argv[2]):
print filename
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
butterflyology/rr-intro | refs/heads/gh-pages | bin/repo_check.py | 4 | #!/usr/bin/env python
"""
Check repository settings.
"""
from __future__ import print_function
import sys
import os
from subprocess import Popen, PIPE
import re
from optparse import OptionParser
from util import Reporter, load_yaml, require
# Import this way to produce a more useful error message.
try:
import requests
except ImportError:
print('Unable to import requests module: please install requests', file=sys.stderr)
sys.exit(1)
# Pattern to match Git command-line output for remotes => (user name, project name).
P_GIT_REMOTE = re.compile(r'upstream\s+[^:]+:([^/]+)/([^.]+)\.git\s+\(fetch\)')
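# For example, given a (hypothetical) remote listing line such as
#   upstream  git@github.com:user/project.git (fetch)
# the pattern extracts ('user', 'project').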
# Repository URL format string.
F_REPO_URL = 'https://github.com/{0}/{1}/'
# Pattern to match repository URLs => (user name, project name)
P_REPO_URL = re.compile(r'https?://github\.com/([^.]+)/([^/]+)/?')
# API URL format string.
F_API_URL = 'https://api.github.com/repos/{0}/{1}/labels'
# Expected labels and colors.
EXPECTED = {
'bug' : 'bd2c00',
'discussion' : 'fc8dc1',
'enhancement' : '9cd6dc',
'help-wanted' : 'f4fd9c',
'instructor-training' : '6e5494',
'newcomer-friendly' : 'eec275',
'question' : '808040',
'template-and-tools' : '2b3990',
'work-in-progress' : '7ae78e'
}
def main():
"""
Main driver.
"""
args = parse_args()
reporter = Reporter()
repo_url = get_repo_url(args.source_dir, args.repo_url)
check_labels(reporter, repo_url)
reporter.report()
def parse_args():
"""
Parse command-line arguments.
"""
parser = OptionParser()
parser.add_option('-r', '--repo',
default=None,
dest='repo_url',
help='repository URL')
parser.add_option('-s', '--source',
default=os.curdir,
dest='source_dir',
help='source directory')
args, extras = parser.parse_args()
require(not extras,
'Unexpected trailing command-line arguments "{0}"'.format(extras))
return args
def get_repo_url(source_dir, repo_url):
"""
Figure out which repository to query.
"""
# Explicitly specified.
if repo_url is not None:
return repo_url
# Guess.
cmd = 'git remote -v'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True, universal_newlines=True)
stdout_data, stderr_data = p.communicate()
stdout_data = stdout_data.split('\n')
matches = [P_GIT_REMOTE.match(line) for line in stdout_data]
matches = [m for m in matches if m is not None]
require(len(matches) == 1,
'Unexpected output from git remote command: "{0}"'.format(matches))
username = matches[0].group(1)
require(username, 'empty username in git remote output {0}'.format(matches[0]))
project_name = matches[0].group(2)
    require(project_name, 'empty project name in git remote output {0}'.format(matches[0]))
url = F_REPO_URL.format(username, project_name)
return url
def check_labels(reporter, repo_url):
"""
Check labels in repository.
"""
actual = get_labels(repo_url)
extra = set(actual.keys()) - set(EXPECTED.keys())
reporter.check(not extra,
None,
'Extra label(s) in repository {0}: {1}',
repo_url, ', '.join(sorted(extra)))
missing = set(EXPECTED.keys()) - set(actual.keys())
reporter.check(not missing,
None,
'Missing label(s) in repository {0}: {1}',
repo_url, ', '.join(sorted(missing)))
overlap = set(EXPECTED.keys()).intersection(set(actual.keys()))
for name in sorted(overlap):
reporter.check(EXPECTED[name] == actual[name],
None,
'Color mis-match for label {0} in {1}: expected {2}, found {3}',
name, repo_url, EXPECTED[name], actual[name])
def get_labels(repo_url):
"""
Get actual labels from repository.
"""
m = P_REPO_URL.match(repo_url)
require(m, 'repository URL {0} does not match expected pattern'.format(repo_url))
username = m.group(1)
require(username, 'empty username in repository URL {0}'.format(repo_url))
project_name = m.group(2)
    require(project_name, 'empty project name in repository URL {0}'.format(repo_url))
url = F_API_URL.format(username, project_name)
r = requests.get(url)
require(r.status_code == 200,
'Request for {0} failed with {1}'.format(url, r.status_code))
result = {}
for entry in r.json():
result[entry['name']] = entry['color']
return result
if __name__ == '__main__':
main()
|
samuelefiorini/cgm-tools | refs/heads/master | cgmtools/__init__.py | 1 | ######################################################################
# Copyright (C) 2017 Samuele Fiorini, Chiara Martini, Annalisa Barla
#
# GPL-3.0 License
######################################################################
__version__ = "0.0.1a"
|
lucasRogerioOliveira/receitas-direcionadas | refs/heads/master | backend/apps/locale_app/__init__.py | 599 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
|
YGIronMan/python | refs/heads/master | Jimmy66/0006/0006.py | 34 | #!/bin/env python
# -*- coding: utf-8 -*-
import re
# return a list of the English words in the string
def list1(string):
    words = re.findall(r'[a-zA-Z]+\b',string) # adjusted the regular expression
return words
# read the data from a file
def file_read(filename):
with open(filename,'r') as fp:
article = fp.read()
return article
# work out the most frequently occurring word
def most_word_number(word_list):
str_dict = {}
for item in word_list:
if item in str_dict:
str_dict[item] += 1
else:
str_dict[item] = 1
    # a very elegant idiom borrowed from someone else's code; the commented-out
    # block below is my own version written after seeing it. Oddly, deleting a
    # line or making a small edit in Sublime before saving can change the result,
    # and I cannot explain why
str_dict = {str_dict[key]:key for key in str_dict}
return (max(str_dict),str_dict[max(str_dict)])
# temp = {}
# for key in str_dict:
# temp[str_dict[key]] = key
# return max(temp),temp[max(temp)]
if __name__ == '__main__':
string = file_read('GitHub.txt')
words = list1(string)
times,word = most_word_number(words)
    print 'The most frequent word is ' + str(word) + ', appearing ' + str(times) + ' times'
|
rubiojr/surface3-kernel | refs/heads/master | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
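        # each 16-byte IA-64 bundle holds three instruction slots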
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
vrieni/orange | refs/heads/master | Orange/orng/orngInteract.py | 6 | #
# Module Orange Interactions
# --------------------------
#
# CVS Status: $Id$
#
# Author: Aleks Jakulin (jakulin@acm.org)
# (Copyright (C)2004 Aleks Jakulin)
#
# Purpose: Analysis of dependencies between attributes given the class.
# 3-WAY INTERACTIONS
#
# Project initiated on 2003/05/08
#
# ChangeLog:
# - 2003/05/09:
# fixed a problem with domains that need no preprocessing
# fixed the decimal point printing problem
# added the support for dissimilarity matrix, used for attribute clustering
# - 2003/05/10:
# fixed a problem with negative percentages of less than a percent
# - 2003/05/12:
# separated the 'prepare' function
# - 2003/09/18:
# added support for cluster coloring
# cleaned up backwards-incompatible changes (grrr) (color changes, discData)
# added color-coded dissimilarity matrix export
# - 2004/01/31:
# removed adhoc stats-gathering code in favor of the orngContingency module
# added p-value estimates
# - 2004/03/24:
# fixed an ugly bug in dep-dissimilarity matrix processing
#
import orange, statc
import orngContingency, numpy
import warnings, math, string, copy
import Orange
def _nicefloat(f,sig):
# pretty-float formatter
i = int(f)
s = '%1.0f'%f
n = sig-len('%d'%abs(f)) # how many digits is the integer part
if n > 0:
# we can put a few decimals at the end
fp = abs(f)-abs(i)
s = ''
if f < 0:
s += '-'
s += '%d'%abs(i) + ('%f'%fp)[1:2+n]
return s
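# A quick sanity check of the formatter (illustrative values):
#   _nicefloat(3.14159, 3) -> '3.14'  (room left for two decimals)
#   _nicefloat(123.456, 2) -> '123'   (integer part already uses the digits)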
class InteractionMatrix:
def _prepare(self, t):
# prepares an Orange table so that it doesn't contain continuous
# attributes or missing values
### DISCRETIZE VARIABLES ###
newatt = []
entroD = orange.EntropyDiscretization()
equiD = orange.EquiNDiscretization(numberOfIntervals = 2)
for i in t.domain.attributes:
if i.varType == 2:
d = entroD(i,t)
if len(d.values) < 2:
# prevent discretization into a single value
d = equiD(i,t)
d.name = 'E'+d.name
warnings.warn('Discretizing %s into %s with %d values.'%(i.name,d.name,len(d.values)))
newatt.append(d)
else:
newatt.append(i)
if list(t.domain.attributes) != newatt:
t = t.select(newatt + ([t.domain.classVar] if t.domain.classVar else []))
### FIX MISSING VALUES ###
special_attributes = []
# 2006-08-23: fixed by PJ: append classVar only if it exists
## all_attributes = [i for i in t.domain.attributes]+[t.domain.classVar]
all_attributes = [i for i in t.domain.attributes]
if t.domain.classVar:
all_attributes += [t.domain.classVar]
for i in range(len(all_attributes)):
for j in t:
if j[i].isSpecial():
special_attributes.append(i)
break
# create new attributes
if len(special_attributes) > 0:
# prepare attributes
newatts = []
for i in range(len(all_attributes)):
old = all_attributes[i]
if i in special_attributes:
oldv = [v for v in old.values]
assert('.' not in oldv)
new = orange.EnumVariable(name='M_'+old.name, values=oldv+['.'])
warnings.warn('Removing special values from %s into %s.'%(old.name,new.name))
newatts.append(new)
else:
newatts.append(old)
# convert table
exs = []
# 2006-08-23: added by PJ: add a class variable (if not already existing)
if not t.domain.classVar:
newatts.append(orange.EnumVariable("class", values=["."]))
t = orange.ExampleTable(orange.Domain(t.domain.attributes, newatts[-1]), t)
newd = orange.Domain(newatts)
for ex in t:
nex = []
for i in range(len(newatts)):
if ex[i].isSpecial():
v = newatts[i]('.')
else:
v = newatts[i](int(ex[i]))
nex.append(v)
exs.append(orange.Example(newd,nex))
t = orange.ExampleTable(exs)
return t
def __init__(self, t, save_data=1, interactions_too = 1, dependencies_too=0, prepare=1, pvalues = 0, simple_too=0,iterative_scaling=0,weighting=None):
if prepare:
t = self._prepare(t)
if save_data:
self.discData = t # save the discretized data
### PREPARE INDIVIDUAL ATTRIBUTES ###
# Attribute Preparation
NA = len(t.domain.attributes)
self.names = []
self.labelname = ""
if t.domain.classVar:
self.labelname = t.domain.classVar.name
self.gains = []
self.freqs = []
self.way2 = {}
self.way3 = {}
self.ig = []
self.list = []
self.abslist = []
self.plist = []
self.plut = {}
self.ents = {}
self.corr = {}
self.chi2 = {}
self.simple = {}
for i in range(NA):
if weighting != None:
atc = orngContingency.get2Int(t,t.domain.attributes[i],t.domain.classVar,wid=weighting)
else:
atc = orngContingency.get2Int(t,t.domain.attributes[i],t.domain.classVar)
gai = atc.InteractionInformation()
self.gains.append(gai)
self.corr[(i,-1)] = gai
self.ents[(i,)] = orngContingency.Entropy(atc.a)
self.way2[(i,-1,)] = atc
self.ents[(i,-1)] = orngContingency.Entropy(atc.m)
N = sum(atc.a)
self.chi2[(i, i)] = statc.chisqprob(N * (numpy.sum(numpy.outer(atc.pa, atc.pa)) - 2 + len(atc.pa)), (len(atc.pa)-1)**2)
# self.chi2[(i, i)] = N * (numpy.sum(numpy.outer(atc.pa, atc.pa)) - 2 + len(atc.pa))
if simple_too:
simp = 0.0
for k in xrange(min(len(atc.a),len(atc.b))):
try:
simp += atc.pm[k,k]
except:
pass
self.simple[(i,-1)] = simp
# fix the name
st = '%s'%t.domain.attributes[i].name # copy
self.names.append(st)
if pvalues:
pv = orngContingency.getPvalue(gai,atc)
self.plist.append((pv,(gai,i,-1)))
self.plut[(i,-1)] = pv
#print "%s\t%f\t%f\t%d"%(st,pv,gai,atc.total)
line = []
for j in range(i):
if dependencies_too:
if weighting != None:
c = orngContingency.get2Int(t,t.domain.attributes[j],t.domain.attributes[i],wid=weighting)
else:
c = orngContingency.get2Int(t,t.domain.attributes[j],t.domain.attributes[i])
self.way2[(j,i,)] = c
gai = c.InteractionInformation()
self.ents[(j,i,)] = orngContingency.Entropy(c.m)
self.corr[(j,i,)] = gai
self.chi2[(j,i)] = c.ChiSquareP()
if simple_too:
simp = 0.0
for k in xrange(min(len(c.a),len(c.b))):
try:
qq = c.pm[k,k]
except:
qq = 0
simp += qq
self.simple[(j,i)] = simp
if pvalues:
pv = orngContingency.getPvalue(gai,c)
self.plist.append((pv,(gai,j,i)))
self.plut[(j,i)] = pv
if interactions_too:
if weighting != None:
c = orngContingency.get3Int(t,t.domain.attributes[j],t.domain.attributes[i],t.domain.classVar,wid=weighting)
else:
c = orngContingency.get3Int(t,t.domain.attributes[j],t.domain.attributes[i],t.domain.classVar)
self.way3[(j,i,-1)] = c
igv = c.InteractionInformation()
line.append(igv)
self.list.append((igv,(igv,j,i)))
self.abslist.append((abs(igv),(igv,j,i)))
if pvalues:
if iterative_scaling:
div = c.IPF()
else:
div = c.KSA()[0]
pv = orngContingency.getPvalue(div,c)
#print "%s-%s\t%f\t%f\t%d"%(c.names[0],c.names[1],pv,igv,c.total)
self.plist.append((pv,(igv,j,i,-1)))
self.plut[(j,i,-1)] = pv
self.ig.append(line)
self.entropy = orngContingency.Entropy(atc.b)
self.ents[(-1,)] = self.entropy
self.list.sort()
self.abslist.sort()
self.plist.sort()
self.attlist = []
for i in range(NA):
self.attlist.append((self.gains[i],i))
self.attlist.sort()
self.NA = NA
def dump(self):
NA = len(self.names)
for j in range(1,NA):
for i in range(j):
t = '%s+%s'%(self.names[i],self.names[j])
print "%30s\t%2.4f\t%2.4f\t%2.4f\t%2.4f\t%2.4f"%(t,self.igain[(i,j)],self.corr[(i,j)],self.igain[(i,j)]+self.corr[(i,j)],self.gains[i],self.gains[j])
def exportNetwork(self, absolute_int=10, positive_int = 0, negative_int = 0, best_attributes = 0, significant_digits = 2, pretty_names = 1, widget_coloring=1, pcutoff = 1):
NA = len(self.names)
### SELECTION OF INTERACTIONS AND ATTRIBUTES ###
# prevent crashes
best_attributes = min(best_attributes,len(self.attlist))
positive_int = min(positive_int,len(self.list))
absolute_int = min(absolute_int,len(self.list))
negative_int = min(negative_int,len(self.list))
# select the top interactions
ins = []
if positive_int > 0:
ins += self.list[-positive_int:]
ins += self.list[:negative_int]
if absolute_int > 0:
ins += self.abslist[-absolute_int:]
# pick best few attributes
atts = []
if best_attributes > 0:
atts += [i for (x,i) in self.attlist[-best_attributes:]]
# disregard the insignificant attributes, interactions
if len(self.plist) > 0 and pcutoff < 1:
# attributes
oats = atts
atts = []
for i in oats:
if self.plut[(i,-1)] < pcutoff:
atts.append(i)
# interactions
oins = ins
ins = []
for y in oins:
(ig,i,j) = y[1]
if self.plut[(i,j,-1)] < pcutoff:
ins.append(y)
ints = []
max_igain = -1e6
min_gain = 1e6 # lowest information gain of involved attributes
# remove duplicates and sorting keys
for (x,v) in ins:
if v not in ints:
ints.append(v)
# add to attribute list
(ig,i,j) = v
max_igain = max(abs(ig),max_igain)
for x in [i,j]:
if x not in atts:
atts.append(x)
min_gain = min(min_gain,self.gains[x])
# fill-in the attribute list with all possibly more important attributes
## todo
### NODE DRAWING ###
map = {}
graph = Orange.core.Network(len(atts), 0)
table = []
for i in range(len(atts)):
map[atts[i]] = i
ndx = atts[i]
t = '%s' % self.names[ndx]
if pretty_names:
t = string.replace(t, "ED_", "")
t = string.replace(t, "D_", "")
t = string.replace(t, "M_", "")
t = string.replace(t, " ", "\\n")
t = string.replace(t, "-", "\\n")
t = string.replace(t, "_", "\\n")
r = self.gains[ndx] * 100.0 / self.entropy
table.append([i + 1, t, r])
d = orange.Domain([orange.FloatVariable('index'), orange.StringVariable('label'), orange.FloatVariable('norm. gain')])
data = orange.ExampleTable(d, table)
graph.items = data
table = []
        for (ig,oi,oj) in ints:
            # keep the original attribute indices for the p-value lookup below;
            # i and j are remapped to node indices for drawing
            j = map[oj]
            i = map[oi]
perc = int(abs(ig)*100.0/max(max_igain,self.attlist[-1][0])+0.5)
graph[i, j] = perc / 30 + 1
if self.entropy > 1e-6:
mc = _nicefloat(100.0*ig/self.entropy,significant_digits)+"%"
else:
mc = _nicefloat(0.0,significant_digits)
if len(self.plist) > 0 and pcutoff < 1:
mc += "\\nP\<%.3f"%self.plut[(i,j,-1)]
if ig > 0:
if widget_coloring:
color = "green"
else:
color = '"0.0 %f 0.9"'%(0.3+0.7*perc/100.0) # adjust saturation
dir = "both"
else:
if widget_coloring:
color = "red"
else:
color = '"0.5 %f 0.9"'%(0.3+0.7*perc/100.0) # adjust saturation
dir = 'none'
table.append([i, j, mc, dir, color])
d = orange.Domain([orange.FloatVariable('u'), orange.FloatVariable('v'), orange.StringVariable('label'), orange.EnumVariable('dir', values = ["both", "none"]), orange.EnumVariable('color', values = ["green", "red"])])
data = orange.ExampleTable(d, table)
graph.links = data
return graph
def exportGraph(self, f, absolute_int=10, positive_int = 0, negative_int = 0, best_attributes = 0, print_bits = 1, black_white = 0, significant_digits = 2, postscript = 1, pretty_names = 1, url = 0, widget_coloring=1, pcutoff = 1):
NA = len(self.names)
### SELECTION OF INTERACTIONS AND ATTRIBUTES ###
# prevent crashes
best_attributes = min(best_attributes,len(self.attlist))
positive_int = min(positive_int,len(self.list))
absolute_int = min(absolute_int,len(self.list))
negative_int = min(negative_int,len(self.list))
# select the top interactions
ins = []
if positive_int > 0:
ins += self.list[-positive_int:]
ins += self.list[:negative_int]
if absolute_int > 0:
ins += self.abslist[-absolute_int:]
# pick best few attributes
atts = []
if best_attributes > 0:
atts += [i for (x,i) in self.attlist[-best_attributes:]]
# disregard the insignificant attributes, interactions
if len(self.plist) > 0 and pcutoff < 1:
# attributes
oats = atts
atts = []
for i in oats:
if self.plut[(i,-1)] < pcutoff:
atts.append(i)
# interactions
oins = ins
ins = []
for y in oins:
(ig,i,j) = y[1]
if self.plut[(i,j,-1)] < pcutoff:
ins.append(y)
ints = []
max_igain = -1e6
min_gain = 1e6 # lowest information gain of involved attributes
# remove duplicates and sorting keys
for (x,v) in ins:
if v not in ints:
ints.append(v)
# add to attribute list
(ig,i,j) = v
max_igain = max(abs(ig),max_igain)
for x in [i,j]:
if x not in atts:
atts.append(x)
min_gain = min(min_gain,self.gains[x])
# fill-in the attribute list with all possibly more important attributes
## todo
### NODE DRAWING ###
# output the attributes
f.write("digraph G {\n")
if print_bits:
shap = 'record'
else:
shap = 'box'
for i in atts:
t = '%s'%self.names[i]
if pretty_names:
t = string.replace(t,"ED_","")
t = string.replace(t,"D_","")
t = string.replace(t,"M_","")
t = string.replace(t," ","\\n")
t = string.replace(t,"-","\\n")
t = string.replace(t,"_","\\n")
if print_bits:
r = self.gains[i]*100.0/self.entropy
if len(self.plist) > 0 and pcutoff < 1:
t = "{%s|{%s%% | P\<%.3f}}"%(t,_nicefloat(r,significant_digits),self.plut[(i,-1)])
else:
t = "{%s|%s%%}"%(t,_nicefloat(r,significant_digits))
if not url:
f.write("\tnode [ shape=%s, label = \"%s\"] %d;\n"%(shap,t,i))
else:
f.write("\tnode [ shape=%s, URL = \"%d\", label = \"%s\"] %d;\n"%(shap,i,t,i))
### EDGE DRAWING ###
for (ig,i,j) in ints:
perc = int(abs(ig)*100.0/max(max_igain,self.attlist[-1][0])+0.5)
if self.entropy > 1e-6:
mc = _nicefloat(100.0*ig/self.entropy,significant_digits)+"%"
else:
mc = _nicefloat(0.0,significant_digits)
if len(self.plist) > 0 and pcutoff < 1:
mc += "\\nP\<%.3f"%self.plut[(i,j,-1)]
if postscript:
style = "style=\"setlinewidth(%d)\","%(abs(perc)/30+1)
else:
style = ''
if black_white:
color = 'black'
if ig > 0:
dir = "both"
else:
style = 'style=dashed,'
dir = 'none'
else:
if ig > 0:
if widget_coloring:
color = "green"
else:
color = '"0.0 %f 0.9"'%(0.3+0.7*perc/100.0) # adjust saturation
dir = "both"
else:
if widget_coloring:
color = "red"
else:
color = '"0.5 %f 0.9"'%(0.3+0.7*perc/100.0) # adjust saturation
dir = 'none'
if not url:
f.write("\t%d -> %d [dir=%s,%scolor=%s,label=\"%s\",weight=%d];\n"%(i,j,dir,style,color,mc,(perc/30+1)))
else:
f.write("\t%d -> %d [URL=\"%d-%d\",dir=%s,%scolor=%s,label=\"%s\",weight=%d];\n"%(i,j,min(i,j),max(i,j),dir,style,color,mc,(perc/30+1)))
f.write("}\n")
def exportDissimilarityMatrix(self, truncation = 1000, pretty_names = 1, print_bits = 0, significant_digits = 2, show_gains = 1, color_coding = 0, color_gains = 0, jaccard=0, noclass=0):
NA = self.NA
### BEAUTIFY THE LABELS ###
labels = []
maxgain = max(self.gains)
for i in range(NA):
t = '%s'%self.names[i]
if pretty_names:
t = string.replace(t,"ED_","")
t = string.replace(t,"D_","")
t = string.replace(t,"M_","")
r = self.gains[i]
if print_bits:
if self.entropy > 1e-6:
t = "%s (%s%%)"%(t,_nicefloat(r*100.0/self.entropy,significant_digits))
else:
t = "%s (0%%)"%(t)
if show_gains: # a bar indicating the feature importance
if maxgain > 1e-6:
t += ' '+'*'*int(8.0*r/maxgain+0.5)
labels.append(t)
### CREATE THE DISSIMILARITY MATRIX ###
if jaccard:
# create the lookup of 3-entropies
ent3 = {}
maxx = 1e-6
for i in range(1,NA):
for j in range(i):
if noclass:
e = self.ents[(j,i)]
else:
e = self.ents[(j,i)]+self.ents[(j,-1)]+self.ents[(i,-1)]
e -= self.ents[(i,)]+self.ents[(j,)]+self.ents[(-1,)]
e -= self.ig[i][j]
ent3[(i,j)] = e
if e > 1e-6:
e = abs(self.ig[i][j])/e
else:
e = 0.0
maxx = max(maxx,e)
# check the information gains...
if color_gains:
for i in range(NA):
e = self.gains[i]
if self.ents[(i,-1)] > 1e-6:
e /= self.ents[(i,-1)]
else:
e = 0.0
ent3[(i,)] = e
maxx = max(maxx,e)
else:
maxx = self.abslist[-1][0]
if color_gains:
maxx = max(maxx,self.attlist[-1][0])
if color_gains:
if maxx > 1e-6:
cgains = [0.5*(1-i/maxx) for i in self.gains]
else:
cgains = [0.0 for i in self.gains]
diss = []
for i in range(1,NA):
newl = []
for j in range(i):
d = self.ig[i][j]
if jaccard:
if ent3[(i,j)] > 1e-6:
d /= ent3[(i,j)]
else:
d = 0.0
if color_coding:
if maxx > 1e-6:
if maxx > 1e-6:
t = 0.5*(1-d/maxx)
else:
t = 0.0
else:
t = 0
else:
# transform the IG into a distance
ad = abs(d)
if ad*truncation > 1:
t = 1.0 / ad
else:
t = truncation
newl.append(t)
diss.append(newl)
if color_gains:
return (diss,labels,cgains)
else:
return (diss,labels)
def getClusterAverages(self, clust):
#assert(len(self.attlist) == clust.n)
# get the max value
#d = max(self.attlist[-1][0],self.abslist[-1][0])
d = self.abslist[-1][0]
# prepare a lookup
LUT = {}
for (ig,(igv,i,j)) in self.list:
LUT[i,j] = igv
LUT[j,i] = igv
cols = []
merges = []
for i in range(clust.n):
merges.append((0.0,[clust.n-i-1]))
merges.append("sentry")
p = clust.n
for i in range(clust.n-1):
a = merges[p+clust.merging[i][0]] # cluster 1
b = merges[p+clust.merging[i][1]] # cluster 2
na = len(a[1])
nb = len(b[1])
# compute cross-average
sum = 0.0
for x in a[1]:
for y in b[1]:
sum += LUT[x,y]
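            # merge the two within-cluster averages with the cross-cluster sum;
            # n*n - n counts the ordered within-cluster pairs, so the divisor is
            # the number of ordered pairs in the merged cluster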
avg = (a[0]*(na*na-na) + b[0]*(nb*nb-nb) + 2*sum)/(math.pow(na+nb,2)-na-nb)
clustercolor = 0.5*(1-avg/d)
intercluster = 0.5*(1-sum/(d*na*nb))
cols.append((clustercolor,intercluster)) # positive -> red, negative -> blue
merges.append((avg,a[1]+b[1]))
return cols
def depExportGraph(self, f, n_int=1, print_bits = 1, black_white = 0, undirected = 1, significant_digits = 2, pretty_names = 1, pcutoff=-1, postscript=1, spanning_tree = 1, TAN=1, source=-1, labelled=1,jaccard=1,filter=[],diagonal=0,pvlabel=0):
NA = self.NA
### SELECTION OF INTERACTIONS AND ATTRIBUTES ###
links = []
maxlink = -1e6
if n_int == 1 and spanning_tree:
# prepare table
lmm = []
for i in range(1,NA):
ei = self.ents[(i,)]
for j in range(i):
ej = self.ents[(j,)]
if TAN:
# I(A;B|C)
v = self.way3[(j,i,-1)].InteractionInformation()
v += self.way2[(j,i)].InteractionInformation()
else:
if jaccard:
v = self.way2[(j,i)].JaccardInteraction() # I(A;B) chow-liu, mutual information
else:
v = self.way2[(j,i)].InteractionInformation() # I(A;B) chow-liu, mutual information
if ei > ej:
lmm.append((abs(v),v,ej,(j,i)))
else:
lmm.append((abs(v),v,ei,(i,j)))
lmm.sort()
maxlink = lmm[-1][0]
# use Prim's algorithm here
mapped = []
for i in range(NA):
mapped.append(i)
n = NA
idx = -1 # running index in the sorted array of possible links
while n > 1:
# find the cheapest link
while 1:
(av,v,e,(i,j)) = lmm[idx]
idx -= 1
if mapped[i] != mapped[j]:
break
links.append((v,(i,j),e))
toremove = mapped[j]
for k in range(NA):
if mapped[k] == toremove:
mapped[k] = mapped[i]
n -= 1
else:
# select the top
lmm = []
for i in range(NA):
if filter==[] or self.names[i] in filter:
for j in range(i):
if filter==[] or self.names[j] in filter:
ii = max(i,j)
jj = min(i,j)
if jaccard and pcutoff < 0.0:
if self.ents[(jj,ii)] == 0.0:
v = 1.0
else:
v = self.way2[(jj,ii)].JaccardInteraction()
lmm.append((v,(i,j)))
else:
v = self.way2[(jj,ii)].InteractionInformation()
if pcutoff >= 0.0:
xt = self.way2[(jj,ii)]
dof = 1.0
dof *= len(xt.values[0])-1
dof *= len(xt.values[1])-1
pv = orngContingency.getPvalueDOF(v,xt,dof)
if pv <= pcutoff:
v = 1-pv
lmm.append((v,(i,j)))
else:
lmm.append((v,(i,j)))
lmm.sort()
maxlink = max(lmm[-1][0],maxlink)
links += [(v,p,1.0) for (v,p) in lmm[-n_int:]]
# mark vertices
mv = [0 for x in range(NA)]
for (v,(i,j),e) in links:
mv[i] = 1
mv[j] = 1
# output the attributes
f.write("digraph G {\n")
if print_bits:
shap = 'record'
else:
shap = 'box'
for n in range(NA):
if mv[n]:
if source != -1 and not type(source)==type(1):
# find the name
if string.upper(self.names[n])==string.upper(source):
source = n
t = '%s'%self.names[n]
if pretty_names:
t = string.replace(t,"ED_","")
t = string.replace(t,"D_","")
t = string.replace(t,"M_","")
t = string.replace(t," ","\\n")
t = string.replace(t,"-","\\n")
t = string.replace(t,"_","\\n")
if print_bits:
#t = "{%s|%s}"%(t,_nicefloat(self.ents[(n,)],significant_digits))
t = "{%s|%s}"%(t,_nicefloat(self.way2[(n,-1)].total,significant_digits))
f.write("\tnode [ shape=%s, label = \"%s\"] %d;\n"%(shap,t,n))
if source != -1:
# redirect all links
age = [-1]*NA
age[source] = 0
phase = 1
remn = NA-1
premn = -1
while remn > 0 and premn != remn:
premn = remn
for (v,(i,j),e) in links:
if age[i] >= 0 and age[i] < phase and age[j] < 0:
age[j] = phase
remn -= 1
if age[j] >= 0 and age[j] < phase and age[i] < 0:
age[i] = phase
remn -= 1
phase += 1
### EDGE DRAWING ###
for (v,(i,j),e) in links:
if v > 0:
c = v/e
perc = int(100*v/maxlink + 0.5)
style = ''
if postscript:
style += "style=\"setlinewidth(%d)\","%(abs(perc)/30+1)
if not black_white:
l = 0.3+0.7*perc/100.0
style += 'color="0.5 %f %f",'%(l,1-l) # adjust saturation
if labelled:
if diagonal:
ct = self.way2[(min(i,j),max(i,j))]
(ni,nj) = numpy.shape(ct.m)
cc = 0.0
if ni==nj:
for x in range(ni):
cc += ct.m[x,x]
style += 'label=\"%s%%\",'%_nicefloat(100.0*cc/ct.total,significant_digits)
elif pvlabel and pcutoff >= 0.0:
style += 'label=\"%e\",'%(1-v)
else:
style += 'label=\"%s%%\",'%_nicefloat(100.0*c,significant_digits)
if source == -1 or undirected:
f.write("\t%d -> %d [%sweight=%d,dir=none];\n"%(j,i,style,(perc/30+1)))
else:
if age[i] > age[j]:
f.write("\t%d -> %d [%sweight=%d];\n"%(j,i,style,(perc/30+1)))
else:
f.write("\t%d -> %d [%sweight=%d];\n"%(i,j,style,(perc/30+1)))
f.write("}\n")
def exportChi2Matrix(self, pretty_names = 1):
labels = []
for i in range(self.NA):
t = '%s'%self.names[i]
if pretty_names:
t = string.replace(t,"ED_","")
t = string.replace(t,"D_","")
t = string.replace(t,"M_","")
labels.append(t)
diss = [[self.chi2[(i,j)] for i in range(j+1)] for j in range(self.NA)]
return diss, labels
def depExportDissimilarityMatrix(self, truncation = 1000, pretty_names = 1, jaccard = 1, simple_metric=0,color_coding = 0, verbose=0, include_label=0):
NA = self.NA
### BEAUTIFY THE LABELS ###
labels = []
for i in range(NA):
t = '%s'%self.names[i]
if pretty_names:
t = string.replace(t,"ED_","")
t = string.replace(t,"D_","")
t = string.replace(t,"M_","")
labels.append(t)
if include_label:
labels.append(self.labelname)
### CREATE THE DISSIMILARITY MATRIX ###
if color_coding:
maxx = -1
pett = range(1,NA)
if include_label:
pett.append(-1)
for x in pett:
if x == -1:
sett = range(NA)
else:
sett = range(x)
for y in sett:
t = self.corr[(y,x)]
if jaccard:
l = self.ents[(y,x)]
if l > 1e-6:
t /= l
maxx = max(maxx,t)
if verbose:
if jaccard:
print 'maximum intersection is %3d percent.'%(maxx*100.0)
else:
print 'maximum intersection is %f bits.'%maxx
diss = []
pett = range(1,NA)
if include_label:
pett.append(-1)
for x in pett:
if x == -1:
sett = range(NA)
else:
sett = range(x)
newl = []
for y in sett:
if simple_metric:
t = 1-self.simple[(y,x)]
else:
t = self.corr[(y,x)]
if jaccard:
l = self.ents[(y,x)]
if l > 1e-6:
t /= l
if color_coding:
#t = 0.5*(1-t/maxx)
if jaccard:
t = (1-t)*0.5
else:
t = 0.5*(1-t/maxx)
else:
if t*truncation > 1:
t = 1.0 / t
else:
t = truncation
newl.append(t)
diss.append(newl)
return (diss, labels)
def depGetClusterAverages(self, clust):
d = 1.0
cols = []
merges = []
for i in range(clust.n):
merges.append((0.0,[clust.n-i-1]))
merges.append("sentry")
p = clust.n
for i in range(clust.n-1):
a = merges[p+clust.merging[i][0]] # cluster 1
b = merges[p+clust.merging[i][1]] # cluster 2
na = len(a[1])
nb = len(b[1])
# compute cross-average
sum = 0.0
for x in a[1]:
for y in b[1]:
xx = max(x,y)
yy = min(x,y)
if xx == self.NA:
xx = -1
t = self.corr[(yy,xx)]
l = self.ents[(yy,xx)]
if l > 1e-6:
t /= l
sum += t
avg = (a[0]*(na*na-na) + b[0]*(nb*nb-nb) + 2*sum)/(math.pow(na+nb,2)-na-nb)
clustercolor = 0.5*(1-avg/d)
intercluster = 0.5*(1-sum/(d*na*nb))
cols.append((clustercolor,intercluster)) # positive -> red, negative -> blue
merges.append((avg,a[1]+b[1]))
return cols
if __name__== "__main__":
t = orange.ExampleTable('d_zoo.tab')
im = InteractionMatrix(t,save_data=0, pvalues = 1,iterative_scaling=0)
# interaction graph
f = open('zoo.dot','w')
im.exportGraph(f,significant_digits=3,pcutoff = 0.01,absolute_int=1000,best_attributes=100,widget_coloring=0,black_white=1)
f.close()
# interaction clustering
import orngCluster
(diss,labels) = im.exportDissimilarityMatrix(show_gains=0)
c = orngCluster.DHClustering(diss)
NCLUSTERS = 6
c.domapping(NCLUSTERS)
print "Clusters:"
for j in range(1,NCLUSTERS+1):
print "%d: "%j,
# print labels of that cluster
for i in range(len(labels)):
if c.mapping[i] == j:
print labels[i],
print
|
lazywei/scikit-learn | refs/heads/master | examples/ensemble/plot_voting_decision_regions.py | 230 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
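# With soft voting the class probabilities are averaged using the weights
# above, i.e. roughly p = (2*p_dt + 1*p_knn + 2*p_svc) / 5 for each class.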
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
|
xen0l/ansible | refs/heads/devel | lib/ansible/plugins/connection/jail.py | 16 | # Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
# Copyright (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: jail
short_description: Run tasks in jails
description:
- Run commands or put/fetch files to an existing jail
version_added: "2.0"
options:
remote_addr:
description:
- Path to the jail
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_jail_host
remote_user:
description:
- User to execute as inside the jail
vars:
- name: ansible_user
- name: ansible_jail_user
"""
import distutils.spawn
import os
import os.path
import subprocess
import traceback
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local BSD Jail based connections '''
modified_jailname_key = 'conn_jail_name'
transport = 'jail'
# Pipelining may work. Someone needs to test by setting this to True and
# having pipelining=True in their ansible.cfg
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS)
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.jail = self._play_context.remote_addr
if self.modified_jailname_key in kwargs:
self.jail = kwargs[self.modified_jailname_key]
if os.geteuid() != 0:
raise AnsibleError("jail connection requires running as root")
self.jls_cmd = self._search_executable('jls')
self.jexec_cmd = self._search_executable('jexec')
if self.jail not in self.list_jails():
raise AnsibleError("incorrect jail name %s" % self.jail)
@staticmethod
def _search_executable(executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
raise AnsibleError("%s command not found in PATH" % executable)
return cmd
def list_jails(self):
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return to_text(stdout, errors='surrogate_or_strict').split()
def _connect(self):
''' connect to the jail; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the jail. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file
into memory.
        compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
local_cmd = [self.jexec_cmd]
set_env = ''
if self._play_context.remote_user is not None:
local_cmd += ['-U', self._play_context.remote_user]
# update HOME since -U does not update the jail environment
set_env = 'HOME=~' + self._play_context.remote_user + ' '
local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the jail '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def put_file(self, in_path, out_path):
''' transfer a file from local to jail '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from jail to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
|
isandlaTech/cohorte-devtools | refs/heads/master | qualifier/deploy/cohorte-home/repo/sleekxmpp/plugins/xep_0086/stanza.py | 14 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import Error
from sleekxmpp.xmlstream import ElementBase, ET, register_stanza_plugin
class LegacyError(ElementBase):
"""
Older XMPP implementations used code based error messages, similar
to HTTP response codes. Since then, error condition elements have
been introduced. XEP-0086 provides a mapping between the new
condition elements and a combination of error types and the older
response codes.
Also see <http://xmpp.org/extensions/xep-0086.html>.
Example legacy error stanzas:
<error xmlns="jabber:client" code="501" type="cancel">
<feature-not-implemented
xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
<error code="402" type="auth">
<payment-required
xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
Attributes:
error_map -- A map of error conditions to error types and
code values.
Methods:
setup -- Overrides ElementBase.setup
set_condition -- Remap the type and code interfaces when a
condition is set.
"""
name = 'legacy'
namespace = Error.namespace
plugin_attrib = name
interfaces = set(('condition',))
overrides = ['set_condition']
error_map = {'bad-request': ('modify', '400'),
'conflict': ('cancel', '409'),
'feature-not-implemented': ('cancel', '501'),
'forbidden': ('auth', '403'),
'gone': ('modify', '302'),
'internal-server-error': ('wait', '500'),
'item-not-found': ('cancel', '404'),
'jid-malformed': ('modify', '400'),
'not-acceptable': ('modify', '406'),
'not-allowed': ('cancel', '405'),
'not-authorized': ('auth', '401'),
'payment-required': ('auth', '402'),
'recipient-unavailable': ('wait', '404'),
'redirect': ('modify', '302'),
'registration-required': ('auth', '407'),
'remote-server-not-found': ('cancel', '404'),
'remote-server-timeout': ('wait', '504'),
'resource-constraint': ('wait', '500'),
'service-unavailable': ('cancel', '503'),
'subscription-required': ('auth', '407'),
'undefined-condition': (None, '500'),
'unexpected-request': ('wait', '400')}
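    # For example, looking up the legacy mapping for a condition (a sketch):
    #
    #     etype, code = LegacyError.error_map['item-not-found']
    #     # etype == 'cancel', code == '404'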
def setup(self, xml):
"""Don't create XML for the plugin."""
self.xml = ET.Element('')
def set_condition(self, value):
"""
Set the error type and code based on the given error
condition value.
Arguments:
value -- The new error condition.
"""
self.parent().set_condition(value)
error_data = self.error_map.get(value, None)
if error_data is not None:
if error_data[0] is not None:
self.parent()['type'] = error_data[0]
self.parent()['code'] = error_data[1]
|
zengenti/ansible | refs/heads/devel | lib/ansible/module_utils/_text.py | 62 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Toshio Kuratomi <a.badger@gmail.com>, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
.. warning:: This module_util is currently internal implementation.
We want to evaluate this code for stability and API suitability before
making backwards compatibility guarantees. The API may change between
releases. Do not use this unless you are willing to port your module code.
"""
import codecs
from ansible.module_utils.six import PY3, text_type, binary_type
try:
codecs.lookup_error('surrogateescape')
HAS_SURROGATEESCAPE = True
except LookupError:
HAS_SURROGATEESCAPE = False
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_replace',
'surrogate_or_strict',
'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a byte string
:arg obj: An object to make sure is a byte string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. There are three additional error strategies
specifically aimed at helping people to port code. The first two are:
:surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``strict``
:surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
handler, otherwise it will use ``replace``.
Because ``surrogateescape`` was added in Python3 this usually means that
Python3 will use ``surrogateescape`` and Python2 will use the fallback
error handler. Note that the code checks for ``surrogateescape`` when the
module is imported. If you have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement characters
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
"""
if isinstance(obj, binary_type):
return obj
# We're given a text string
# If it has surrogates, we know because it will decode
original_errors = errors
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, text_type):
try:
# Try this first as it's the fastest
return obj.encode(encoding, errors)
except UnicodeEncodeError:
if original_errors in (None, 'surrogate_then_replace'):
# Slow but works
return_string = obj.encode('utf-8', 'surrogateescape')
return_string = return_string.decode('utf-8', 'replace')
return return_string.encode(encoding, 'replace')
raise
# Note: We do these last even though we have to call to_bytes again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return to_bytes('')
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
# python2.4 doesn't have b''
return to_bytes('')
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
return to_bytes(value, encoding, errors)
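# Illustrative behaviour (a sketch, not part of the original module):
#
#   to_bytes(u'caf\xe9')                       # -> b'caf\xc3\xa9' (utf-8)
#   to_bytes(u'caf\xe9', encoding='ascii',
#            errors='surrogate_then_replace')  # -> b'caf?' (never raises)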
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
"""Make sure that a string is a text string
:arg obj: An object to make sure is a text string. In most cases this
will be either a text string or a byte string. However, with
``nonstring='simplerepr'``, this can be used as a traceback-free
version of ``str(obj)``.
:kwarg encoding: The encoding to use to transform from a byte string to
a text string. Defaults to using 'utf-8'.
:kwarg errors: The error handler to use if the byte string is not
decodable using the specified encoding. Any valid `codecs error
handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
may be specified. We support three additional error strategies
specifically aimed at helping people to port code:
:surrogate_or_strict: Will use surrogateescape if it is a valid
handler, otherwise it will use strict
:surrogate_or_replace: Will use surrogateescape if it is a valid
handler, otherwise it will use replace.
:surrogate_then_replace: Does the same as surrogate_or_replace but
            was added for symmetry with
:func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)
Because surrogateescape was added in Python3 this usually means that
Python3 will use `surrogateescape` and Python2 will use the fallback
error handler. Note that the code checks for surrogateescape when the
module is imported. If you have a backport of `surrogateescape` for
python2, be sure to register the error handler prior to importing this
module.
The default until Ansible-2.2 was `surrogate_or_replace`
In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
with :func:`ansible.module_utils._text.to_bytes` .
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the text version of that string.
:empty: Return an empty text string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a text string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a byte string.
From Ansible-2.3 onwards, the default is `surrogate_then_replace`.
.. version_changed:: 2.3
Added the surrogate_then_replace error handler and made it the default error handler.
"""
if isinstance(obj, text_type):
return obj
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, binary_type):
# Note: We don't need special handling for surrogate_then_replace
# because all bytes will either be made into surrogates or are valid
# to decode.
return obj.decode(encoding, errors)
# Note: We do these last even though we have to call to_text again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return u''
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
return u''
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
return to_text(value, encoding, errors)
#: :py:func:`to_native`
#: Transform a variable into the native str type for the python version
#:
#: On Python2, this is an alias for
#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
#: :func:`~ansible.module_utils.to_text`. It makes it easier to
#: transform a variable into the native str type for the python version
#: the code is running on. Use this when constructing the message to
#: send to exceptions or when dealing with an API that needs to take
#: a native string. Example::
#:
#: try:
#: 1//0
#: except ZeroDivisionError as e:
#:         raise MyException('Encountered an error: %s' % to_native(e))
if PY3:
to_native = to_text
else:
to_native = to_bytes
|
zhukaixy/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/idlelib/idle_test/htest.py | 59 | '''Run human tests of Idle's window, dialog, and popup widgets.
run(*tests)
Run each callable in tests after finding the matching test spec in this file.
If there are none, run an htest for each spec dict in this file after finding
the matching callable in the module named in the spec.
In a tested module, let X be a global name bound to a widget callable.
End the module with
if __name__ == '__main__':
<unittest, if there is one>
from idlelib.idle_test.htest import run
run(X)
The X object must have a .__name__ attribute and a 'parent' parameter.
X will often be a widget class, but a callable instance with .__name__
or a wrapper function also work. The name of wrapper functions, like
'_editor_window', should start with '_'.
This file must contain a matching instance of the following template,
with X.__name__ prepended, as in '_editor_window_spec ...'.
_spec = {
'file': '',
'kwds': {'title': ''},
'msg': ""
}
file (no .py): used in run() to import the file and get X.
kwds: passed to X (**kwds), after 'parent' is added, to initialize X.
title: an example; used for some widgets, delete if not.
msg: displayed in a master window. Hints as to how the user might
test the widget. Close the window to skip or end the test.
Modules not being tested at the moment:
PyShell.PyShellEditorWindow
Debugger.Debugger
AutoCompleteWindow.AutoCompleteWindow
OutputWindow.OutputWindow (indirectly being tested with grep test)
'''
from importlib import import_module
from idlelib.macosxSupport import _initializeTkVariantTests
import tkinter as tk
AboutDialog_spec = {
'file': 'aboutDialog',
'kwds': {'title': 'aboutDialog test',
'_htest': True,
},
'msg': "Test every button. Ensure Python, TK and IDLE versions "
"are correctly displayed.\n [Close] to exit.",
}
_calltip_window_spec = {
'file': 'CallTipWindow',
'kwds': {},
'msg': "Typing '(' should display a calltip.\n"
"Typing ') should hide the calltip.\n"
}
_class_browser_spec = {
'file': 'ClassBrowser',
'kwds': {},
'msg': "Inspect names of module, class(with superclass if "
"applicable), methods and functions.\nToggle nested items.\n"
"Double clicking on items prints a traceback for an exception "
"that is ignored."
}
_color_delegator_spec = {
'file': 'ColorDelegator',
'kwds': {},
'msg': "The text is sample Python code.\n"
"Ensure components like comments, keywords, builtins,\n"
"string, definitions, and break are correctly colored.\n"
"The default color scheme is in idlelib/config-highlight.def"
}
ConfigDialog_spec = {
'file': 'configDialog',
'kwds': {'title': 'Settings',
'_htest': True,},
'msg': "IDLE preferences dialog.\n"
"In the 'Fonts/Tabs' tab, changing font face, should update the "
"font face of the text in the area below it.\nIn the "
"'Highlighting' tab, try different color schemes. Clicking "
"items in the sample program should update the choices above it."
"\nIn the 'Keys' and 'General' tab, test settings of interest."
"\n[Ok] to close the dialog.[Apply] to apply the settings and "
"and [Cancel] to revert all changes.\nRe-run the test to ensure "
"changes made have persisted."
}
_dyn_option_menu_spec = {
'file': 'dynOptionMenuWidget',
'kwds': {},
'msg': "Select one of the many options in the 'old option set'.\n"
"Click the button to change the option set.\n"
"Select one of the many options in the 'new option set'."
}
_editor_window_spec = {
'file': 'EditorWindow',
'kwds': {},
'msg': "Test editor functions of interest."
}
GetCfgSectionNameDialog_spec = {
'file': 'configSectionNameDialog',
'kwds': {'title':'Get Name',
'message':'Enter something',
'used_names': {'abc'},
'_htest': True},
'msg': "After the text entered with [Ok] is stripped, <nothing>, "
"'abc', or more that 30 chars are errors.\n"
"Close 'Get Name' with a valid entry (printed to Shell), "
"[Cancel], or [X]",
}
GetHelpSourceDialog_spec = {
'file': 'configHelpSourceEdit',
'kwds': {'title': 'Get helpsource',
'_htest': True},
'msg': "Enter menu item name and help file path\n "
"<nothing> and more than 30 chars are invalid menu item names.\n"
"<nothing>, file does not exist are invalid path items.\n"
"Test for incomplete web address for help file path.\n"
"A valid entry will be printed to shell with [0k].\n"
"[Cancel] will print None to shell",
}
# Update once issue21519 is resolved.
GetKeysDialog_spec = {
'file': 'keybindingDialog',
'kwds': {'title': 'Test keybindings',
'action': 'find-again',
'currentKeySequences': [''] ,
'_htest': True,
},
'msg': "Test for different key modifier sequences.\n"
"<nothing> is invalid.\n"
"No modifier key is invalid.\n"
"Shift key with [a-z],[0-9], function key, move key, tab, space"
"is invalid.\nNo validitity checking if advanced key binding "
"entry is used."
}
_grep_dialog_spec = {
'file': 'GrepDialog',
'kwds': {},
'msg': "Click the 'Show GrepDialog' button.\n"
"Test the various 'Find-in-files' functions.\n"
"The results should be displayed in a new '*Output*' window.\n"
"'Right-click'->'Goto file/line' anywhere in the search results "
"should open that file \nin a new EditorWindow."
}
_help_dialog_spec = {
'file': 'EditorWindow',
'kwds': {},
'msg': "If the help text displays, this works.\n"
"Text is selectable. Window is scrollable."
}
_io_binding_spec = {
'file': 'IOBinding',
'kwds': {},
'msg': "Test the following bindings\n"
"<Control-o> to display open window from file dialog.\n"
"<Control-s> to save the file\n"
}
_multi_call_spec = {
'file': 'MultiCall',
'kwds': {},
'msg': "The following actions should trigger a print to console or IDLE"
" Shell.\nEntering and leaving the text area, key entry, "
"<Control-Key>,\n<Alt-Key-a>, <Control-Key-a>, "
"<Alt-Control-Key-a>, \n<Control-Button-1>, <Alt-Button-1> and "
"focusing out of the window\nare sequences to be tested."
}
_multistatus_bar_spec = {
'file': 'MultiStatusBar',
'kwds': {},
'msg': "Ensure presence of multi-status bar below text area.\n"
"Click 'Update Status' to change the multi-status text"
}
_object_browser_spec = {
'file': 'ObjectBrowser',
'kwds': {},
'msg': "Double click on items upto the lowest level.\n"
"Attributes of the objects and related information "
"will be displayed side-by-side at each level."
}
_path_browser_spec = {
'file': 'PathBrowser',
'kwds': {},
'msg': "Test for correct display of all paths in sys.path.\n"
"Toggle nested items upto the lowest level.\n"
"Double clicking on an item prints a traceback\n"
"for an exception that is ignored."
}
_percolator_spec = {
'file': 'Percolator',
'kwds': {},
'msg': "There are two tracers which can be toggled using a checkbox.\n"
"Toggling a tracer 'on' by checking it should print tracer"
"output to the console or to the IDLE shell.\n"
"If both the tracers are 'on', the output from the tracer which "
"was switched 'on' later, should be printed first\n"
"Test for actions like text entry, and removal."
}
_replace_dialog_spec = {
'file': 'ReplaceDialog',
'kwds': {},
'msg': "Click the 'Replace' button.\n"
"Test various replace options in the 'Replace dialog'.\n"
"Click [Close] or [X] to close the 'Replace Dialog'."
}
_search_dialog_spec = {
'file': 'SearchDialog',
'kwds': {},
'msg': "Click the 'Search' button.\n"
"Test various search options in the 'Search dialog'.\n"
"Click [Close] or [X] to close the 'Search Dialog'."
}
_scrolled_list_spec = {
'file': 'ScrolledList',
'kwds': {},
'msg': "You should see a scrollable list of items\n"
"Selecting (clicking) or double clicking an item "
"prints the name to the console or Idle shell.\n"
"Right clicking an item will display a popup."
}
_stack_viewer_spec = {
'file': 'StackViewer',
'kwds': {},
'msg': "A stacktrace for a NameError exception.\n"
"Expand 'idlelib ...' and '<locals>'.\n"
"Check that exc_value, exc_tb, and exc_type are correct.\n"
}
_tabbed_pages_spec = {
'file': 'tabbedpages',
'kwds': {},
'msg': "Toggle between the two tabs 'foo' and 'bar'\n"
"Add a tab by entering a suitable name for it.\n"
"Remove an existing tab by entering its name.\n"
"Remove all existing tabs.\n"
"<nothing> is an invalid add page and remove page name.\n"
}
TextViewer_spec = {
'file': 'textView',
'kwds': {'title': 'Test textView',
'text':'The quick brown fox jumps over the lazy dog.\n'*35,
'_htest': True},
'msg': "Test for read-only property of text.\n"
"Text is selectable. Window is scrollable.",
}
_tooltip_spec = {
'file': 'ToolTip',
'kwds': {},
'msg': "Place mouse cursor over both the buttons\n"
"A tooltip should appear with some text."
}
_tree_widget_spec = {
'file': 'TreeWidget',
'kwds': {},
'msg': "The canvas is scrollable.\n"
"Click on folders upto to the lowest level."
}
_undo_delegator_spec = {
'file': 'UndoDelegator',
'kwds': {},
'msg': "Click [Undo] to undo any action.\n"
"Click [Redo] to redo any action.\n"
"Click [Dump] to dump the current state "
"by printing to the console or the IDLE shell.\n"
}
_widget_redirector_spec = {
'file': 'WidgetRedirector',
'kwds': {},
'msg': "Every text insert should be printed to the console."
"or the IDLE shell."
}
def run(*tests):
root = tk.Tk()
root.title('IDLE htest')
root.resizable(0, 0)
_initializeTkVariantTests(root)
# A scrollable, Label-like, constant-width text widget.
frameLabel = tk.Frame(root, padx=10)
frameLabel.pack()
text = tk.Text(frameLabel, wrap='word')
text.configure(bg=root.cget('bg'), relief='flat', height=4, width=70)
scrollbar = tk.Scrollbar(frameLabel, command=text.yview)
text.config(yscrollcommand=scrollbar.set)
scrollbar.pack(side='right', fill='y', expand=False)
text.pack(side='left', fill='both', expand=True)
test_list = [] # List of tuples of the form (spec, callable widget)
if tests:
for test in tests:
test_spec = globals()[test.__name__ + '_spec']
test_spec['name'] = test.__name__
test_list.append((test_spec, test))
else:
for k, d in globals().items():
if k.endswith('_spec'):
test_name = k[:-5]
test_spec = d
test_spec['name'] = test_name
mod = import_module('idlelib.' + test_spec['file'])
test = getattr(mod, test_name)
test_list.append((test_spec, test))
test_name = tk.StringVar('')
callable_object = None
test_kwds = None
def next():
nonlocal test_name, callable_object, test_kwds
if len(test_list) == 1:
next_button.pack_forget()
test_spec, callable_object = test_list.pop()
test_kwds = test_spec['kwds']
test_kwds['parent'] = root
test_name.set('Test ' + test_spec['name'])
text.configure(state='normal') # enable text editing
text.delete('1.0','end')
text.insert("1.0",test_spec['msg'])
text.configure(state='disabled') # preserve read-only property
def run_test():
widget = callable_object(**test_kwds)
try:
print(widget.result)
except AttributeError:
pass
button = tk.Button(root, textvariable=test_name, command=run_test)
button.pack()
next_button = tk.Button(root, text="Next", command=next)
next_button.pack()
next()
root.mainloop()
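# Illustrative usage (a sketch; module and function names are hypothetical):
# run() with no arguments collects every *_spec above and walks through all
# of them, while a single widget's module can test just itself with, e.g.:
#   from idlelib.idle_test.htest import run
#   run(_editor_window)   # looks up '_editor_window_spec' in this module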
if __name__ == '__main__':
run()
|
vipul-sharma20/oh-mainline | refs/heads/master | mysite/search/migrations/0037_questions_hold_notes_and_details_labels.py | 17 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'ProjectInvolvementQuestion.details_label'
db.add_column('search_projectinvolvementquestion', 'details_label', orm['search.projectinvolvementquestion:details_label'])
# Adding field 'ProjectInvolvementQuestion.note'
db.add_column('search_projectinvolvementquestion', 'note', orm['search.projectinvolvementquestion:note'])
def backwards(self, orm):
# Deleting field 'ProjectInvolvementQuestion.details_label'
db.delete_column('search_projectinvolvementquestion', 'details_label')
# Deleting field 'ProjectInvolvementQuestion.note'
db.delete_column('search_projectinvolvementquestion', 'note')
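# Hedged usage sketch (assumes a standard South setup; migration numbers are
# taken from this app's filenames): apply this migration with
#   python manage.py migrate search
# and roll it back to the previous one with
#   python manage.py migrate search 0036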
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.buganswer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bug_answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'search.hitcountcache': {
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'details_label': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
|
endlessm/chromium-browser | refs/heads/master | third_party/catapult/third_party/gae_ts_mon/gae_ts_mon/protobuf/google/protobuf/internal/python_message.py | 35 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from io import BytesIO
import struct
import sys
import weakref
import six
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import api_implementation
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import enum_type_wrapper
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import well_known_types
from google.protobuf.internal import wire_format
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
_AnyFullTypeName = 'google.protobuf.Any'
class GeneratedProtocolMessageType(type):
"""Metaclass for protocol message classes created at runtime from Descriptors.
We add implementations for all methods described in the Message class. We
also create properties to allow getting/setting all fields in the protocol
message. Finally, we create slots to prevent users from accidentally
"setting" nonexistent fields in the protocol message, which then wouldn't get
serialized / deserialized properly.
The protocol compiler currently uses this metaclass to create protocol
message classes at runtime. Clients can also manually create their own
classes at runtime, as in this example:
mydescriptor = Descriptor(.....)
factory = symbol_database.Default()
factory.pool.AddDescriptor(mydescriptor)
MyProtoClass = factory.GetPrototype(mydescriptor)
myproto_instance = MyProtoClass()
myproto_instance.foo_field = 23
...
"""
# Must be consistent with the protocol-compiler code in
# proto2/compiler/internal/generator.*.
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __new__(cls, name, bases, dictionary):
"""Custom allocation for runtime-generated class types.
We override __new__ because this is apparently the only place
where we can meaningfully set __slots__ on the class we're creating(?).
(The interplay between metaclasses and slots is not very well-documented).
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol.
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
Returns:
Newly-allocated class.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
if descriptor.full_name in well_known_types.WKTBASES:
bases += (well_known_types.WKTBASES[descriptor.full_name],)
_AddClassAttributesForNestedExtensions(descriptor, dictionary)
_AddSlots(descriptor, dictionary)
superclass = super(GeneratedProtocolMessageType, cls)
new_class = superclass.__new__(cls, name, bases, dictionary)
return new_class
def __init__(cls, name, bases, dictionary):
"""Here we perform the majority of our work on the class.
We add enum getters, an __init__ method, implementations
of all Message methods, and properties for all fields
in the protocol type.
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol.
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
cls._decoders_by_tag = {}
if (descriptor.has_options and
descriptor.GetOptions().message_set_wire_format):
cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
decoder.MessageSetItemDecoder(descriptor), None)
# Attach stuff to each FieldDescriptor for quick lookup later on.
for field in descriptor.fields:
_AttachFieldHelpers(cls, field)
descriptor._concrete_class = cls # pylint: disable=protected-access
_AddEnumValues(descriptor, cls)
_AddInitMethod(descriptor, cls)
_AddPropertiesForFields(descriptor, cls)
_AddPropertiesForExtensions(descriptor, cls)
_AddStaticMethods(cls)
_AddMessageMethods(descriptor, cls)
_AddPrivateHelperMethods(descriptor, cls)
superclass = super(GeneratedProtocolMessageType, cls)
superclass.__init__(name, bases, dictionary)
# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly.
#
# I opted not to make any of these methods on the metaclass, to make it more
# clear that I'm not really using any state there and to keep clients from
# thinking that they have direct access to these construction helpers.
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
# TODO(robinson): Escape Python keywords (e.g., yield), and test this support.
# nnorwitz makes my day by writing:
# """
# FYI. See the keyword module in the stdlib. This could be as simple as:
#
# if keyword.iskeyword(proto_field_name):
# return proto_field_name + "_"
# return proto_field_name
# """
# Kenton says: The above is a BAD IDEA. People rely on being able to use
# getattr() and setattr() to reflectively manipulate field values. If we
# rename the properties, then every such user has to also make sure to apply
# the same transformation. Note that currently if you name a field "yield",
# you can still access it just fine using getattr/setattr -- it's not even
# that cumbersome to do so.
# TODO(kenton): Remove this method entirely if/when everyone agrees with my
# position.
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_unknown_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__',
'_oneofs']
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _FieldDescriptor.TYPE_MESSAGE and
field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _IsMapField(field):
return (field.type == _FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
def _IsMessageMapField(field):
value_type = field.message_type.fields_by_name["value"]
return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE
def _AttachFieldHelpers(cls, field_descriptor):
is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
is_packable = (is_repeated and
wire_format.IsTypePackable(field_descriptor.type))
if not is_packable:
is_packed = False
elif field_descriptor.containing_type.syntax == "proto2":
is_packed = (field_descriptor.has_options and
field_descriptor.GetOptions().packed)
else:
has_packed_false = (field_descriptor.has_options and
field_descriptor.GetOptions().HasField("packed") and
field_descriptor.GetOptions().packed == False)
is_packed = not has_packed_false
is_map_entry = _IsMapField(field_descriptor)
if is_map_entry:
field_encoder = encoder.MapEncoder(field_descriptor)
sizer = encoder.MapSizer(field_descriptor,
_IsMessageMapField(field_descriptor))
elif _IsMessageSetExtension(field_descriptor):
field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
sizer = encoder.MessageSetItemSizer(field_descriptor.number)
else:
field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
field_descriptor._encoder = field_encoder
field_descriptor._sizer = sizer
field_descriptor._default_constructor = _DefaultValueConstructorForField(
field_descriptor)
def AddDecoder(wiretype, is_packed):
tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
decode_type = field_descriptor.type
if (decode_type == _FieldDescriptor.TYPE_ENUM and
type_checkers.SupportsOpenEnums(field_descriptor)):
decode_type = _FieldDescriptor.TYPE_INT32
oneof_descriptor = None
if field_descriptor.containing_oneof is not None:
oneof_descriptor = field_descriptor
if is_map_entry:
is_message_map = _IsMessageMapField(field_descriptor)
field_decoder = decoder.MapDecoder(
field_descriptor, _GetInitializeDefaultForMap(field_descriptor),
is_message_map)
else:
field_decoder = type_checkers.TYPE_TO_DECODER[decode_type](
field_descriptor.number, is_repeated, is_packed,
field_descriptor, field_descriptor._default_constructor)
cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor)
AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
False)
if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
# To support wire compatibility of adding packed = true, add a decoder for
# packed values regardless of the field's options.
AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.items():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
It also exports a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
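# Illustrative effect (hypothetical message class with an enum type 'Color'
# whose value RED = 1): after _AddEnumValues runs, both forms work:
#   MyProtoClass.RED == 1
#   MyProtoClass.Color.Name(1) == 'RED'   # via EnumTypeWrapper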
def _GetInitializeDefaultForMap(field):
if field.label != _FieldDescriptor.LABEL_REPEATED:
raise ValueError('map_entry set on non-repeated field %s' % (
field.name))
fields_by_name = field.message_type.fields_by_name
key_checker = type_checkers.GetTypeChecker(fields_by_name['key'])
value_field = fields_by_name['value']
if _IsMessageMapField(field):
def MakeMessageMapDefault(message):
return containers.MessageMap(
message._listener_for_children, value_field.message_type, key_checker,
field.message_type)
return MakeMessageMapDefault
else:
value_checker = type_checkers.GetTypeChecker(value_field)
def MakePrimitiveMapDefault(message):
return containers.ScalarMap(
message._listener_for_children, key_checker, value_checker,
field.message_type)
return MakePrimitiveMapDefault
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# We can't look at _concrete_class yet since it might not have
# been set. (Depends on order in which we initialize the classes).
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# _concrete_class may not yet be initialized.
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return field.default_value
return MakeScalarDefault
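# Sketch of how these constructors are used later (names hypothetical): for
# a repeated scalar field, field._default_constructor(msg) returns a fresh
# RepeatedScalarFieldContainer wired to msg._listener_for_children, so the
# first append both stores values and marks msg dirty via the listener chain.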
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
"""Re-raise the currently-handled TypeError with the field name added."""
exc = sys.exc_info()[1]
if len(exc.args) == 1 and type(exc) is TypeError:
# simple TypeError; add field name to exception message
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
# re-raise possibly-amended exception with original traceback:
six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
def _GetIntegerEnumValue(enum_type, value):
"""Convert a string or integer enum value to an integer.
If the value is a string, it is converted to the enum value in
enum_type with the same name. If the value is not a string, it's
returned as-is. (No conversion or bounds-checking is done.)
"""
if isinstance(value, six.string_types):
try:
return enum_type.values_by_name[value].number
except KeyError:
raise ValueError('Enum type %s: unknown label "%s"' % (
enum_type.full_name, value))
return value
def init(self, **kwargs):
self._cached_byte_size = 0
self._cached_byte_size_dirty = len(kwargs) > 0
self._fields = {}
# Contains a mapping from oneof field descriptors to the descriptor
# of the currently set field in that oneof field.
self._oneofs = {}
# _unknown_fields is () when empty for efficiency, and will be turned into
# a list if fields are added.
self._unknown_fields = ()
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
for field_name, field_value in kwargs.items():
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(message_descriptor.name, field_name))
if field_value is None:
# field=None is the same as no field at all.
continue
if field.label == _FieldDescriptor.LABEL_REPEATED:
copy = field._default_constructor(self)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite
if _IsMapField(field):
if _IsMessageMapField(field):
for key in field_value:
copy[key].MergeFrom(field_value[key])
else:
copy.update(field_value)
else:
for val in field_value:
if isinstance(val, dict):
copy.add(**val)
else:
copy.add().MergeFrom(val)
else: # Scalar
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
field_value = [_GetIntegerEnumValue(field.enum_type, val)
for val in field_value]
copy.extend(field_value)
self._fields[field] = copy
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
copy = field._default_constructor(self)
new_val = field_value
if isinstance(field_value, dict):
new_val = field.message_type._concrete_class(**field_value)
try:
copy.MergeFrom(new_val)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
self._fields[field] = copy
else:
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
field_value = _GetIntegerEnumValue(field.enum_type, field_value)
try:
setattr(self, field_name, field_value)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
init.__module__ = None
init.__doc__ = None
cls.__init__ = init
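# Illustrative keyword initialization (hypothetical message type and field
# names): scalars, dicts for submessages, and iterables for repeated fields
# are all accepted by the generated __init__ above:
#   msg = MyProtoClass(count=1,
#                      sub_message=dict(child=2),
#                      values=[3, 4])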
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name))
def _AddPropertiesForFields(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
for field in descriptor.fields:
_AddPropertiesForField(field, cls)
if descriptor.is_extendable:
# _ExtensionDict is just an adaptor with no state so we allocate a new one
# every time it is accessed.
cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
"""Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
# Catch it if we add other types that we should
# handle specially here.
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls)
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls)
else:
_AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
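# Illustrative behavior (hypothetical repeated field 'values'): reads lazily
# create the container; direct assignment is rejected by the setter above:
#   msg.values.append(1)   # ok; sets any needed "has" bits as a side-effect
#   msg.values = [1, 2]    # raises AttributeError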
def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field)
default_value = field.default_value
valid_values = set()
is_proto3 = field.containing_type.syntax == "proto3"
def getter(self):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
clear_when_set_to_default = is_proto3 and not field.containing_oneof
def field_setter(self, new_value):
# pylint: disable=protected-access
# Testing the value for truthiness captures all of the proto3 defaults
# (0, 0.0, enum 0, and False).
new_value = type_checker.CheckValue(new_value)
if clear_when_set_to_default and not new_value:
self._fields.pop(field, None)
else:
self._fields[field] = new_value
# Check _cached_byte_size_dirty inline to improve performance, since scalar
# setters are called frequently.
if not self._cached_byte_size_dirty:
self._Modified()
if field.containing_oneof:
def setter(self, new_value):
field_setter(self, new_value)
self._UpdateOneofState(field)
else:
setter = field_setter
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
# Add a property to encapsulate the getter/setter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
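# Illustrative proto3 behavior (hypothetical int32 field 'count' outside any
# oneof): assigning the default clears presence instead of storing the value:
#   msg.count = 5   # stored in msg._fields
#   msg.count = 0   # popped from msg._fields (clear_when_set_to_default)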
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
"""Adds a public property for a nonrepeated, composite protocol message field.
A composite field is a "group" or "message" field.
Clients can use this property to get the value of the field, but cannot
assign to the property directly.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
# TODO(robinson): Remove duplication with similar method
# for non-repeated scalars.
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to composite field '
'"%s" in protocol message object.' % proto_field_name)
# Add a property to encapsulate the getter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.items():
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
# TODO(amauryfa): Migrate all users of these attributes to functions like
# pool.FindExtensionByNumber(descriptor).
if descriptor.file is not None:
# TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available.
pool = descriptor.file.pool
cls._extensions_by_number = pool._extensions_by_number[descriptor]
cls._extensions_by_name = pool._extensions_by_name[descriptor]
def _AddStaticMethods(cls):
# TODO(robinson): This probably needs to be thread-safe(?)
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
# TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available.
cls.DESCRIPTOR.file.pool.AddExtensionDescriptor(extension_handle)
_AttachFieldHelpers(cls, extension_handle)
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(s):
message = cls()
message.MergeFromString(s)
return message
cls.FromString = staticmethod(FromString)
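# Illustrative round trip via the static method added above (hypothetical
# message type):
#   data = msg.SerializeToString()
#   copy = MyProtoClass.FromString(data)   # parses with MergeFromString()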
def _IsPresent(item):
"""Given a (FieldDescriptor, value) tuple from _fields, return true if the
value should be included in the list returned by ListFields()."""
if item[0].label == _FieldDescriptor.LABEL_REPEATED:
return bool(item[1])
elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
return item[1]._is_present_in_parent
else:
return True
def _AddListFieldsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ListFields(self):
all_fields = [item for item in self._fields.items() if _IsPresent(item)]
all_fields.sort(key = lambda item: item[0].number)
return all_fields
cls.ListFields = ListFields
_Proto3HasError = 'Protocol message has no non-repeated submessage field "%s"'
_Proto2HasError = 'Protocol message has no non-repeated field "%s"'
def _AddHasFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
is_proto3 = (message_descriptor.syntax == "proto3")
error_msg = _Proto3HasError if is_proto3 else _Proto2HasError
hassable_fields = {}
for field in message_descriptor.fields:
if field.label == _FieldDescriptor.LABEL_REPEATED:
continue
# For proto3, only submessages and fields inside a oneof have presence.
if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and
not field.containing_oneof):
continue
hassable_fields[field.name] = field
if not is_proto3:
# Fields inside oneofs are never repeated (enforced by the compiler).
for oneof in message_descriptor.oneofs:
hassable_fields[oneof.name] = oneof
def HasField(self, field_name):
try:
field = hassable_fields[field_name]
except KeyError:
raise ValueError(error_msg % field_name)
if isinstance(field, descriptor_mod.OneofDescriptor):
try:
return HasField(self, self._oneofs[field].name)
except KeyError:
return False
else:
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(field)
return value is not None and value._is_present_in_parent
else:
return field in self._fields
cls.HasField = HasField
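# Illustrative presence checks (hypothetical field names): under proto3,
# HasField() is only legal for submessage fields and oneof members:
#   msg.HasField('sub_message')   # ok in proto2 and proto3
#   msg.HasField('count')         # scalar outside a oneof: ValueError in proto3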
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
try:
field = message_descriptor.oneofs_by_name[field_name]
if field in self._oneofs:
field = self._oneofs[field]
else:
return
except KeyError:
raise ValueError('Protocol message %s() has no "%s" field.' %
(message_descriptor.name, field_name))
if field in self._fields:
# To match the C++ implementation, we need to invalidate iterators
# for map fields when ClearField() happens.
if hasattr(self._fields[field], 'InvalidateIterators'):
self._fields[field].InvalidateIterators()
# Note: If the field is a sub-message, its listener will still point
# at us. That's fine, because the worst that can happen is that it
# will call _Modified() and invalidate our byte size. Big deal.
del self._fields[field]
if self._oneofs.get(field.containing_oneof, None) is field:
del self._oneofs[field.containing_oneof]
# Always call _Modified() -- even if nothing was changed, this is
# a mutating method, and thus calling it should cause the field to become
# present in the parent message.
self._Modified()
cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def ClearExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
# Similar to ClearField(), above.
if extension_handle in self._fields:
del self._fields[extension_handle]
self._Modified()
cls.ClearExtension = ClearExtension
def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name)
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension
def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message
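# Sketch of the flow above (hypothetical payload): for an Any whose type_url
# is 'type.googleapis.com/pkg.MyMessage', only 'pkg.MyMessage' is kept, the
# type is resolved in the default descriptor pool, and msg.value is parsed
# with the resulting generated class.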
def _AddEqualsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __eq__(self, other):
if (not isinstance(other, message_mod.Message) or
other.DESCRIPTOR != self.DESCRIPTOR):
return False
if self is other:
return True
if self.DESCRIPTOR.full_name == _AnyFullTypeName:
any_a = _InternalUnpackAny(self)
any_b = _InternalUnpackAny(other)
if any_a and any_b:
return any_a == any_b
if not self.ListFields() == other.ListFields():
return False
# Sort unknown fields because their order shouldn't affect equality test.
unknown_fields = list(self._unknown_fields)
unknown_fields.sort()
other_unknown_fields = list(other._unknown_fields)
other_unknown_fields.sort()
return unknown_fields == other_unknown_fields
cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__
def _AddReprMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __repr__(self):
return text_format.MessageToString(self)
cls.__repr__ = __repr__
def _AddUnicodeMethod(unused_message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __unicode__(self):
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
cls.__unicode__ = __unicode__
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
descriptor = self.DESCRIPTOR
if descriptor.GetOptions().map_entry:
# Fields of map entry should always be serialized.
size = descriptor.fields_by_name['key']._sizer(self.key)
size += descriptor.fields_by_name['value']._sizer(self.value)
else:
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
for tag_bytes, value_bytes in self._unknown_fields:
size += len(tag_bytes) + len(value_bytes)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self, **kwargs):
# Check if the message has all of its required fields set.
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message %s is missing required fields: %s' % (
self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
return self.SerializePartialToString(**kwargs)
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self, **kwargs):
out = BytesIO()
self._InternalSerialize(out.write, **kwargs)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes, deterministic=None):
if deterministic is None:
deterministic = (
api_implementation.IsPythonDefaultSerializationDeterministic())
else:
deterministic = bool(deterministic)
descriptor = self.DESCRIPTOR
if descriptor.GetOptions().map_entry:
# Fields of map entry should always be serialized.
descriptor.fields_by_name['key']._encoder(
write_bytes, self.key, deterministic)
descriptor.fields_by_name['value']._encoder(
write_bytes, self.value, deterministic)
else:
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value, deterministic)
for tag_bytes, value_bytes in self._unknown_fields:
write_bytes(tag_bytes)
write_bytes(value_bytes)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def MergeFromString(self, serialized):
length = len(serialized)
try:
if self._InternalParse(serialized, 0, length) != length:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise message_mod.DecodeError('Unexpected end-group tag.')
except (IndexError, TypeError):
# Now ord(buf[p:p+1]) == ord('') gets TypeError.
raise message_mod.DecodeError('Truncated message.')
except struct.error as e:
raise message_mod.DecodeError(e)
return length # Return this for legacy reasons.
cls.MergeFromString = MergeFromString
local_ReadTag = decoder.ReadTag
local_SkipField = decoder.SkipField
decoders_by_tag = cls._decoders_by_tag
is_proto3 = message_descriptor.syntax == "proto3"
def InternalParse(self, buffer, pos, end):
self._Modified()
field_dict = self._fields
unknown_field_list = self._unknown_fields
while pos != end:
(tag_bytes, new_pos) = local_ReadTag(buffer, pos)
field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None))
if field_decoder is None:
value_start_pos = new_pos
new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
if new_pos == -1:
return pos
if (not is_proto3 or
api_implementation.GetPythonProto3PreserveUnknownsDefault()):
if not unknown_field_list:
unknown_field_list = self._unknown_fields = []
unknown_field_list.append(
(tag_bytes, buffer[value_start_pos:new_pos]))
pos = new_pos
else:
pos = field_decoder(buffer, new_pos, end, self, field_dict)
if field_desc:
self._UpdateOneofState(field_desc)
return pos
cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()): # dict can change size!
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
if (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
continue
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if _IsMapField(field):
if _IsMessageMapField(field):
for key in value:
element = value[key]
prefix = "%s[%s]." % (name, key)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
# ScalarMaps can't have any initialization errors.
pass
elif field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
'expected %s got %s.' % (cls.__name__, msg.__class__.__name__))
assert msg is not self
self._Modified()
fields = self._fields
for field, value in msg._fields.items():
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
elif field.cpp_type == CPPTYPE_MESSAGE:
if value._is_present_in_parent:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
else:
self._fields[field] = value
if field.containing_oneof:
self._UpdateOneofState(field)
if msg._unknown_fields:
if not self._unknown_fields:
self._unknown_fields = []
self._unknown_fields.extend(msg._unknown_fields)
cls.MergeFrom = MergeFrom
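# Added note (illustrative): MergeFrom follows standard protobuf merge
# semantics -- repeated fields are concatenated, singular sub-messages are
# merged recursively, and set scalars overwrite. With a hypothetical message
# type carrying a repeated int32 field 'ids':
#
#   a.ids.extend([1]); b.ids.extend([2])
#   a.MergeFrom(b)
#   assert list(a.ids) == [1, 2]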
def _AddWhichOneofMethod(message_descriptor, cls):
def WhichOneof(self, oneof_name):
"""Returns the name of the currently set field inside a oneof, or None."""
try:
field = message_descriptor.oneofs_by_name[oneof_name]
except KeyError:
raise ValueError(
'Protocol message has no oneof "%s" field.' % oneof_name)
nested_field = self._oneofs.get(field, None)
if nested_field is not None and self.HasField(nested_field.name):
return nested_field.name
else:
return None
cls.WhichOneof = WhichOneof
def _AddReduceMethod(cls):
def __reduce__(self): # pylint: disable=invalid-name
return (type(self), (), self.__getstate__())
cls.__reduce__ = __reduce__
def _Clear(self):
# Clear fields.
self._fields = {}
self._unknown_fields = ()
self._oneofs = {}
self._Modified()
def _DiscardUnknownFields(self):
self._unknown_fields = []
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
for sub_message in value:
sub_message.DiscardUnknownFields()
else:
value.DiscardUnknownFields()
def _SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddReprMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
_AddReduceMethod(cls)
# Adds methods which do not depend on cls.
cls.Clear = _Clear
cls.DiscardUnknownFields = _DiscardUnknownFields
cls._SetListener = _SetListener
def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
# This listener establishes a back reference from a child (contained) object
# to its parent (containing) object. We make this a weak reference to avoid
# creating cyclic garbage when the client finishes with the 'parent' object
# in the tree.
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
# As an optimization, we also indicate directly on the listener whether
# or not the parent message is dirty. This way we can avoid traversing
# up the tree in the common case.
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
# Propagate the signal to our parents iff this is the first field set.
self._parent_message_weakref._Modified()
except ReferenceError:
# We can get here if a client has kept a reference to a child object,
# and is now setting a field on it, but the child's parent has been
# garbage-collected. This is not an error.
pass
class _OneofListener(_Listener):
"""Special listener implementation for setting composite oneof fields."""
def __init__(self, parent_message, field):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
field: The descriptor of the field being set in the parent message.
"""
super(_OneofListener, self).__init__(parent_message)
self._field = field
def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass
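# Added sketch (illustrative, standalone -- not part of the original module):
# the weak-reference pattern used by _Listener above, where a child keeps only
# a weakref.proxy to its parent so the parent can still be garbage-collected:
#
#   import weakref
#
#   class Child(object):
#       def __init__(self, parent):
#           self._parent = weakref.proxy(parent)
#       def notify(self):
#           try:
#               self._parent.on_child_modified()
#           except ReferenceError:
#               pass  # parent already collected; nothing left to notify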
# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
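# Added note (illustrative): dict.setdefault() returns the value already
# stored under the key if another thread won the race, so all racers end up
# sharing one object and the loser's object is simply discarded:
#
#   d = {}
#   a = d.setdefault('k', obj1)   # first caller stores obj1
#   b = d.setdefault('k', obj2)   # later caller gets obj1 back, obj2 dropped
#   assert a is b is obj1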
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
# Get rid of non-extension fields.
# ListFields() returns (descriptor, value) pairs, so filter on the
# descriptor element of each pair.
my_fields = [field for field in my_fields if field[0].is_extension]
other_fields = [field for field in other_fields if field[0].is_extension]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
# Note that this is only meaningful for non-repeated, scalar extension
# fields. Note also that we may have to call _Modified() when we do
# successfully set a field this way, to set any necessary "has" bits in the
# ancestors of the extended message.
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
# It's slightly wasteful to look up the type checker each time,
# but we expect this to be a vanishingly uncommon case anyway.
type_checker = type_checkers.GetTypeChecker(extension_handle)
# pylint: disable=protected-access
self._extended_message._fields[extension_handle] = (
type_checker.CheckValue(value))
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
def _FindExtensionByNumber(self, number):
"""Tries to find a known extension with the field number.
Args:
number: Extension field number.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_number.get(number, None)
|
kallimachos/CodeEval | refs/heads/master | longest_word.py | 1 | import sys
source = open(sys.argv[1], 'r').readlines()
for line in source:
    words = line.strip().split()
    if not words:
        # Guard against blank lines; pop(0) on an empty list raises IndexError.
        continue
    # Start from the first word and keep any strictly longer word, so ties
    # resolve in favor of the earliest word on the line.
    result = words.pop(0)
    for word in words:
        if len(word) > len(result):
            result = word
    print(result)
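# Example (illustrative): for an input file containing the line
#   "speed of light in vacuum"
# the script prints "vacuum"; among equally long words the earliest wins.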
|
blckshrk/Weboob | refs/heads/master | modules/dailymotion/pages.py | 1 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.json import json
import datetime
import re
from weboob.tools.capabilities.thumbnail import Thumbnail
from weboob.capabilities import NotAvailable
from weboob.tools.misc import html2text
from weboob.tools.browser import BasePage, BrokenPageError
from .video import DailymotionVideo
__all__ = ['IndexPage', 'VideoPage']
class IndexPage(BasePage):
def iter_videos(self):
for div in self.parser.select(self.document.getroot(), 'div.dmpi_video_item'):
_id = div.attrib.get('data-id', None)
if _id is None:
self.browser.logger.warning('Unable to find the ID of a video')
continue
video = DailymotionVideo(_id)
video.title = unicode(self.parser.select(div, 'h3 a', 1).text).strip()
video.author = unicode(self.parser.select(div, 'div.dmpi_user_login', 1).find('a').find('span').text).strip()
video.description = html2text(self.parser.tostring(self.parser.select(div, 'div.dmpi_video_description', 1))).strip() or unicode()
try:
parts = self.parser.select(div, 'div.duration', 1).text.split(':')
except BrokenPageError:
# it's probably a live stream, so having no duration is not a problem.
video.duration = NotAvailable
else:
if len(parts) == 1:
seconds = parts[0]
hours = minutes = 0
elif len(parts) == 2:
minutes, seconds = parts
hours = 0
elif len(parts) == 3:
hours, minutes, seconds = parts
else:
raise BrokenPageError('Unable to parse duration %r' % self.parser.select(div, 'div.duration', 1).text)
video.duration = datetime.timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds))
url = unicode(self.parser.select(div, 'img.preview', 1).attrib['data-src'])
# remove the useless anti-caching
url = re.sub(r'\?\d+', '', url)
video.thumbnail = Thumbnail(unicode(url))
video.set_empty_fields(NotAvailable, ('url',))
yield video
def get_rate(self, div):
m = re.match(r'width: *(\d+)px', div.attrib['style'])
if m:
return int(m.group(1))
else:
self.browser.logger.warning('Unable to parse rating: %s' % div.attrib['style'])
return 0
class VideoPage(BasePage):
def get_video(self, video=None):
if video is None:
video = DailymotionVideo(self.group_dict['id'])
div = self.parser.select(self.document.getroot(), 'div#content', 1)
video.title = unicode(self.parser.select(div, 'span.title', 1).text).strip()
video.author = unicode(self.parser.select(div, 'a.name, span.name, a[rel=author]', 1).text).strip()
try:
video.description = html2text(self.parser.tostring(self.parser.select(div, 'div#video_description', 1))).strip() or unicode()
except BrokenPageError:
video.description = u''
embed_page = self.browser.readurl('http://www.dailymotion.com/embed/video/%s' % video.id)
m = re.search('var info = ({.*?}),[^{"]', embed_page)
if not m:
raise BrokenPageError('Unable to find information about video')
info = json.loads(m.group(1))
# Walk the stream URLs from highest to lowest quality and keep the
# first one that is present and non-empty.
for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
            'stream_h264_hq_url', 'stream_h264_url',
            'stream_h264_ld_url']:
    if info.get(key):
        max_quality = key
        break
else:
raise BrokenPageError(u'Unable to extract video URL')
video.url = info[max_quality]
video.set_empty_fields(NotAvailable)
return video
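# Added sketch (illustrative, not part of the original module): the
# JSON-in-JavaScript extraction used by get_video above, as a standalone
# helper. A non-greedy regex grabs the first object literal assigned to
# `var info` and decodes it; `re` and `json` are already imported above.
def _example_extract_embed_info(embed_page):
    m = re.search('var info = ({.*?}),[^{"]', embed_page)
    return json.loads(m.group(1)) if m else None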
|
reneenoble/ckanext-iframeview | refs/heads/master | ckanext/iframeview/tests/test_plugin.py | 1 | """Tests for plugin.py."""
import ckanext.iframeview.plugin as plugin
def test_plugin():
pass |
escapewindow/mozharness | refs/heads/master | configs/builds/releng_sub_linux_configs/32_debug.py | 2 | MOZ_OBJDIR = 'obj-firefox'
config = {
'default_actions': [
'clobber',
'clone-tools',
'setup-mock',
'build',
'generate-build-props',
# 'generate-build-stats', debug skips this action
'symbols',
'packages',
'upload',
'sendchanges',
# 'pretty-names', debug skips this action
# 'check-l10n', debug skips this action
'check-test',
'update', # decided by query_is_nightly()
'ccache-stats',
],
'debug_build': True,
'stage_platform': 'linux-debug',
'purge_minsize': 15,
"enable_talos_sendchange": False, # debug does not fire a talos sendchange
'enable_signing': False,
'upload_symbols': False,
#### 32 bit build specific #####
'env': {
'DISPLAY': ':2',
'HG_SHARE_BASE_DIR': '/builds/hg-shared',
'MOZ_OBJDIR': MOZ_OBJDIR,
# not sure if this will always be server host
'POST_SYMBOL_UPLOAD_CMD': '/usr/local/bin/post-symbol-upload.py',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'CCACHE_DIR': '/builds/ccache',
'CCACHE_COMPRESS': '1',
'CCACHE_UMASK': '002',
'LC_ALL': 'C',
# 32 bit specific
'PATH': '/tools/buildbot/bin:/usr/local/bin:/usr/lib/ccache:/bin:\
/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/tools/git/bin:/tools/python27/bin:\
/tools/python27-mercurial/bin:/home/cltbld/bin',
'LD_LIBRARY_PATH': '/tools/gcc-4.3.3/installed/lib:\
%s/dist/bin' % (MOZ_OBJDIR,),
'XPCOM_DEBUG_BREAK': 'stack-and-abort',
'TINDERBOX_OUTPUT': '1',
},
'src_mozconfig': 'browser/config/mozconfigs/linux32/debug',
'base_name': 'Linux %(branch)s leak test',
#######################
}
|
MingLin-home/Ming_slim | refs/heads/master | nets/resnet_v2.py | 13 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Another difference is that 'v2' ResNets do not include an activation function in
the main pathway. Also see [2; Fig. 4e].
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope(is_training)):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
normalizer_fn=None, activation_fn=None,
scope='shortcut')
residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None,
scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether is training or not.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with slim.arg_scope([slim.conv2d],
activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
if num_classes is not None:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
if spatial_squeeze:
logits = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = slim.softmax(logits, scope='predictions')
return logits, end_points
resnet_v2.default_image_size = 224
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
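# Added usage sketch (illustrative, not part of the upstream file): building
# ResNet-50 classification logits for a batch of images, mirroring the module
# docstring above.
def _example_resnet_v2_50_logits(images, num_classes=1000):
  # images: a float tensor of shape [batch, 224, 224, 3].
  with slim.arg_scope(resnet_arg_scope()):
    logits, _ = resnet_v2_50(images, num_classes, is_training=False)
  return logits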
|
peterfpeterson/mantid | refs/heads/master | Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import PythonAlgorithm, AlgorithmFactory, IMDHistoWorkspaceProperty, PropertyMode, WorkspaceProperty, Progress
from mantid.kernel import (Direction, EnabledWhenProperty, PropertyCriterion, Property, StringListValidator, FloatArrayBoundedValidator,
FloatArrayProperty, FloatBoundedValidator)
from mantid.geometry import SpaceGroupFactory
from mantid import logger
import numpy as np
from scipy import ndimage
class DeltaPDF3D(PythonAlgorithm):
def category(self):
return 'Diffraction\\Utility'
def name(self):
return 'DeltaPDF3D'
def summary(self):
return 'Calculates the 3D-deltaPDF from a HKL workspace'
def PyInit(self):
self.declareProperty(IMDHistoWorkspaceProperty("InputWorkspace", "",
optional=PropertyMode.Mandatory,
direction=Direction.Input),
"Input Workspace with HKL dimensions centered on zero.")
self.declareProperty(WorkspaceProperty("IntermediateWorkspace", "",
optional=PropertyMode.Optional,
direction=Direction.Output),
"The resulting workspace after reflection removal and filters applied. What is the input of the FFT.")
self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
optional=PropertyMode.Mandatory,
direction=Direction.Output),
"Output Workspace")
self.declareProperty("Method", 'KAREN', StringListValidator(['None', 'Punch and fill', 'KAREN']), "Bragg peak removal method")
self.declareProperty("WindowFunction", 'Blackman', StringListValidator(['None', 'Gaussian', 'Blackman', 'Tukey', 'Kaiser']),
"Apply a window function to the data")
self.declareProperty("WindowParameter", defaultValue=0.5, validator=FloatBoundedValidator(0.),
doc="Parameter for window function, depends on window type, see algorithm docs")
# Punch and fill
condition = EnabledWhenProperty("Method", PropertyCriterion.IsEqualTo, 'Punch and fill')
self.declareProperty("Shape", "sphere", doc="Shape to punch out reflections",
validator=StringListValidator(['sphere', 'cube']))
self.setPropertySettings("Shape", condition)
val_min_zero = FloatArrayBoundedValidator(lower=0.)
self.declareProperty(FloatArrayProperty("Size", [0.2], validator=val_min_zero),
"Width of cube/diameter of sphere used to remove reflections, in (HKL) (one or three values)")
self.setPropertySettings("Size", condition)
self.declareProperty("SpaceGroup", "",
doc="Space group for reflection removal, either full name or number. If empty all HKL's will be removed.")
self.setPropertySettings("SpaceGroup", condition)
self.declareProperty("Convolution", True, "Apply convolution to fill in removed reflections")
self.setPropertySettings("Convolution", condition)
self.declareProperty("ConvolutionWidth", 2.0, validator=FloatBoundedValidator(0.),
doc="Width of gaussian convolution in pixels")
self.setPropertySettings("ConvolutionWidth", condition)
self.declareProperty("CropSphere", False, "Limit min/max q values. Can help with edge effects.")
condition = EnabledWhenProperty("CropSphere", PropertyCriterion.IsNotDefault)
self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero),
"HKL values below which will be removed (one or three values)")
self.setPropertySettings("SphereMin", condition)
self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero),
"HKL values above which will be removed (one or three values)")
self.setPropertySettings("SphereMax", condition)
self.declareProperty("FillValue", Property.EMPTY_DBL, "Value to replace with outside sphere")
self.setPropertySettings("FillValue", condition)
# KAREN
self.declareProperty("KARENWidth", 7, "Size of filter window")
# Reflections
self.setPropertyGroup("Shape","Punch and fill")
self.setPropertyGroup("Size","Punch and fill")
self.setPropertyGroup("SpaceGroup","Punch and fill")
# Sphere
self.setPropertyGroup("CropSphere","Cropping to a sphere")
self.setPropertyGroup("SphereMin","Cropping to a sphere")
self.setPropertyGroup("SphereMax","Cropping to a sphere")
self.setPropertyGroup("FillValue","Cropping to a sphere")
# Convolution
self.setPropertyGroup("Convolution","Convolution")
self.setPropertyGroup("ConvolutionWidth","Convolution")
def validateInputs(self):
issues = dict()
inWS = self.getProperty("InputWorkspace").value
dimX=inWS.getXDimension()
dimY=inWS.getYDimension()
dimZ=inWS.getZDimension()
if dimX.name != '[H,0,0]' or dimY.name != '[0,K,0]' or dimZ.name != '[0,0,L]':
issues['InputWorkspace'] = 'dimensions must be [H,0,0], [0,K,0] and [0,0,L]'
for d in range(inWS.getNumDims()):
dim = inWS.getDimension(d)
if not np.isclose(dim.getMaximum(), -dim.getMinimum(), atol=1e-5):
issues['InputWorkspace'] = 'dimensions must be centered on zero'
if self.getProperty("Convolution").value and self.getProperty("Method").value == 'Punch and fill':
try:
import astropy # noqa
except ImportError:
issues["Convolution"] = 'python-astropy required to do convolution'
size = self.getProperty("Size").value
if len(size) != 1 and len(size) != 3:
issues["Size"] = 'Must provide 1 or 3 sizes'
if self.getProperty("SpaceGroup").value:
space_group=self.getProperty("SpaceGroup").value
try:
if not SpaceGroupFactory.isSubscribedNumber(int(space_group)):
issues["SpaceGroup"] = 'Space group number is not valid'
except ValueError:
if not SpaceGroupFactory.isSubscribedSymbol(space_group):
issues["SpaceGroup"] = 'Space group name is not valid'
sphereMin = self.getProperty("SphereMin").value
if len(sphereMin) != 1 and len(sphereMin) != 3:
issues["SphereMin"] = 'Must provide 1 or 3 diameters'
sphereMax = self.getProperty("SphereMax").value
if len(sphereMax) != 1 and len(sphereMax) != 3:
issues["SphereMax"] = 'Must provide 1 or 3 diameters'
if self.getProperty("WindowFunction").value == 'Tukey':
import scipy.signal
if not hasattr(scipy.signal, 'tukey'):
issues["WindowFunction"] = 'Tukey window requires scipy >= 0.16.0'
return issues
def PyExec(self):
progress = Progress(self, 0.0, 1.0, 5)
inWS = self.getProperty("InputWorkspace").value
signal = inWS.getSignalArray().copy()
if self.getProperty("CropSphere").value:
signal = self._crop_sphere(signal, inWS.getXDimension(), inWS.getYDimension(), inWS.getZDimension())
window_function = self.getProperty("WindowFunction").value
if window_function != 'None':
parameter = self.getProperty("WindowParameter").value
_, _, Xbins, _ = self._get_dim_params(inWS.getXDimension())
_, _, Ybins, _ = self._get_dim_params(inWS.getYDimension())
_, _, Zbins, _ = self._get_dim_params(inWS.getZDimension())
if window_function == 'Gaussian':
progress.report("Applying Gaussian window")
window = self._gaussian_window((Xbins, Ybins, Zbins), parameter)
elif window_function == 'Blackman':
progress.report("Applying Blackman window")
window = self._blackman_window((Xbins, Ybins, Zbins))
elif window_function == 'Tukey':
progress.report("Applying Tukey window")
window = self._tukey_window((Xbins, Ybins, Zbins), parameter)
elif window_function == 'Kaiser':
progress.report("Applying Kaiser window")
window = self._kaiser_window((Xbins, Ybins, Zbins), parameter)
signal = np.multiply(signal, window)
if self.getProperty("Method").value == 'Punch and fill':
progress.report("Removing Reflections")
signal = self._punch_and_fill(signal, inWS.getXDimension(), inWS.getYDimension(), inWS.getZDimension())
if self.getProperty("Convolution").value:
progress.report("Convoluting signal")
signal = self._convolution(signal)
elif self.getProperty("Method").value == 'KAREN':
progress.report("Running KAREN")
signal = self._karen(signal, self.getProperty("KARENWidth").value)
if self.getPropertyValue("IntermediateWorkspace"):
cloneWS_alg = self.createChildAlgorithm("CloneMDWorkspace", enableLogging=False)
cloneWS_alg.setProperty("InputWorkspace",inWS)
cloneWS_alg.execute()
signalOutWS = cloneWS_alg.getProperty("OutputWorkspace").value
signalOutWS.setSignalArray(signal)
self.setProperty("IntermediateWorkspace", signalOutWS)
# Do FFT
progress.report("Running FFT")
# Replace any remaining nan's or inf's with 0
# Otherwise you end up with a lot of nan's
signal[np.isnan(signal)]=0
signal[np.isinf(signal)]=0
signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal)))
number_of_bins = signal.shape
# CreateMDHistoWorkspace expects Fortran `column-major` ordering
signal = signal.real.flatten('F')
createWS_alg = self.createChildAlgorithm("CreateMDHistoWorkspace", enableLogging=False)
createWS_alg.setProperty("SignalInput", signal)
createWS_alg.setProperty("ErrorInput", signal**2)
createWS_alg.setProperty("Dimensionality", 3)
createWS_alg.setProperty("Extents", self._calc_new_extents(inWS))
createWS_alg.setProperty("NumberOfBins", number_of_bins)
createWS_alg.setProperty("Names", 'x,y,z')
createWS_alg.setProperty("Units", 'a,b,c')
createWS_alg.execute()
outWS = createWS_alg.getProperty("OutputWorkspace").value
# Copy first experiment info
if inWS.getNumExperimentInfo() > 0:
outWS.copyExperimentInfos(inWS)
progress.report()
self.setProperty("OutputWorkspace", outWS)
def _punch_and_fill(self, signal, dimX, dimY, dimZ): # noqa
Xmin, Xmax, _, Xwidth = self._get_dim_params(dimX)
Ymin, Ymax, _, Ywidth = self._get_dim_params(dimY)
Zmin, Zmax, _, Zwidth = self._get_dim_params(dimZ)
X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)
size = self.getProperty("Size").value
if len(size)==1:
size = np.repeat(size, 3)
size/=2.0 # We want radii or half box width
cut_shape = self.getProperty("Shape").value
space_group = self.getProperty("SpaceGroup").value
if space_group:
check_space_group = True
try:
space_group=SpaceGroupFactory.subscribedSpaceGroupSymbols(int(space_group))[0]
except ValueError:
pass
logger.information('Using space group: '+space_group)
sg=SpaceGroupFactory.createSpaceGroup(space_group)
else:
check_space_group = False
if cut_shape == 'cube':
for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
if not check_space_group or sg.isAllowedReflection([h,k,l]):
signal[int((h-size[0]-Xmin)/Xwidth+1):int((h+size[0]-Xmin)/Xwidth),
int((k-size[1]-Ymin)/Ywidth+1):int((k+size[1]-Ymin)/Ywidth),
int((l-size[2]-Zmin)/Zwidth+1):int((l+size[2]-Zmin)/Zwidth)]=np.nan
else: # sphere
mask=((X-np.round(X))**2/size[0]**2 + (Y-np.round(Y))**2/size[1]**2 + (Z-np.round(Z))**2/size[2]**2 < 1)
# Unmask invalid reflections
if check_space_group:
for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
if not sg.isAllowedReflection([h,k,l]):
mask[int((h-0.5-Xmin)/Xwidth+1):int((h+0.5-Xmin)/Xwidth),
int((k-0.5-Ymin)/Ywidth+1):int((k+0.5-Ymin)/Ywidth),
int((l-0.5-Zmin)/Zwidth+1):int((l+0.5-Zmin)/Zwidth)]=False
signal[mask]=np.nan
return signal
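# Added note (illustrative): the sphere mask above flags voxels whose
# fractional distance from the nearest integer (H,K,L) lies inside an
# ellipsoid with semi-axes size[0..2]. E.g. with size = (0.1, 0.1, 0.1),
# the voxel at (1.05, 2.0, 3.0) is punched since 0.05**2/0.1**2 < 1.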
def _crop_sphere(self, signal, dimX, dimY, dimZ):
X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)
sphereMin = self.getProperty("SphereMin").value
if sphereMin[0] < Property.EMPTY_DBL:
if len(sphereMin)==1:
sphereMin = np.repeat(sphereMin, 3)
signal[X**2/sphereMin[0]**2 + Y**2/sphereMin[1]**2 + Z**2/sphereMin[2]**2 < 1]=np.nan
sphereMax = self.getProperty("SphereMax").value
if sphereMax[0] < Property.EMPTY_DBL:
if len(sphereMax)==1:
sphereMax = np.repeat(sphereMax, 3)
if self.getProperty("FillValue").value == Property.EMPTY_DBL:
fill_value = np.nan
else:
fill_value = self.getProperty("FillValue").value
signal[X**2/sphereMax[0]**2 + Y**2/sphereMax[1]**2 + Z**2/sphereMax[2]**2 > 1]=fill_value
return signal
def _get_XYZ_ogrid(self, dimX, dimY, dimZ):
"""
Returns X, Y and Z as ogrid
"""
Xmin, Xmax, Xbins, _ = self._get_dim_params(dimX)
Ymin, Ymax, Ybins, _ = self._get_dim_params(dimY)
Zmin, Zmax, Zbins, _ = self._get_dim_params(dimZ)
return np.ogrid[(dimX.getX(0)+dimX.getX(1))/2:(dimX.getX(Xbins)+dimX.getX(Xbins-1))/2:Xbins*1j,
(dimY.getX(0)+dimY.getX(1))/2:(dimY.getX(Ybins)+dimY.getX(Ybins-1))/2:Ybins*1j,
(dimZ.getX(0)+dimZ.getX(1))/2:(dimZ.getX(Zbins)+dimZ.getX(Zbins-1))/2:Zbins*1j]
def _get_dim_params(self, dim):
"""
Return the min, max, number_of_bins and bin_width of dim
"""
return dim.getMinimum(), dim.getMaximum(), dim.getNBins(), dim.getBinWidth()
def _convolution(self, signal):
from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
try:
logger.debug('Trying astropy.convolution.convolve_fft for convolution')
return convolve_fft(signal, G3D) # Faster but will fail with large signal and kernel arrays
except ValueError:
logger.debug('Using astropy.convolution.convolve for convolution')
return convolve(signal, G3D)
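# Added note (illustrative): G3D is built by broadcasting -- shapes
# (n,) * (n,1) * (n,1,1) broadcast to (n,n,n), so G3D[i,j,k] equals
# G1D[i]*G1D[j]*G1D[k], the separable outer product of the 1D kernel.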
def _calc_new_extents(self, inWS):
# Calculate new extents for fft space
extents=''
for d in range(inWS.getNumDims()):
dim = inWS.getDimension(d)
if dim.getNBins() == 1:
fft_dim = 1./(dim.getMaximum()-dim.getMinimum())
extents+=str(-fft_dim/2.)+','+str(fft_dim/2.)+','
else:
fft_dim=np.fft.fftshift(np.fft.fftfreq(dim.getNBins(), (dim.getMaximum()-dim.getMinimum())/dim.getNBins()))
extents+=str(fft_dim[0])+','+str(fft_dim[-1])+','
return extents[:-1]
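# Added worked example (illustrative): for a dimension with 10 bins spanning
# -5..5 (bin width 1), np.fft.fftshift(np.fft.fftfreq(10, 1.0)) runs from
# -0.5 to 0.4 in steps of 0.1, and those endpoints become the new extents.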
def _karen(self, signal, width):
"""
Bragg peaks are located as outliers in some moving window
Outliers are defined as values more than 3sigma away from the median
Sigma is estimated using 1.4826*MAD
Returns median+2.2*MAD of window for values detected to be outliers
Takes the input dataset (signal) and the moving-window width (width).
The window width must be odd; an even width makes the window asymmetric
and the filter misbehaves.
"""
med = ndimage.filters.median_filter(signal, size=width, mode='nearest') # Get median of input data set
mad = ndimage.filters.median_filter(np.abs(signal-med), size=width, mode='nearest') # Get median absolute deviation (MAD)
asigma = np.abs(mad*3*1.4826) # Absolute value of approximate sigma
mask = np.logical_or(signal < (med-asigma), signal > (med+asigma)) # Check if value is outlier based on MAD
signal[mask] = (med+2.2*mad)[mask] # Return median+2.2*MAD if value is outlier
return signal
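# Worked example (added, illustrative): for window values [1, 2, 2, 3, 100]
# the median is 2 and MAD = median(|x - 2|) = 1, so asigma = 3*1.4826*1
# ~= 4.45. Only 100 lies outside [2 - 4.45, 2 + 4.45], so it is replaced
# by med + 2.2*MAD = 2 + 2.2 = 4.2; the other values are kept.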
def _gaussian_window(self, width, sigma):
"""
Generates a gaussian window
sigma is based on the data being in a range 0 to 1
"""
from scipy.signal import gaussian
return (gaussian(width[0], sigma*width[0]).reshape((-1,1,1))
* gaussian(width[1], sigma*width[1]).reshape((-1,1))
* gaussian(width[2], sigma*width[2]))
def _blackman_window(self, width):
"""
Generates a blackman window
"""
return np.blackman(width[0]).reshape((-1,1,1)) * np.blackman(width[1]).reshape((-1,1)) * np.blackman(width[2])
def _tukey_window(self, width, alpha):
"""
Generates a tukey window
0 <= alpha <=1
alpha = 0 becomes rectangular
alpha = 1 becomes a Hann window
"""
from scipy.signal import tukey
return (tukey(width[0], alpha).reshape((-1,1,1))
* tukey(width[1], alpha).reshape((-1,1))
* tukey(width[2], alpha))
def _kaiser_window(self, width, beta):
"""
Generates a kaiser window
beta Window shape
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
"""
return np.kaiser(width[0], beta).reshape((-1,1,1)) * np.kaiser(width[1], beta).reshape((-1,1)) * np.kaiser(width[2], beta)
AlgorithmFactory.subscribe(DeltaPDF3D)
|
biddisco/VTK | refs/heads/master | ThirdParty/Twisted/twisted/lore/latex.py | 14 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
LaTeX output support for Lore.
"""
from xml.dom import minidom as dom
import os.path, re
from cStringIO import StringIO
import urlparse
from twisted.web import domhelpers
from twisted.python import text, procutils
import tree
escapingRE = re.compile(r'([\[\]#$%&_{}^~\\])')
lowerUpperRE = re.compile(r'([a-z])([A-Z])')
def _escapeMatch(match):
c = match.group()
if c == '\\':
return '$\\backslash$'
elif c == '~':
return '\\~{}'
elif c == '^':
return '\\^{}'
elif c in '[]':
return '{'+c+'}'
else:
return '\\' + c
def latexEscape(txt):
txt = escapingRE.sub(_escapeMatch, txt)
return txt.replace('\n', ' ')
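# Added example (illustrative): latexEscape rewrites LaTeX-special characters
# into safe spellings and folds newlines into spaces, e.g.
#   latexEscape('50% of $x_2 [note]') == '50\\% of \\$x\\_2 {[}note{]}'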
entities = {'amp': '\&', 'gt': '>', 'lt': '<', 'quot': '"',
'copy': '\\copyright', 'mdash': '---', 'rdquo': '``',
'ldquo': "''"}
def realpath(path):
# Normalise path
cwd = os.getcwd()
path = os.path.normpath(os.path.join(cwd, path))
return path.replace('\\', '/') # windows slashes make LaTeX blow up
def getLatexText(node, writer, filter=lambda x:x, entities=entities):
if hasattr(node, 'eref'):
return writer(entities.get(node.eref, ''))
if hasattr(node, 'data'):
if isinstance(node.data, unicode):
data = node.data.encode('utf-8')
else:
data = node.data
return writer(filter(data))
for child in node.childNodes:
getLatexText(child, writer, filter, entities)
class BaseLatexSpitter:
def __init__(self, writer, currDir='.', filename=''):
self.writer = writer
self.currDir = currDir
self.filename = filename
def visitNode(self, node):
if isinstance(node, dom.Comment):
return
if not hasattr(node, 'tagName'):
self.writeNodeData(node)
return
getattr(self, 'visitNode_'+node.tagName, self.visitNodeDefault)(node)
def visitNodeDefault(self, node):
self.writer(getattr(self, 'start_'+node.tagName, ''))
for child in node.childNodes:
self.visitNode(child)
self.writer(getattr(self, 'end_'+node.tagName, ''))
def visitNode_a(self, node):
if node.hasAttribute('class'):
if node.getAttribute('class').endswith('listing'):
return self.visitNode_a_listing(node)
if node.hasAttribute('href'):
return self.visitNode_a_href(node)
if node.hasAttribute('name'):
return self.visitNode_a_name(node)
self.visitNodeDefault(node)
def visitNode_span(self, node):
if not node.hasAttribute('class'):
return self.visitNodeDefault(node)
node.tagName += '_'+node.getAttribute('class')
self.visitNode(node)
visitNode_div = visitNode_span
def visitNode_h1(self, node):
pass
def visitNode_style(self, node):
pass
class LatexSpitter(BaseLatexSpitter):
baseLevel = 0
diaHack = bool(procutils.which("dia"))
def writeNodeData(self, node):
buf = StringIO()
getLatexText(node, buf.write, latexEscape)
self.writer(buf.getvalue().replace('<', '$<$').replace('>', '$>$'))
def visitNode_head(self, node):
authorNodes = domhelpers.findElementsWithAttribute(node, 'rel', 'author')
authorNodes = [n for n in authorNodes if n.tagName == 'link']
if authorNodes:
self.writer('\\author{')
authors = []
for aNode in authorNodes:
name = aNode.getAttribute('title')
href = aNode.getAttribute('href')
if href.startswith('mailto:'):
href = href[7:]
if href:
if name:
name += ' '
name += '$<$' + href + '$>$'
if name:
authors.append(name)
self.writer(' \\and '.join(authors))
self.writer('}')
self.visitNodeDefault(node)
def visitNode_pre(self, node):
self.writer('\\begin{verbatim}\n')
buf = StringIO()
getLatexText(node, buf.write)
self.writer(text.removeLeadingTrailingBlanks(buf.getvalue()))
self.writer('\\end{verbatim}\n')
def visitNode_code(self, node):
fout = StringIO()
getLatexText(node, fout.write, latexEscape)
data = lowerUpperRE.sub(r'\1\\linebreak[1]\2', fout.getvalue())
data = data[:1] + data[1:].replace('.', '.\\linebreak[1]')
self.writer('\\texttt{'+data+'}')
def visitNode_img(self, node):
fileName = os.path.join(self.currDir, node.getAttribute('src'))
target, ext = os.path.splitext(fileName)
if self.diaHack and os.access(target + '.dia', os.R_OK):
ext = '.dia'
fileName = target + ext
f = getattr(self, 'convert_'+ext[1:], None)
if not f:
return
target = os.path.join(self.currDir, os.path.basename(target)+'.eps')
f(fileName, target)
target = os.path.basename(target)
self._write_img(target)
def _write_img(self, target):
"""Write LaTeX for image."""
self.writer('\\begin{center}\\includegraphics[%%\n'
'width=1.0\n'
'\\textwidth,height=1.0\\textheight,\nkeepaspectratio]'
'{%s}\\end{center}\n' % target)
def convert_png(self, src, target):
# XXX there's a *reason* Python comes with the pipes module -
# someone fix this to use it.
r = os.system('pngtopnm "%s" | pnmtops -noturn > "%s"' % (src, target))
if r != 0:
raise OSError(r)
def convert_dia(self, src, target):
# EVIL DISGUSTING HACK
data = os.popen("gunzip -dc %s" % (src)).read()
pre = '<dia:attribute name="scaling">\n <dia:real val="1"/>'
post = '<dia:attribute name="scaling">\n <dia:real val="0.5"/>'
f = open('%s_hacked.dia' % (src), 'wb')
f.write(data.replace(pre, post))
f.close()
os.system('gzip %s_hacked.dia' % (src,))
os.system('mv %s_hacked.dia.gz %s_hacked.dia' % (src,src))
# Let's pretend we never saw that.
# Silly dia needs an X server, even though it doesn't display anything.
# If this is a problem for you, try using Xvfb.
os.system("dia %s_hacked.dia -n -e %s" % (src, target))
def visitNodeHeader(self, node):
level = (int(node.tagName[1])-2)+self.baseLevel
self.writer('\n\n\\'+level*'sub'+'section{')
spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}\n')
def visitNode_a_listing(self, node):
fileName = os.path.join(self.currDir, node.getAttribute('href'))
self.writer('\\begin{verbatim}\n')
lines = map(str.rstrip, open(fileName).readlines())
skipLines = int(node.getAttribute('skipLines') or 0)
lines = lines[skipLines:]
self.writer(text.removeLeadingTrailingBlanks('\n'.join(lines)))
self.writer('\\end{verbatim}')
# Write a caption for this source listing
fileName = os.path.basename(fileName)
caption = domhelpers.getNodeText(node)
if caption == fileName:
caption = 'Source listing'
self.writer('\\parbox[b]{\\linewidth}{\\begin{center}%s --- '
            '\\begin{em}%s\\end{em}\\end{center}}'
            % (latexEscape(caption), latexEscape(fileName)))
def visitNode_a_href(self, node):
supported_schemes=['http', 'https', 'ftp', 'mailto']
href = node.getAttribute('href')
if urlparse.urlparse(href)[0] in supported_schemes:
text = domhelpers.getNodeText(node)
self.visitNodeDefault(node)
if text != href:
self.writer('\\footnote{%s}' % latexEscape(href))
else:
path, fragid = (href.split('#', 1) + [None])[:2]
if path == '':
path = self.filename
else:
path = os.path.join(os.path.dirname(self.filename), path)
#if path == '':
#path = os.path.basename(self.filename)
#else:
# # Hack for linking to man pages from howtos, i.e.
# # ../doc/foo-man.html -> foo-man.html
# path = os.path.basename(path)
path = realpath(path)
if fragid:
ref = path + 'HASH' + fragid
else:
ref = path
self.writer('\\textit{')
self.visitNodeDefault(node)
self.writer('}')
self.writer('\\loreref{%s}' % ref)
def visitNode_a_name(self, node):
self.writer('\\label{%sHASH%s}' % (
realpath(self.filename), node.getAttribute('name')))
self.visitNodeDefault(node)
def visitNode_table(self, node):
rows = [[col for col in row.childNodes
if getattr(col, 'tagName', None) in ('th', 'td')]
for row in node.childNodes if getattr(row, 'tagName', None)=='tr']
numCols = 1+max([len(row) for row in rows])
self.writer('\\begin{table}[ht]\\begin{center}')
self.writer('\\begin{tabular}{@{}'+'l'*numCols+'@{}}')
for row in rows:
th = 0
for col in row:
self.visitNode(col)
self.writer('&')
if col.tagName == 'th':
th = 1
self.writer('\\\\\n') #\\ ends lines
if th:
self.writer('\\hline\n')
self.writer('\\end{tabular}\n')
if node.hasAttribute('title'):
self.writer('\\caption{%s}'
% latexEscape(node.getAttribute('title')))
self.writer('\\end{center}\\end{table}\n')
def visitNode_span_footnote(self, node):
self.writer('\\footnote{')
spitter = FootnoteLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}')
def visitNode_span_index(self, node):
self.writer('\\index{%s}\n' % node.getAttribute('value'))
self.visitNodeDefault(node)
visitNode_h2 = visitNode_h3 = visitNode_h4 = visitNodeHeader
start_title = '\\title{'
end_title = '}\n'
start_sub = '$_{'
end_sub = '}$'
start_sup = '$^{'
end_sup = '}$'
start_html = '''\\documentclass{article}
\\newcommand{\\loreref}[1]{%
\\ifthenelse{\\value{page}=\\pageref{#1}}%
{ (this page)}%
{ (page \\pageref{#1})}%
}'''
start_body = '\\begin{document}\n\\maketitle\n'
end_body = '\\end{document}'
start_dl = '\\begin{description}\n'
end_dl = '\\end{description}\n'
start_ul = '\\begin{itemize}\n'
end_ul = '\\end{itemize}\n'
start_ol = '\\begin{enumerate}\n'
end_ol = '\\end{enumerate}\n'
start_li = '\\item '
end_li = '\n'
start_dt = '\\item['
end_dt = ']'
end_dd = '\n'
start_p = '\n\n'
start_strong = start_em = '\\begin{em}'
end_strong = end_em = '\\end{em}'
start_q = "``"
end_q = "''"
start_div_note = '\\begin{quotation}\\textbf{Note:}'
end_div_note = '\\end{quotation}'
start_th = '\\textbf{'
end_th = '}'
class SectionLatexSpitter(LatexSpitter):
baseLevel = 1
start_title = '\\section{'
def visitNode_title(self, node):
self.visitNodeDefault(node)
#self.writer('\\label{%s}}\n' % os.path.basename(self.filename))
self.writer('\\label{%s}}\n' % realpath(self.filename))
end_title = end_body = start_body = start_html = ''
class ChapterLatexSpitter(SectionLatexSpitter):
baseLevel = 0
start_title = '\\chapter{'
class HeadingLatexSpitter(BaseLatexSpitter):
start_q = "``"
end_q = "''"
writeNodeData = LatexSpitter.writeNodeData.im_func
class FootnoteLatexSpitter(LatexSpitter):
"""For multi-paragraph footnotes, this avoids having an empty leading
paragraph."""
start_p = ''
def visitNode_span_footnote(self, node):
self.visitNodeDefault(node)
def visitNode_p(self, node):
self.visitNodeDefault(node)
self.start_p = LatexSpitter.start_p
class BookLatexSpitter(LatexSpitter):
def visitNode_body(self, node):
tocs=domhelpers.locateNodes([node], 'class', 'toc')
domhelpers.clearNode(node)
if len(tocs):
toc=tocs[0]
node.appendChild(toc)
self.visitNodeDefault(node)
def visitNode_link(self, node):
if not node.hasAttribute('rel'):
return self.visitNodeDefault(node)
node.tagName += '_'+node.getAttribute('rel')
self.visitNode(node)
def visitNode_link_author(self, node):
self.writer('\\author{%s}\n' % node.getAttribute('text'))
def visitNode_link_stylesheet(self, node):
if node.hasAttribute('type') and node.hasAttribute('href'):
if node.getAttribute('type')=='application/x-latex':
packagename=node.getAttribute('href')
packagebase,ext=os.path.splitext(packagename)
self.writer('\\usepackage{%s}\n' % packagebase)
start_html = r'''\documentclass[oneside]{book}
\usepackage{graphicx}
\usepackage{times,mathptmx}
'''
start_body = r'''\begin{document}
\maketitle
\tableofcontents
'''
start_li=''
end_li=''
start_ul=''
end_ul=''
def visitNode_a(self, node):
if node.hasAttribute('class'):
a_class=node.getAttribute('class')
if a_class.endswith('listing'):
return self.visitNode_a_listing(node)
else:
return getattr(self, 'visitNode_a_%s' % a_class)(node)
if node.hasAttribute('href'):
return self.visitNode_a_href(node)
if node.hasAttribute('name'):
return self.visitNode_a_name(node)
self.visitNodeDefault(node)
def visitNode_a_chapter(self, node):
self.writer('\\chapter{')
self.visitNodeDefault(node)
self.writer('}\n')
def visitNode_a_sect(self, node):
base,ext=os.path.splitext(node.getAttribute('href'))
self.writer('\\input{%s}\n' % base)
def processFile(spitter, fin):
# XXX Use Inversion Of Control Pattern to orthogonalize the parsing API
# from the Visitor Pattern application. (EnterPrise)
dom = tree.parseFileAndReport(fin.name, lambda x: fin).documentElement
spitter.visitNode(dom)
def convertFile(filename, spitterClass):
fout = open(os.path.splitext(filename)[0]+".tex", 'w')
spitter = spitterClass(fout.write, os.path.dirname(filename), filename)
fin = open(filename)
processFile(spitter, fin)
fin.close()
fout.close()
|
zhuwenping/python-for-android | refs/heads/master | python3-alpha/python3-src/PC/VS8.0/build_ssl.py | 48 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PC/VS8.0 directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
candidates = []
for s in sources:
try:
# note: do not abspath s; the build will fail if any
# higher up directory name has spaces in it.
fnames = os.listdir(s)
except os.error:
fnames = []
for fname in fnames:
fqn = os.path.join(s, fname)
if os.path.isdir(fqn) and fname.startswith("openssl-"):
candidates.append(fqn)
# Now we have all the candidates, locate the best.
best_parts = []
best_name = None
for c in candidates:
parts = re.split("[.-]", os.path.basename(c))[1:]
# eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
if len(parts) >= 4:
continue
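        # list comparison is lexicographic over the string parts, so the
        # highest-sorting version directory wins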
if parts > best_parts:
best_parts = parts
best_name = c
if best_name is not None:
print("Found an SSL directory at '%s'" % (best_name,))
else:
print("Could not find an SSL directory in '%s'" % (sources,))
sys.stdout.flush()
return best_name
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
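    # Compare two files chunk by chunk; returns True when their contents are identical.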
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
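    # Copy only when the destination is missing or differs, to avoid needless rebuilds.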
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 3 levels up from PC/VS8.0 - ie, same place zlib etc all live.
ssl_dir = find_best_ssl_dir(("..\\..\\..",))
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild makefile when we roll over from the 32 to the 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
        # Due to a bug in this script, the makefile sometimes ended up empty.
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("ml64 -c -Foms\\uptable.obj ms\\uptable.asm")
if rc:
print("ml64 assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
|
ismailbaskin/FileUploaderBundle | refs/heads/master | Resources/public/lib/jquery-file-upload/server/gae-python/main.py | 245 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.2.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'https://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
def initialize(self, request, response):
super(UploadHandler, self).initialize(request, response)
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers[
'Access-Control-Allow-Methods'
] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
self.response.headers[
'Access-Control-Allow-Headers'
] = 'Content-Type, Content-Range, Content-Disposition'
def validate(self, file):
if file['size'] < MIN_FILE_SIZE:
file['error'] = 'File is too small'
elif file['size'] > MAX_FILE_SIZE:
file['error'] = 'File is too big'
elif not ACCEPT_FILE_TYPES.match(file['type']):
file['error'] = 'Filetype not allowed'
else:
return True
return False
def get_file_size(self, file):
file.seek(0, 2) # Seek to the end of the file
size = file.tell() # Get the position of EOF
file.seek(0) # Reset the file position to the beginning
return size
def write_blob(self, data, info):
blob = files.blobstore.create(
mime_type=info['type'],
_blobinfo_uploaded_filename=info['name']
)
with files.open(blob, 'a') as f:
f.write(data)
files.finalize(blob)
return files.blobstore.get_blob_key(blob)
def handle_upload(self):
results = []
blob_keys = []
for name, fieldStorage in self.request.POST.items():
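            # plain POST parameters arrive as unicode; only FieldStorage items are file uploads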
if type(fieldStorage) is unicode:
continue
result = {}
result['name'] = re.sub(
r'^.*\\',
'',
fieldStorage.filename
)
result['type'] = fieldStorage.type
result['size'] = self.get_file_size(fieldStorage.file)
if self.validate(result):
blob_key = str(
self.write_blob(fieldStorage.value, result)
)
blob_keys.append(blob_key)
result['deleteType'] = 'DELETE'
result['deleteUrl'] = self.request.host_url +\
'/?key=' + urllib.quote(blob_key, '')
if (IMAGE_TYPES.match(result['type'])):
try:
result['url'] = images.get_serving_url(
blob_key,
secure_url=self.request.host_url.startswith(
'https'
)
)
result['thumbnailUrl'] = result['url'] +\
THUMBNAIL_MODIFICATOR
                    except Exception:  # Could not get an image serving url
pass
                if 'url' not in result:
result['url'] = self.request.host_url +\
'/' + blob_key + '/' + urllib.quote(
result['name'].encode('utf-8'), '')
results.append(result)
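        # The demo auto-deletes uploads: schedule blob cleanup after EXPIRATION_TIME seconds.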
deferred.defer(
cleanup,
blob_keys,
_countdown=EXPIRATION_TIME
)
return results
def options(self):
pass
def head(self):
pass
def get(self):
self.redirect(WEBSITE)
def post(self):
if (self.request.get('_method') == 'DELETE'):
return self.delete()
result = {'files': self.handle_upload()}
s = json.dumps(result, separators=(',', ':'))
redirect = self.request.get('redirect')
if redirect:
return self.redirect(str(
redirect.replace('%s', urllib.quote(s, ''), 1)
))
        if 'application/json' in self.request.headers.get('Accept', ''):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
def delete(self):
key = self.request.get('key') or ''
blobstore.delete(key)
s = json.dumps({key: True}, separators=(',', ':'))
        if 'application/json' in self.request.headers.get('Accept', ''):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, key, filename):
if not blobstore.get(key):
self.error(404)
else:
# Prevent browsers from MIME-sniffing the content-type:
self.response.headers['X-Content-Type-Options'] = 'nosniff'
# Cache for the expiration time:
self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
# Send the file forcing a download dialog:
self.send_blob(key, save_as=filename, content_type='application/octet-stream')
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/([^/]+)/([^/]+)', DownloadHandler)
],
debug=True
)
|
kevchentw/nctu_hackathon | refs/heads/master | backend/web/product.py | 2 | from req import WebRequestHandler
from req import Service
import tornado
class WebProductHandler(WebRequestHandler):
@tornado.gen.coroutine
def get(self, action=None, product_id=None):
print(action)
        if action is None:
err, data = yield from Service.Product.get_product({'id': self.id})
if err:
                self.write_error(500)
else: self.render('product/get_product.html', data=data)
elif action == 'add':
err, data = yield from Service.Product.get_product({'id': self.id})
self.render('product/add_product.html')
elif action == 'show':
err, data = yield from Service.Product.get_product_by_id({'id': product_id})
if err: self.write_error(500, err)
else: self.render('product/show_product.html', data=data)
elif action == 'fast':
self.render('product/fast_product.html')
|
chachan/nodeshot | refs/heads/master | nodeshot/networking/links/exceptions.py | 7 | class LinkException(Exception):
pass
class LinkDataNotFound(LinkException):
"""
    IP addresses or MAC addresses not present in the database
"""
pass
class LinkNotFound(LinkException):
"""
    IP addresses or MAC addresses are present but the Link does not exist
"""
def __init__(self, *args, **kwargs):
self.interface_a = kwargs.pop('interface_a')
self.interface_b = kwargs.pop('interface_b')
self.topology = kwargs.pop('topology')
|
orgito/ansible | refs/heads/devel | lib/ansible/modules/network/routeros/routeros_facts.py | 33 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: routeros_facts
version_added: "2.8"
author: "Egor Zaitsev (@heuels)"
short_description: Collect facts from remote devices running MikroTik RouterOS
description:
- Collects a base set of device facts from a remote device that
    is running RouterOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
C(all), C(hardware), C(config), and C(interfaces). Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- routeros_facts:
gather_subset: all
# Collect only the config and default facts
- routeros_facts:
gather_subset:
- config
# Do not collect hardware facts
- routeros_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_spacefree_mb:
description: The available disk space on the remote device in MiB
returned: when hardware is configured
type: dict
ansible_net_spacetotal_mb:
description: The total disk space on the remote device in MiB
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in MiB
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in MiB
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.routeros.routeros import run_commands
from ansible.module_utils.network.routeros.routeros import routeros_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
def run(self, cmd):
return run_commands(self.module, commands=cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = [
'/system identity print without-paging',
'/system resource print without-paging',
'/system routerboard print without-paging'
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
if data:
self.facts['hostname'] = self.parse_hostname(data)
data = self.responses[1]
if data:
self.facts['version'] = self.parse_version(data)
data = self.responses[2]
if data:
self.facts['model'] = self.parse_model(data)
self.facts['serialnum'] = self.parse_serialnum(data)
def parse_hostname(self, data):
match = re.search(r'name:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_version(self, data):
match = re.search(r'version:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'model:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'serial-number:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
class Hardware(FactsBase):
COMMANDS = [
'/system resource print without-paging'
]
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.parse_filesystem_info(data)
self.parse_memory_info(data)
def parse_filesystem_info(self, data):
match = re.search(r'free-hdd-space:\s(.*)([KMG]iB)', data, re.M)
if match:
self.facts['spacefree_mb'] = self.to_megabytes(match)
match = re.search(r'total-hdd-space:\s(.*)([KMG]iB)', data, re.M)
if match:
self.facts['spacetotal_mb'] = self.to_megabytes(match)
def parse_memory_info(self, data):
match = re.search(r'free-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M)
if match:
self.facts['memfree_mb'] = self.to_megabytes(match)
match = re.search(r'total-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M)
if match:
self.facts['memtotal_mb'] = self.to_megabytes(match)
def to_megabytes(self, data):
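        # data is a regex match: group(1) = numeric value, group(2) = unit; normalise to MiB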
if data.group(2) == 'KiB':
return float(data.group(1)) / 1024
elif data.group(2) == 'MiB':
return float(data.group(1))
elif data.group(2) == 'GiB':
return float(data.group(1)) * 1024
else:
return None
class Config(FactsBase):
COMMANDS = ['/export']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = [
'/interface print detail without-paging',
'/ip address print detail without-paging',
'/ipv6 address print detail without-paging',
'/ip neighbor print detail without-paging'
]
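    # parses key=value pairs from "print detail" output; a value is either a
    # RouterOS timestamp or a bare token, optionally quoted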
DETAIL_RE = re.compile(r'([\w\d\-]+)=\"?(\w{3}/\d{2}/\d{4}\s\d{2}:\d{2}:\d{2}|[\w\d\-\.:/]+)')
def populate(self):
super(Interfaces, self).populate()
self.facts['interfaces'] = dict()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
self.facts['neighbors'] = dict()
data = self.responses[0]
if data:
interfaces = self.parse_interfaces(data)
self.populate_interfaces(interfaces)
data = self.responses[1]
if data:
data = self.parse_addresses(data)
self.populate_ipv4_interfaces(data)
data = self.responses[2]
if data:
data = self.parse_addresses(data)
self.populate_ipv6_interfaces(data)
data = self.responses[3]
if data:
self.facts['neighbors'] = self.parse_neighbors(data)
def populate_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key] = value
def populate_ipv4_interfaces(self, data):
for key, value in iteritems(data):
if 'ipv4' not in self.facts['interfaces'][key]:
self.facts['interfaces'][key]['ipv4'] = list()
addr, subnet = value['address'].split("/")
ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
if 'ipv6' not in self.facts['interfaces'][key]:
self.facts['interfaces'][key]['ipv6'] = list()
addr, subnet = value['address'].split("/")
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def preprocess(self, data):
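        # Strip the flag legend, drop the two-character index prefix from each
        # entry, and join wrapped continuation lines back onto their entry.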
preprocessed = list()
for line in data.split('\n'):
if len(line) == 0 or line[:5] == 'Flags':
continue
elif re.match(r'\s\d', line[:2]):
preprocessed.append(line[2:])
else:
preprocessed[-1] += line
return preprocessed
def parse_interfaces(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_name(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_addresses(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_interface(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_neighbors(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_interface(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_name(self, data):
match = re.search(r'name=\"([\w\d\-]+)\"', data, re.M)
if match:
return match.group(1)
def parse_interface(self, data):
match = re.search(r'interface=([\w\d\-]+)', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
warnings = list()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(routeros_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset: %s' % subset)
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
dkarakats/edx-platform | refs/heads/master | lms/djangoapps/course_wiki/plugins/markdownedx/wiki_plugin.py | 158 | # -*- coding: utf-8 -*-
from wiki.core.plugins.base import BasePlugin
from wiki.core.plugins import registry as plugin_registry
from course_wiki.plugins.markdownedx import mdx_circuit, mdx_mathjax, mdx_video
class ExtendMarkdownPlugin(BasePlugin):
"""
This plugin simply loads all of the markdown extensions we use in edX.
"""
markdown_extensions = [mdx_circuit.CircuitExtension(configs={}),
#mdx_image.ImageExtension() , #This one doesn't work. Tries to import simplewiki.settings
mdx_mathjax.MathJaxExtension(configs={}),
mdx_video.VideoExtension(configs={})]
plugin_registry.register(ExtendMarkdownPlugin)
|
dawnpower/nova | refs/heads/master | nova/tests/functional/v3/test_floating_ips_bulk.py | 33 | # Copyright 2014 IBM Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import context
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-floating-ips-bulk"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(FloatingIpsBulkTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.floating_ips_bulk.Floating_ips_bulk')
return f
def setUp(self):
super(FloatingIpsBulkTest, self).setUp()
pool = CONF.default_floating_pool
interface = CONF.public_interface
self.ip_pool = [
{
'address': "10.10.10.1",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.2",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.3",
'pool': pool,
'interface': interface,
'host': "testHost"
},
]
self.compute.db.floating_ip_bulk_create(
context.get_admin_context(), self.ip_pool)
self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
context.get_admin_context(), self.ip_pool)
def test_floating_ips_bulk_list(self):
response = self._do_get('os-floating-ips-bulk')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-resp',
subs, response, 200)
def test_floating_ips_bulk_list_by_host(self):
response = self._do_get('os-floating-ips-bulk/testHost')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-by-host-resp',
subs, response, 200)
def test_floating_ips_bulk_create(self):
response = self._do_post('os-floating-ips-bulk',
'floating-ips-bulk-create-req',
{"ip_range": "192.168.1.0/24",
"pool": CONF.default_floating_pool,
"interface": CONF.public_interface})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-create-resp', subs,
response, 200)
def test_floating_ips_bulk_delete(self):
response = self._do_put('os-floating-ips-bulk/delete',
'floating-ips-bulk-delete-req',
{"ip_range": "192.168.1.0/24"})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-delete-resp', subs,
response, 200)
|
hip-odoo/odoo | refs/heads/10.0 | addons/mail/models/mail_mail.py | 9 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import datetime
import logging
import psycopg2
import threading
from email.utils import formataddr
from odoo import _, api, fields, models
from odoo import tools
from odoo.addons.base.ir.ir_mail_server import MailDeliveryException
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class MailMail(models.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
# content
mail_message_id = fields.Many2one('mail.message', 'Message', required=True, ondelete='cascade', index=True, auto_join=True)
body_html = fields.Text('Rich-text Contents', help="Rich-text/HTML message")
references = fields.Text('References', help='Message references, such as identifiers of previous messages', readonly=1)
headers = fields.Text('Headers', copy=False)
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
notification = fields.Boolean('Is Notification', help='Mail has been created to notify people of an existing mail.message')
# recipients
email_to = fields.Text('To', help='Message recipients (emails)')
email_cc = fields.Char('Cc', help='Carbon copy message recipients')
recipient_ids = fields.Many2many('res.partner', string='To (Partners)')
# process
state = fields.Selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False, default='outgoing')
auto_delete = fields.Boolean(
'Auto Delete',
help="Permanently delete this email after sending it, to save space")
failure_reason = fields.Text(
'Failure Reason', readonly=1,
help="Failure reason. This is usually the exception thrown by the email server, stored to ease the debugging of mailing issues.")
scheduled_date = fields.Char('Scheduled Send Date',
help="If set, the queue manager will send the email after the date. If not set, the email will be send as soon as possible.")
@api.model
def create(self, values):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
if not values.get('mail_message_id'):
self = self.with_context(message_create_from_mail_mail=True)
return super(MailMail, self).create(values)
@api.multi
def unlink(self):
# cascade-delete the parent message for all mails that are not created for a notification
to_cascade = self.search([('notification', '=', False), ('id', 'in', self.ids)]).mapped('mail_message_id')
res = super(MailMail, self).unlink()
to_cascade.unlink()
return res
@api.model
def default_get(self, fields):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if self._context.get('default_type') not in type(self).message_type.base_field.selection:
self = self.with_context(dict(self._context, default_type=None))
return super(MailMail, self).default_get(fields)
@api.multi
def mark_outgoing(self):
return self.write({'state': 'outgoing'})
@api.multi
def cancel(self):
return self.write({'state': 'cancel'})
@api.model
def process_email_queue(self, ids=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if not self.ids:
filters = ['&',
('state', '=', 'outgoing'),
'|',
('scheduled_date', '<', datetime.datetime.now()),
('scheduled_date', '=', False)]
if 'filters' in self._context:
filters.extend(self._context['filters'])
ids = self.search(filters).ids
res = None
try:
# auto-commit except in testing mode
auto_commit = not getattr(threading.currentThread(), 'testing', False)
res = self.browse(ids).send(auto_commit=auto_commit)
except Exception:
_logger.exception("Failed processing mail queue")
return res
@api.multi
def _postprocess_sent_message(self, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
        :param mail_sent: whether the email sending succeeded
:return: True
"""
notif_emails = self.filtered(lambda email: email.notification)
if notif_emails:
notifications = self.env['mail.notification'].search([
('mail_message_id', 'in', notif_emails.mapped('mail_message_id').ids),
('is_email', '=', True)])
if mail_sent:
notifications.write({
'email_status': 'sent',
})
else:
notifications.write({
'email_status': 'exception',
})
if mail_sent:
self.sudo().filtered(lambda self: self.auto_delete).unlink()
return True
# ------------------------------------------------------
# mail_mail formatting, tools and send mechanism
# ------------------------------------------------------
@api.multi
def send_get_mail_body(self, partner=None):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
self.ensure_one()
body = self.body_html or ''
return body
@api.multi
def send_get_mail_to(self, partner=None):
"""Forge the email_to with the following heuristic:
- if 'partner', recipient specific (Partner Name <email>)
- else fallback on mail.email_to splitting """
self.ensure_one()
if partner:
email_to = [formataddr((partner.name, partner.email))]
else:
email_to = tools.email_split_and_format(self.email_to)
return email_to
@api.multi
def send_get_email_dict(self, partner=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
self.ensure_one()
body = self.send_get_mail_body(partner=partner)
body_alternative = tools.html2plaintext(body)
res = {
'body': body,
'body_alternative': body_alternative,
'email_to': self.send_get_mail_to(partner=partner),
}
return res
@api.multi
def send(self, auto_commit=False, raise_exception=False):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be deliver are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
IrMailServer = self.env['ir.mail_server']
for mail_id in self.ids:
try:
mail = self.browse(mail_id)
# TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
if mail.model:
model = self.env['ir.model'].sudo().search([('model', '=', mail.model)])[0]
else:
model = None
if model:
mail = mail.with_context(model_name=model.name)
# load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
# soft/hard mem limits with temporary data.
attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
for a in mail.attachment_ids.sudo().read(['datas_fname', 'datas'])]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(mail.send_get_email_dict())
for partner in mail.recipient_ids:
email_list.append(mail.send_get_email_dict(partner=partner))
# headers
headers = {}
bounce_alias = self.env['ir.config_parameter'].get_param("mail.bounce.alias")
catchall_domain = self.env['ir.config_parameter'].get_param("mail.catchall.domain")
if bounce_alias and catchall_domain:
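                    # encode the mail id (and model/res_id when known) into the
                    # bounce address so returned mail can be matched later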
if mail.model and mail.res_id:
headers['Return-Path'] = '%s+%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s+%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(safe_eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
# To avoid sending twice the same email, provoke the failure earlier
mail.write({
'state': 'exception',
                    'failure_reason': _('Error without exception. Probably due to sending an email without computed recipients.'),
})
mail_sent = False
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = IrMailServer.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=mail.subject,
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
try:
res = IrMailServer.send_email(msg, mail_server_id=mail.mail_server_id.id)
except AssertionError as error:
if error.message == IrMailServer.NO_VALID_RECIPIENT:
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.info("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
if res:
mail.write({'state': 'sent', 'message_id': res, 'failure_reason': False})
mail_sent = True
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1
if mail_sent:
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
mail._postprocess_sent_message(mail_sent=mail_sent)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception(
'MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
raise
except psycopg2.Error:
# If an error with the database occurs, chances are that the cursor is unusable.
# This will lead to an `psycopg2.InternalError` being raised when trying to write
# `state`, shadowing the original exception and forbid a retry on concurrent
# update. Let's bubble it.
raise
except Exception as e:
failure_reason = tools.ustr(e)
_logger.exception('failed sending mail (id: %s) due to %s', mail.id, failure_reason)
mail.write({'state': 'exception', 'failure_reason': failure_reason})
mail._postprocess_sent_message(mail_sent=False)
if raise_exception:
if isinstance(e, AssertionError):
# get the args of the original error, wrap into a value and throw a MailDeliveryException
# that is an except_orm, with name and value as arguments
value = '. '.join(e.args)
raise MailDeliveryException(_("Mail Delivery Failed"), value)
raise
if auto_commit is True:
self._cr.commit()
return True
|
creyesp/RF_Estimation | refs/heads/master | Clustering/helpers/gap/invocaGap.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# invocaGap.py
#
# Copyright 2015 Carlos "casep" Sepulveda <carlos.sepulveda@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys, os
#Relative path for RFE LIB
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..','LIB'))
import rfestimationLib as rfe #Some custom functions
import argparse #argument parsing
import gap as gap
from numpy import zeros
from numpy import empty
from numpy import concatenate
from math import pi
from numpy import append
from numpy import float64
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(prog='invocaGap.py',
description='Testing gap staticstics',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
print ''
sys.exit()
#Output folder for the graphics and files
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
print ''
sys.exit()
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
    #plus the ellipse parameters (both radii, angle, x and y position, area)
    #and the unit name
dataCluster = zeros((1,27))
units = []
dato = empty((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
dato = empty((1,1))
unitName = unitFile.rsplit('_', 1)[0]
#print unitName
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#Time data from STA with gauss fit
#dataUnitTemporal = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#Time data from STA without gauss fit
dataUnitTemporal = dataUnit[coordinates[0][0],[coordinates[1][0]],:]
#Time data from FITResult
#dataUnitTemporal = rfe.loadVectorAmp(sourceFolder,unitFile).T
#A radius of the RF ellipse
aRadius = fitResult[0][2]
dato[0] = aRadius
dataUnitCompleta = concatenate((dataUnitTemporal,dato),1)
#B radius of the RF ellipse
bRadius = fitResult[0][3]
dato[0] = bRadius
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
angle = fitResult[0][1]
dato[0] = angle
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
xCoordinate = fitResult[0][4]
#print 'xCoordinate',xCoordinate
dato[0] = xCoordinate
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
yCoordinate = fitResult[0][5]
#print 'yCoordinate',yCoordinate
dato[0] = yCoordinate
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#Area of the RF ellipse
area = aRadius*bRadius*pi
dato[0] = area
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#UnitName
dato=empty(1, dtype='|S16')
dato[0]=unitName
dataUnitCompleta = concatenate((dataUnitCompleta,dato.reshape(1, 1)),1)
dataCluster = append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:19]
data = data.astype(float64, copy=False)
gaps = gap.gap(data, refs=None, nrefs=len(data), ks=range(1,10))
dgap = zeros(len(gaps))
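    # successive differences of the gap curve; a pronounced drop hints at the best cluster count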
for i in range(len(gaps)-1):
dgap[i] = gaps[i]-gaps[i+1]
plt.plot(gaps)
plt.show()
plt.plot(dgap)
plt.show()
return 0
if __name__ == '__main__':
main()
|
openpgh/askpgh | refs/heads/master | askbot/migrations/0055_auto__chg_field_question_thread.py | 10 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Question.thread'
db.alter_column(u'question', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, unique=True, to=orm['askbot.Thread']))
def backwards(self, orm):
# Changing field 'Question.thread'
db.alter_column(u'question', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(unique=True, null=True, to=orm['askbot.Thread']))
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
olasitarska/django | refs/heads/master | tests/file_uploads/urls.py | 41 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^upload/$', views.file_upload_view),
url(r'^verify/$', views.file_upload_view_verify),
url(r'^unicode_name/$', views.file_upload_unicode_name),
url(r'^echo/$', views.file_upload_echo),
url(r'^echo_content_type_extra/$', views.file_upload_content_type_extra),
url(r'^echo_content/$', views.file_upload_echo_content),
url(r'^quota/$', views.file_upload_quota),
url(r'^quota/broken/$', views.file_upload_quota_broken),
url(r'^getlist_count/$', views.file_upload_getlist_count),
url(r'^upload_errors/$', views.file_upload_errors),
url(r'^filename_case/$', views.file_upload_filename_case_view),
url(r'^fd_closing/(?P<access>t|f)/$', views.file_upload_fd_closing),
]
|
lishensan/xbmc | refs/heads/master | lib/gtest/scripts/upload_gtest.py | 1963 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
pombredanne/pyjs | refs/heads/master | pyjs/runners/mshtmlevents.py | 6 | import traceback
import sys
import ctypes
import comtypes
from comtypes.hresult import *
import comtypes.automation
import comtypes.typeinfo
import comtypes.connectionpoints
from comtypes.client import wrap
from comtypes.client.dynamic import Dispatch
from comtypes.gen import MSHTML
import logging
logger = logging.getLogger(__name__)
class _AdviseConnection(object):
def __init__(self, source, interface, receiver):
cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
self.cp = cpc.FindConnectionPoint(ctypes.byref(interface._iid_))
logger.debug("Start advise %s", interface)
self.cookie = self.cp.Advise(receiver)
self.receiver = receiver
def disconnect(self):
if self.cookie:
self.cp.Unadvise(self.cookie)
logger.debug("Unadvised %s", self.cp)
self.cp = None
self.cookie = None
del self.receiver
def __del__(self):
try:
if self.cookie is not None:
self.cp.Unadvise(self.cookie)
except (comtypes.COMError, WindowsError):
# Are we sure we want to ignore errors here?
pass
def FindOutgoingInterface(source):
"""XXX Describe the strategy that is used..."""
# If the COM object implements IProvideClassInfo2, it is easy to
# find the default outgoing interface.
try:
pci = source.QueryInterface(comtypes.typeinfo.IProvideClassInfo2)
guid = pci.GetGUID(1)
except comtypes.COMError:
pass
else:
# another try: block needed?
try:
interface = comtypes.com_interface_registry[str(guid)]
except KeyError:
tinfo = pci.GetClassInfo()
tlib, index = tinfo.GetContainingTypeLib()
from comtypes.client import GetModule
GetModule(tlib)
interface = comtypes.com_interface_registry[str(guid)]
logger.debug("%s using sinkinterface %s", source, interface)
return interface
# If we can find the CLSID of the COM object, we can look for a
# registered outgoing interface (__clsid has been set by
# comtypes.client):
clsid = source.__dict__.get('__clsid')
try:
interface = comtypes.com_coclass_registry[clsid]._outgoing_interfaces_[0]
except KeyError:
pass
else:
logger.debug("%s using sinkinterface from clsid %s", source, interface)
return interface
## interface = find_single_connection_interface(source)
## if interface:
## return interface
raise TypeError("cannot determine source interface")
def find_single_connection_interface(source):
# Enumerate the connection interfaces. If we find a single one,
# return it, if there are more, we give up since we cannot
# determine which one to use.
cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
enum = cpc.EnumConnectionPoints()
iid = enum.next().GetConnectionInterface()
try:
enum.next()
except StopIteration:
try:
interface = comtypes.com_interface_registry[str(iid)]
except KeyError:
return None
else:
logger.debug("%s using sinkinterface from iid %s", source, interface)
return interface
else:
logger.debug("%s has nore than one connection point", source)
return None
from comtypes._comobject import _MethodFinder
class _SinkMethodFinder(_MethodFinder):
def __init__(self, inst, sink):
super(_SinkMethodFinder, self).__init__(inst)
self.sink = sink
def find_method(self, fq_name, mthname):
try:
return super(_SinkMethodFinder, self).find_method(fq_name, mthname)
except AttributeError:
try:
return getattr(self.sink, fq_name)
except AttributeError:
return getattr(self.sink, mthname)
def CreateEventReceiver(interface, sink):
class Sink(comtypes.COMObject):
_com_interfaces_ = [interface]
def _get_method_finder_(self, itf):
# Use a special MethodFinder that will first try 'self',
# then the sink.
return _SinkMethodFinder(self, sink)
return Sink()
def GetEvents(source, sink, interface=None):
"""Receive COM events from 'source'. Events will call methods on
the 'sink' object. 'interface' is the source interface to use.
"""
# When called from CreateObject, the sourceinterface has already
# been determined by the coclass. Otherwise, the only thing that
# makes sense is to use IProvideClassInfo2 to get the default
# source interface.
if interface is None:
interface = FindOutgoingInterface(source)
rcv = CreateEventReceiver(interface, sink)
return _AdviseConnection(source, interface, rcv)
class EventDumper(object):
"""Universal sink for COM events."""
def __getattr__(self, name):
"Create event handler methods on demand"
if name.startswith("__") and name.endswith("__"):
raise AttributeError(name)
#print "# event found:", name
def handler(self, this, *args, **kw):
# XXX handler is called with 'this'. Should we really print "None" instead?
args = (None,) + args
#print "Event %s(%s)" % (name, ", ".join([repr(a) for a in args]))
return comtypes.instancemethod(handler, self, EventDumper)
def ShowEvents(source, interface=None):
"""Receive COM events from 'source'. A special event sink will be
used that first prints the names of events that are found in the
outgoing interface, and will also print out the events when they
are fired.
"""
return GetEvents(source, sink=EventDumper(), interface=interface)
def PumpEvents(timeout):
"""This following code waits for 'timeout' seconds in the way
required for COM, internally doing the correct things depending
on the COM appartment of the current thread. It is possible to
terminate the message loop by pressing CTRL+C, which will raise
a KeyboardInterrupt.
"""
# XXX Should there be a way to pass additional event handles which
# can terminate this function?
# XXX XXX XXX
#
# It may be that I misunderstood the CoWaitForMultipleHandles
# function. Is a message loop required in a STA? Seems so...
#
# MSDN says:
#
# If the caller resides in a single-thread apartment,
# CoWaitForMultipleHandles enters the COM modal loop, and the
# thread's message loop will continue to dispatch messages using
# the thread's message filter. If no message filter is registered
# for the thread, the default COM message processing is used.
#
# If the calling thread resides in a multithread apartment (MTA),
# CoWaitForMultipleHandles calls the Win32 function
# MsgWaitForMultipleObjects.
hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None)
handles = (ctypes.c_void_p * 1)(hevt)
RPC_S_CALLPENDING = -2147417835
@ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)
def HandlerRoutine(dwCtrlType):
if dwCtrlType == 0: # CTRL+C
ctypes.windll.kernel32.SetEvent(hevt)
return 1
return 0
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1)
try:
try:
res = ctypes.oledll.ole32.CoWaitForMultipleHandles(0,
int(timeout * 1000),
len(handles), handles,
ctypes.byref(ctypes.c_ulong()))
except WindowsError, details:
if details.args[0] != RPC_S_CALLPENDING: # timeout expired
raise
else:
raise KeyboardInterrupt
finally:
ctypes.windll.kernel32.CloseHandle(hevt)
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
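# A minimal usage sketch of the event API above (illustrative only: the
# "InternetExplorer.Application" ProgID and MySink are assumptions, not
# part of this module):
#
#   ie = comtypes.client.CreateObject("InternetExplorer.Application")
#   connection = GetEvents(ie, sink=MySink())  # or ShowEvents(ie) to trace
#   PumpEvents(5)            # dispatch COM events for five seconds
#   connection.disconnect()  # unadvise the connection point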
class _DispEventReceiver(comtypes.COMObject):
_com_interfaces_ = [comtypes.automation.IDispatch]
# Hrm. If the receiving interface is implemented as a dual interface,
# the methods implementations expect 'out, retval' parameters in their
# argument list.
#
# What would happen if we call ITypeInfo::Invoke() ?
# If we call the methods directly, shouldn't we pass pVarResult
# as last parameter?
def IDispatch_Invoke(self, this, memid, riid, lcid, wFlags, pDispParams,
pVarResult, pExcepInfo, puArgErr):
#print "IDispatch_Invoke", memid, this, riid, lcid, pDispParams
mth = self.dispmap.get(memid, None)
if mth is None:
return S_OK
dp = pDispParams[0]
#print "num args", dp.cArgs
# DISPPARAMS contains the arguments in reverse order
args = [dp.rgvarg[i].value for i in range(dp.cArgs)]
#print "Event", self, memid, mth, args
event = None
if len(args) > 0:
event = wrap(args[0])
try:
result = mth(self.sender, event, None)
except:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
if pVarResult:
pVarResult[0].value = result
return S_OK
def GetTypeInfoCount(self, this, presult):
if not presult:
return E_POINTER
presult[0] = 0
return S_OK
def GetTypeInfo(self, this, itinfo, lcid, pptinfo):
return E_NOTIMPL
def GetIDsOfNames(self, this, riid, rgszNames, cNames, lcid, rgDispId):
return E_NOTIMPL
# XXX move into comtypes
def _getmemid(idlflags):
# get the dispid from the idlflags sequence
return [memid for memid in idlflags if isinstance(memid, int)][0]
# XXX move into comtypes?
def _get_dispmap(interface):
# return a dictionary mapping dispid numbers to method names
assert issubclass(interface, comtypes.automation.IDispatch)
dispmap = {}
if "dual" in interface._idlflags_:
# It would be nice if that would work:
## for info in interface._methods_:
## mth = getattr(interface, info.name)
## memid = mth.im_func.memid
# See also MSDN docs for the 'defaultvtable' idl flag, or
# IMPLTYPEFLAG_DEFAULTVTABLE. This is not a flag of the
# interface, but of the coclass!
#
# Use the _methods_ list
assert not hasattr(interface, "_disp_methods_")
for restype, name, argtypes, paramflags, idlflags, helpstring in interface._methods_:
memid = _getmemid(idlflags)
dispmap[memid] = name
else:
# Use _disp_methods_
# tag, name, idlflags, restype(?), argtypes(?)
for tag, name, idlflags, restype, argtypes in interface._disp_methods_:
memid = _getmemid(idlflags)
dispmap[memid] = name
return dispmap
def GetDispEventReceiver(interface, sink, sink_name=None):
methods = {} # maps memid to function
interfaces = interface.mro()[:-3] # skip IDispatch, IUnknown, object
interface_names = [itf.__name__ for itf in interfaces]
for itf in interfaces:
for memid, name in _get_dispmap(itf).iteritems():
if name == sink_name:
#print "GetDispEventReceiver", memid, name
methods[0] = sink
continue
# find methods to call, if not found ignore event
for itf_name in interface_names:
mth = getattr(sink, "%s_%s" % (itf_name, name), None)
if mth is not None:
break
else:
mth = getattr(sink, name, lambda *args: S_OK)
methods[memid] = mth
# XX Move this stuff into _DispEventReceiver.__init__() ?
rcv = _DispEventReceiver()
rcv.dispmap = methods
rcv._com_pointers_[interface._iid_] = rcv._com_pointers_[comtypes.automation.IDispatch._iid_]
return rcv
def GetCustomEventReceiver(interface, sink):
class EventReceiver(comtypes.COMObject):
_com_interfaces_ = [interface]
for itf in interface.mro()[:-2]: # skip object and IUnknown
for info in itf._methods_:
restype, name, argtypes, paramflags, idlflags, docstring = info
mth = getattr(sink, name, lambda self, this, *args: S_OK)
setattr(EventReceiver, name, mth)
rcv = EventReceiver()
return rcv
|
kcpawan/django | refs/heads/master | django/db/migrations/recorder.py | 478 | from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
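# A minimal usage sketch (illustrative only; assumes a configured default
# database connection):
#
#   from django.db import connection
#   recorder = MigrationRecorder(connection)
#   recorder.record_applied('myapp', '0001_initial')
#   assert ('myapp', '0001_initial') in recorder.applied_migrations()
#   recorder.record_unapplied('myapp', '0001_initial')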
|
alfredhq/alfred-db | refs/heads/develop | alfred_db/models/fix.py | 1 | from sqlalchemy import Column, Integer, Text, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from .base import Base
class Fix(Base):
id = Column(Integer, primary_key=True)
description = Column(Text, nullable=False)
description_html = Column(Text, nullable=False)
path = Column(String, nullable=False)
line = Column(Integer, nullable=False)
source = Column(Text, nullable=False)
solution = Column(Text, nullable=False)
push_id = Column(
Integer,
ForeignKey('pushes.id', ondelete='CASCADE'),
nullable=False,
)
push = relationship(
'Push',
backref=backref(
name='fixes',
lazy='dynamic',
cascade='all, delete-orphan',
passive_deletes=True,
),
)
__tablename__ = 'fixes'
def __repr__(self):
return '<Fix({!r}, {!r})>'.format(self.path, self.line)
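# A minimal usage sketch (illustrative only; assumes a SQLAlchemy session
# bound to this Base's metadata and an existing Push row):
#
#   fix = Fix(description='unused import', description_html='<p>...</p>',
#             path='app/models.py', line=42, source='import os',
#             solution='remove the import', push_id=push.id)
#   session.add(fix)
#   session.commit()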
|
klim-iv/phantomjs-qt5 | refs/heads/qt5 | src/breakpad/src/tools/gyp/test/variables/gyptest-commands-ignore-env.py | 138 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that environment variables are ignored when --ignore-environment is
specified.
"""
import os
import TestGyp
os.environ['GYP_DEFINES'] = 'FOO=BAR'
os.environ['GYP_GENERATORS'] = 'foo'
os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands.gyp.ignore-env.stdout')
# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = test.workpath()
test.run_gyp('commands.gyp',
'--debug', 'variables', '--debug', 'general',
'--ignore-environment',
stdout=expect)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r\n', '\n')
expect = test.read('commands.gypd.golden')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
|
phlax/pootle | refs/heads/master | pootle/__init__.py | 10 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pootle.core.utils.version import get_version
from pootle.constants import VERSION
__version__ = get_version(VERSION)
|
jasonwee/asus-rt-n14uhp-mrtg | refs/heads/master | src/lesson_data_persistence_and_exchange/sqlite3_row_factory.py | 1 | import sqlite3
db_filename = 'todo.db'
with sqlite3.connect(db_filename) as conn:
# Change the row factory to use Row
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("""
select name, description, deadline from project
where name = 'pymotw'
""")
name, description, deadline = cursor.fetchone()
print('Project details for {} ({})\n due {}'.format(
description, name, deadline))
cursor.execute("""
select id, priority, status, deadline, details from task
where project = 'pymotw' order by deadline
""")
print('\nNext 5 tasks:')
for row in cursor.fetchmany(5):
print('{:2d} [{:d}] {:<25} [{:<8}] ({})'.format(
row['id'], row['priority'], row['details'],
row['status'], row['deadline'],
))
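# Because conn.row_factory is sqlite3.Row, each row supports both index and
# key access (row[0] == row['id']) and row.keys() lists the column names;
# the default factory would return plain tuples with neither.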
|
anthonydillon/horizon | refs/heads/master | openstack_dashboard/dashboards/project/images/images/views.py | 13 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images.images \
import forms as project_forms
from openstack_dashboard.dashboards.project.images.images \
import tables as project_tables
from openstack_dashboard.dashboards.project.images.images \
import tabs as project_tabs
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateImageForm
form_id = "create_image_form"
modal_header = _("Create An Image")
submit_label = _("Create Image")
submit_url = reverse_lazy('horizon:project:images:images:create')
template_name = 'project/images/images/create.html'
context_object_name = 'image'
success_url = reverse_lazy("horizon:project:images:index")
page_title = _("Create An Image")
def get_initial(self):
initial = {}
for name in [
'name',
'description',
'image_url',
'source_type',
'architecture',
'disk_format',
'minimum_disk',
'minimum_ram'
]:
tmp = self.request.GET.get(name)
if tmp:
initial[name] = tmp
return initial
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateImageForm
form_id = "update_image_form"
modal_header = _("Update Image")
submit_label = _("Update Image")
submit_url = "horizon:project:images:images:update"
template_name = 'project/images/images/update.html'
success_url = reverse_lazy("horizon:project:images:index")
page_title = _("Update Image")
@memoized.memoized_method
def get_object(self):
try:
return api.glance.image_get(self.request, self.kwargs['image_id'])
except Exception:
msg = _('Unable to retrieve image.')
url = reverse('horizon:project:images:index')
exceptions.handle(self.request, msg, redirect=url)
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['image'] = self.get_object()
args = (self.kwargs['image_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
image = self.get_object()
properties = getattr(image, 'properties', {})
data = {'image_id': self.kwargs['image_id'],
'name': getattr(image, 'name', None) or image.id,
'description': properties.get('description', ''),
'kernel': properties.get('kernel_id', ''),
'ramdisk': properties.get('ramdisk_id', ''),
'architecture': properties.get('architecture', ''),
'minimum_ram': getattr(image, 'min_ram', None),
'minimum_disk': getattr(image, 'min_disk', None),
'public': getattr(image, 'is_public', None),
'protected': getattr(image, 'protected', None)}
disk_format = getattr(image, 'disk_format', None)
if (disk_format == 'raw' and
getattr(image, 'container_format') == 'docker'):
disk_format = 'docker'
data['disk_format'] = disk_format
return data
class DetailView(tabs.TabView):
tab_group_class = project_tabs.ImageDetailTabs
template_name = 'project/images/images/detail.html'
page_title = _("Image Details: {{ image.name }}")
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
image = self.get_data()
table = project_tables.ImagesTable(self.request)
context["image"] = image
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(image)
status_label = [label for (value, label) in
project_tables.ImagesTable.STATUS_DISPLAY_CHOICES
if value.lower() == (image.status or '').lower()]
if status_label:
image.status_label = status_label[0]
else:
image.status_label = image.status
return context
@staticmethod
def get_redirect_url():
return reverse_lazy('horizon:project:images:index')
@memoized.memoized_method
def get_data(self):
try:
return api.glance.image_get(self.request, self.kwargs['image_id'])
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve image details.'),
redirect=self.get_redirect_url())
def get_tabs(self, request, *args, **kwargs):
image = self.get_data()
return self.tab_group_class(request, image=image, **kwargs)
|
apple/swift-lldb | refs/heads/stable | packages/Python/lldbsuite/test/plugins/builder_base.py | 1 | """
If a build* function is passed the compiler argument, for example 'llvm-gcc',
it is passed as a make variable to the make command. Otherwise, we check the
CC environment variable; if it is defined, it is passed as a make variable to
the make command.
If neither the compiler keyword argument nor the CC environment variable is
specified, no CC make variable is passed to the make command; the Makefile gets
to define the default CC being used.
The same idea holds for the ARCH environment variable, which maps to the ARCH
make variable.
"""
# System imports
import os
import platform
import subprocess
import sys
# Our imports
import lldbsuite.test.lldbtest as lldbtest
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test_event import build_exception
import swift
def getArchitecture():
"""Returns the architecture in effect the test suite is running with."""
return os.environ["ARCH"] if "ARCH" in os.environ else ""
def getCompiler():
"""Returns the compiler in effect the test suite is running with."""
compiler = os.environ.get("CC", "clang")
compiler = lldbutil.which(compiler)
return os.path.realpath(compiler)
def getArchFlag():
"""Returns the flag required to specify the arch"""
compiler = getCompiler()
if compiler is None:
return ""
elif "gcc" in compiler:
archflag = "-m"
elif "clang" in compiler:
archflag = "-arch"
else:
archflag = None
return ("ARCHFLAG=" + archflag) if archflag else ""
def getMake(test_subdir, test_name):
"""Returns the invocation for GNU make.
The first argument is a tuple of the relative path to the testcase
and its filename stem."""
if platform.system() == "FreeBSD" or platform.system() == "NetBSD":
make = "gmake"
else:
make = "make"
# Construct the base make invocation.
lldb_test = os.environ["LLDB_TEST"]
lldb_build = os.environ["LLDB_BUILD"]
if not (lldb_test and lldb_build and test_subdir and test_name and
(not os.path.isabs(test_subdir))):
raise Exception("Could not derive test directories")
build_dir = os.path.join(lldb_build, test_subdir, test_name)
src_dir = os.path.join(lldb_test, test_subdir)
# This is a bit of a hack to make inline testcases work.
makefile = os.path.join(src_dir, "Makefile")
if not os.path.isfile(makefile):
makefile = os.path.join(build_dir, "Makefile")
return [make,
"VPATH="+src_dir,
"-C", build_dir,
"-I", src_dir,
"-I", os.path.join(lldb_test, "make"),
"-f", makefile]
def getArchSpec(architecture):
"""
Helper function to return the key-value string to specify the architecture
used for the make system.
"""
arch = architecture if architecture else None
if not arch and "ARCH" in os.environ:
arch = os.environ["ARCH"]
return ("ARCH=" + arch) if arch else ""
def getCCSpec(compiler):
"""
Helper function to return the key-value string to specify the compiler
used for the make system.
"""
lldbLib = os.environ[
"LLDB_LIB_DIR"] if "LLDB_LIB_DIR" in os.environ else None
cc = compiler if compiler else None
if not cc and "CC" in os.environ:
cc = os.environ["CC"]
swiftc = swift.getSwiftCompiler()
# Note the leading space character.
if cc:
if swiftc:
return (" CC=" + cc + " SWIFTC=" + swiftc)
else:
return "CC=\"%s\"" % cc
return ""
def getCmdLine(d):
"""
Helper function to return a properly formatted command line argument(s)
string used for the make system.
"""
# If d is None or an empty mapping, just return an empty string.
if not d:
return ""
pattern = '%s="%s"' if "win32" in sys.platform else "%s='%s'"
def setOrAppendVariable(k, v):
append_vars = ["CFLAGS", "CFLAGS_EXTRAS", "LD_EXTRAS"]
if k in append_vars and k in os.environ:
v = os.environ[k] + " " + v
return pattern % (k, v)
cmdline = " ".join([setOrAppendVariable(k, v) for k, v in list(d.items())])
return cmdline
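# Illustrative behaviour: getCmdLine({'CFLAGS': '-g'}) returns "CFLAGS='-g'"
# on POSIX and 'CFLAGS="-g"' on win32; keys named in append_vars also pick
# up any value already present in os.environ.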
def runBuildCommands(commands, sender):
try:
lldbtest.system(commands, sender=sender)
except subprocess.CalledProcessError as called_process_error:
# Convert to a build-specific error.
# We don't do that in lldbtest.system() since that
# is more general purpose.
raise build_exception.BuildError(called_process_error)
def buildDefault(
sender=None,
architecture=None,
compiler=None,
dictionary=None,
testdir=None,
testname=None):
"""Build the binaries the default way."""
commands = []
commands.append(getMake(testdir, testname) + ["all", getArchSpec(architecture),
getCCSpec(compiler), getCmdLine(dictionary)])
runBuildCommands(commands, sender=sender)
# True signifies that we can handle building default.
return True
def safeGetEnviron(name, default=None):
return os.environ.get(name, default)
def buildDwarf(
sender=None,
architecture=None,
compiler=None,
dictionary=None,
testdir=None,
testname=None):
"""Build the binaries with dwarf debug info."""
commands = []
commands.append(getMake(testdir, testname) +
["MAKE_DSYM=NO", getArchSpec(architecture),
getCCSpec(compiler), getCmdLine(dictionary)])
runBuildCommands(commands, sender=sender)
# True signifies that we can handle building dwarf.
return True
def buildDwo(
sender=None,
architecture=None,
compiler=None,
dictionary=None,
testdir=None,
testname=None):
"""Build the binaries with dwarf debug info."""
commands = []
commands.append(getMake(testdir, testname) +
["MAKE_DSYM=NO", "MAKE_DWO=YES",
getArchSpec(architecture),
getCCSpec(compiler),
getCmdLine(dictionary)])
runBuildCommands(commands, sender=sender)
# True signifies that we can handle building dwo.
return True
def buildGModules(
sender=None,
architecture=None,
compiler=None,
dictionary=None,
testdir=None,
testname=None):
"""Build the binaries with dwarf debug info."""
commands = []
commands.append(getMake(testdir, testname) +
["MAKE_DSYM=NO",
"MAKE_GMODULES=YES",
getArchSpec(architecture),
getCCSpec(compiler),
getCmdLine(dictionary)])
runBuildCommands(commands, sender=sender)
# True signifies that we can handle building with gmodules.
return True
def cleanup(sender=None, dictionary=None):
"""Perform a platform-specific cleanup after the test."""
return True
|
rgeleta/odoo | refs/heads/8.0 | addons/sale_service/models/__init__.py | 354 | import sale_service |
nagyistoce/devide | refs/heads/master | modules/vtk_basic/vtkSimpleImageFilterExample.py | 7 | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkSimpleImageFilterExample(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkSimpleImageFilterExample(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|