repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
hodgem/XNATImageViewer | src/main/scripts/viewer/X/lib/selenium/selenium/webdriver/remote/utils.py | 37 | 3643 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import zipfile
try:
import json
except ImportError: # < 2.6
import simplejson as json
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.common.exceptions import NoSuchElementException
LOGGER = logging.getLogger(__name__)
def format_json(json_struct):
    """Serialize *json_struct* to a human-readable, 4-space-indented JSON string."""
    return json.dumps(json_struct, indent=4)
def dump_json(json_struct):
    """Serialize *json_struct* to a compact JSON string."""
    return json.dumps(json_struct)
def load_json(s):
    """Parse the JSON string *s* into the corresponding Python object."""
    return json.loads(s)
def handle_find_element_exception(e):
    """Translate a server-side "element not found" error into a
    NoSuchElementException; re-raise anything else unchanged.
    """
    message = e.response["value"]["message"]
    if "Unable to find" in message or "Unable to locate" in message:
        raise NoSuchElementException("Unable to locate element:")
    raise e
def return_value_if_exists(resp):
    """Return ``resp["value"]`` when *resp* is a non-empty mapping containing
    a "value" key; otherwise return None (implicitly).
    """
    if not resp:
        return None
    if "value" not in resp:
        return None
    return resp["value"]
def get_root_parent(elem):
    """Walk the ``.parent`` chain starting at *elem* and return the topmost
    ancestor (the first object in the chain with no ``parent`` attribute).
    """
    node = elem.parent
    while hasattr(node, "parent"):
        node = node.parent
    return node
def unzip_to_temp_dir(zip_file_name):
    """Unzip ``zip_file_name`` into a fresh temporary directory.

    Returns the path of the directory containing the unzipped files on
    success; returns None if the path is empty/missing, the archive is
    corrupt, or an I/O error occurs during extraction.
    """
    if not zip_file_name or not os.path.exists(zip_file_name):
        return None
    zf = zipfile.ZipFile(zip_file_name)
    try:
        # testzip() returns the name of the first bad member, or None.
        if zf.testzip() is not None:
            return None
        LOGGER.info("Extracting zipped file: %s" % zip_file_name)
        tempdir = tempfile.mkdtemp()
        try:
            for zip_name in zf.namelist():
                # We have no knowledge of the OS where the zipped file was
                # created, so normalize both "\" and "/" to the local
                # path separator.
                name = (zip_name.replace("\\", os.path.sep)
                        .replace("/", os.path.sep))
                dest = os.path.join(tempdir, name)
                if name.endswith(os.path.sep):
                    # Explicit directory entry; makedirs handles nesting.
                    if not os.path.exists(dest):
                        os.makedirs(dest)
                        LOGGER.debug("Directory %s created." % dest)
                else:
                    # Create intermediate directories even when the archive
                    # carries no explicit entries for them (fixes extraction
                    # of nested files from such archives).
                    parent = os.path.dirname(dest)
                    if parent and not os.path.exists(parent):
                        os.makedirs(parent)
                    LOGGER.debug("Copying file %s......" % dest)
                    outfile = open(dest, 'wb')
                    try:
                        outfile.write(zf.read(zip_name))
                    finally:
                        outfile.close()
                    LOGGER.debug("File %s copied." % dest)
            LOGGER.info("Unzipped file can be found at %s" % tempdir)
            return tempdir
        except IOError as err:  # 'as' syntax: works on Python 2.6+ and 3.x
            LOGGER.error("Error in extracting webdriver.xpi: %s" % err)
            return None
    finally:
        zf.close()  # the original leaked the open zip handle
| bsd-3-clause |
GbalsaC/bitnamiP | pavelib/utils/envs.py | 1 | 6330 | """
Helper functions for loading environment settings.
"""
from __future__ import print_function
import os
import sys
import json
from lazy import lazy
from path import path
import memcache
class Env(object):
    """
    Load information about the execution environment.

    NOTE: most attributes are computed at class-definition (import) time,
    including filesystem walks and a memcache client connection.
    """

    # Root of the git repository (edx-platform)
    REPO_ROOT = path(__file__).abspath().parent.parent.parent

    # Reports Directory
    REPORT_DIR = REPO_ROOT / 'reports'
    METRICS_DIR = REPORT_DIR / 'metrics'

    # Python unittest dirs
    PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"

    # Bok_choy dirs
    BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
    BOK_CHOY_LOG_DIR = REPO_ROOT / "test_root" / "log"
    BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
    BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"

    # If set, put reports for run in "unique" directories.
    # The main purpose of this is to ensure that the reports can be 'slurped'
    # in the main jenkins flow job without overwriting the reports from other
    # build steps. For local development/testing, this shouldn't be needed.
    if os.environ.get("SHARD", None):
        shard_str = "shard_{}".format(os.environ.get("SHARD"))
        BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
        BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str

    # For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
    # For this reason, the stubs package is currently located in the Django app called "terrain"
    # where other lettuce configuration is stored.
    BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"

    # Directory that videos are served from
    VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"

    # Ports and log file locations for the LMS/Studio servers started
    # for bok-choy test runs.
    BOK_CHOY_SERVERS = {
        'lms': {
            'port': 8003,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
        },
        'cms': {
            'port': 8031,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
        }
    }

    # Stub services (port, log file, optional config string) launched
    # alongside the bok-choy servers.
    BOK_CHOY_STUBS = {
        'xqueue': {
            'port': 8040,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
            'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
        },
        'ora': {
            'port': 8041,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
            'config': '',
        },
        'comments': {
            'port': 4567,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
        },
        'video': {
            'port': 8777,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
            'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
        },
        'youtube': {
            'port': 9080,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
        },
        'edxnotes': {
            'port': 8042,
            'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
        }
    }

    # Mongo databases that will be dropped before/after the tests run
    BOK_CHOY_MONGO_DATABASE = "test"

    # NOTE(review): the memcache client is created at import time and
    # presumably targets a local memcached on the default port — confirm.
    BOK_CHOY_CACHE = memcache.Client(['0.0.0.0:11211'], debug=0)

    # Test Ids Directory
    TEST_DIR = REPO_ROOT / ".testids"

    # Files used to run each of the js test suites
    # TODO: Store this as a dict. Order seems to matter for some
    # reason. See issue TE-415.
    JS_TEST_ID_FILES = [
        REPO_ROOT / 'lms/static/js_test.yml',
        REPO_ROOT / 'lms/static/js_test_coffee.yml',
        REPO_ROOT / 'cms/static/js_test.yml',
        REPO_ROOT / 'cms/static/js_test_squire.yml',
        REPO_ROOT / 'common/lib/xmodule/xmodule/js/js_test.yml',
        REPO_ROOT / 'common/static/js_test.yml',
        REPO_ROOT / 'common/static/js_test_requirejs.yml',
    ]

    # Short keys naming each js suite; order parallels JS_TEST_ID_FILES.
    JS_TEST_ID_KEYS = [
        'lms',
        'lms-coffee',
        'cms',
        'cms-squire',
        'xmodule',
        'common',
        'common-requirejs'
    ]

    JS_REPORT_DIR = REPORT_DIR / 'javascript'

    # Directories used for common/lib/ tests
    # (one entry per package directory under common/lib, plus paver's own tests)
    LIB_TEST_DIRS = []
    for item in (REPO_ROOT / "common/lib").listdir():
        if (REPO_ROOT / 'common/lib' / item).isdir():
            LIB_TEST_DIRS.append(path("common/lib") / item.basename())
    LIB_TEST_DIRS.append(path("pavelib/paver_tests"))

    # Directory for i18n test reports
    I18N_REPORT_DIR = REPORT_DIR / 'i18n'

    # Service variant (lms, cms, etc.) configured with an environment variable
    # We use this to determine which envs.json file to load.
    SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)

    # If service variant not configured in env, then pass the correct
    # environment for lms / cms
    if not SERVICE_VARIANT:  # this will intentionally catch "";
        if any(i in sys.argv[1:] for i in ('cms', 'studio')):
            SERVICE_VARIANT = 'cms'
        else:
            SERVICE_VARIANT = 'lms'

    @lazy
    def env_tokens(self):
        """
        Return a dict of environment settings.
        If we couldn't find the JSON file, issue a warning and return an empty dict.
        """
        # Find the env JSON file
        if self.SERVICE_VARIANT:
            env_path = self.REPO_ROOT.parent / "conf/{service}.env.json".format(service=self.SERVICE_VARIANT)
        else:
            env_path = path("env.json").abspath()
        # If the file does not exist, here or one level up,
        # issue a warning and return an empty dict
        if not env_path.isfile():
            env_path = env_path.parent.parent / env_path.basename()
        if not env_path.isfile():
            print(
                "Warning: could not find environment JSON file "
                "at '{path}'".format(path=env_path),
                file=sys.stderr,
            )
            return dict()
        # Otherwise, load the file as JSON and return the resulting dict
        try:
            with open(env_path) as env_file:
                return json.load(env_file)
        except ValueError:
            # Malformed JSON is fatal: a silently-empty config would be
            # worse than stopping the build here.
            print(
                "Error: Could not parse JSON "
                "in {path}".format(path=env_path),
                file=sys.stderr,
            )
            sys.exit(1)

    @lazy
    def feature_flags(self):
        """
        Return a dictionary of feature flags configured by the environment.
        """
        return self.env_tokens.get('FEATURES', dict())
| agpl-3.0 |
patricksnape/imageio | imageio/plugins/_freeimage.py | 4 | 51820 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
# styletest: ignore E261
""" Module imageio/freeimage.py
This module contains the wrapper code for the freeimage library.
The functions defined in this module are relatively thin; just thin
enough so that arguments and results are native Python/numpy data
types.
"""
from __future__ import absolute_import, print_function, with_statement
import os
import sys
import ctypes
import threading
from logging import warn
import numpy
from ..core import (get_remote_file, load_lib, Dict, resource_dirs,
string_types, binary_type, IS_PYPY, get_platform,
InternetNotAllowedError)
# Set True to exercise the PyPy fallback path (arrays without strides).
TEST_NUMPY_NO_STRIDES = False  # To test pypy fallback

# Filename of the bundled FreeImage binary for each supported platform key
# (as returned by get_platform()).
FNAME_PER_PLATFORM = {
    'osx32': 'libfreeimage-3.16.0-osx10.6.dylib',  # universal library
    'osx64': 'libfreeimage-3.16.0-osx10.6.dylib',
    'win32': 'FreeImage-3.15.4-win32.dll',
    'win64': 'FreeImage-3.15.1-win64.dll',
    'linux32': 'libfreeimage-3.16.0-linux32.so',
    'linux64': 'libfreeimage-3.16.0-linux64.so',
}
def get_freeimage_lib():
    """ Ensure we have our version of the binary freeimage lib.

    Returns the path to the library, or None when no binary is available
    for this platform (or downloading it is not possible/allowed).
    """
    # An explicit override via the environment wins.
    lib_path = os.getenv('IMAGEIO_FREEIMAGE_LIB', None)
    if lib_path:  # pragma: no cover
        return lib_path
    # Work out which binary we ship for this platform, if any.
    # If we do not provide a binary, the system may still do ...
    plat = get_platform()
    if not plat or plat not in FNAME_PER_PLATFORM:
        return None
    try:
        return get_remote_file('freeimage/' + FNAME_PER_PLATFORM[plat])
    except InternetNotAllowedError:
        pass
    except RuntimeError as e:  # pragma: no cover
        warn(str(e))
# Encode a filename to bytes for the current system.
# (A real def instead of a name-bound lambda — PEP 8 E731 — so tracebacks
# show the function name.)
def efn(x):
    """Encode the str *x* to bytes using the OS filesystem encoding."""
    return x.encode(sys.getfilesystemencoding())


# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255
GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32)
class FI_TYPES(object):
    """FreeImage image-type constants (FIT_*) plus lookup tables that
    translate between FreeImage types, numpy dtypes, and channel counts.
    """

    FIT_UNKNOWN = 0
    FIT_BITMAP = 1
    FIT_UINT16 = 2
    FIT_INT16 = 3
    FIT_UINT32 = 4
    FIT_INT32 = 5
    FIT_FLOAT = 6
    FIT_DOUBLE = 7
    FIT_COMPLEX = 8
    FIT_RGB16 = 9
    FIT_RGBA16 = 10
    FIT_RGBF = 11
    FIT_RGBAF = 12

    # FreeImage type -> numpy dtype of the pixel data
    dtypes = {
        FIT_BITMAP: numpy.uint8,
        FIT_UINT16: numpy.uint16,
        FIT_INT16: numpy.int16,
        FIT_UINT32: numpy.uint32,
        FIT_INT32: numpy.int32,
        FIT_FLOAT: numpy.float32,
        FIT_DOUBLE: numpy.float64,
        FIT_COMPLEX: numpy.complex128,
        FIT_RGB16: numpy.uint16,
        FIT_RGBA16: numpy.uint16,
        FIT_RGBF: numpy.float32,
        FIT_RGBAF: numpy.float32
    }

    # (numpy dtype, number of channels) -> FreeImage type
    fi_types = {
        (numpy.uint8, 1): FIT_BITMAP,
        (numpy.uint8, 3): FIT_BITMAP,
        (numpy.uint8, 4): FIT_BITMAP,
        (numpy.uint16, 1): FIT_UINT16,
        (numpy.int16, 1): FIT_INT16,
        (numpy.uint32, 1): FIT_UINT32,
        (numpy.int32, 1): FIT_INT32,
        (numpy.float32, 1): FIT_FLOAT,
        (numpy.float64, 1): FIT_DOUBLE,
        (numpy.complex128, 1): FIT_COMPLEX,
        (numpy.uint16, 3): FIT_RGB16,
        (numpy.uint16, 4): FIT_RGBA16,
        (numpy.float32, 3): FIT_RGBF,
        (numpy.float32, 4): FIT_RGBAF
    }

    # FreeImage type -> extra array dimensions beyond height x width
    # (empty list for single-channel types, [3]/[4] for RGB/RGBA)
    extra_dims = {
        FIT_UINT16: [],
        FIT_INT16: [],
        FIT_UINT32: [],
        FIT_INT32: [],
        FIT_FLOAT: [],
        FIT_DOUBLE: [],
        FIT_COMPLEX: [],
        FIT_RGB16: [3],
        FIT_RGBA16: [4],
        FIT_RGBF: [3],
        FIT_RGBAF: [4]
    }
class IO_FLAGS(object):
    """Per-format load/save flag constants, mirroring the FreeImage
    FIF_LOAD_* / *_DEFAULT flag values. Values are passed straight through
    to FreeImage_Load / FreeImage_Save.
    """

    FIF_LOAD_NOPIXELS = 0x8000  # loading: load the image header only
    #                           # (not supported by all plugins)
    BMP_DEFAULT = 0
    BMP_SAVE_RLE = 1
    CUT_DEFAULT = 0
    DDS_DEFAULT = 0
    EXR_DEFAULT = 0  # save data as half with piz-based wavelet compression
    EXR_FLOAT = 0x0001  # save data as float instead of half (not recommended)
    EXR_NONE = 0x0002  # save with no compression
    EXR_ZIP = 0x0004  # save with zlib compression, in blocks of 16 scan lines
    EXR_PIZ = 0x0008  # save with piz-based wavelet compression
    EXR_PXR24 = 0x0010  # save with lossy 24-bit float compression
    EXR_B44 = 0x0020  # save with lossy 44% float compression
    #                 # - goes to 22% when combined with EXR_LC
    EXR_LC = 0x0040  # save images with one luminance and two chroma channels,
    #                # rather than as RGB (lossy compression)
    FAXG3_DEFAULT = 0
    GIF_DEFAULT = 0
    GIF_LOAD256 = 1  # Load the image as a 256 color image with ununsed
    #                # palette entries, if it's 16 or 2 color
    GIF_PLAYBACK = 2  # 'Play' the GIF to generate each frame (as 32bpp)
    #                 # instead of returning raw frame data when loading
    HDR_DEFAULT = 0
    ICO_DEFAULT = 0
    ICO_MAKEALPHA = 1  # convert to 32bpp and create an alpha channel from the
    #                  # AND-mask when loading
    IFF_DEFAULT = 0
    J2K_DEFAULT = 0  # save with a 16:1 rate
    JP2_DEFAULT = 0  # save with a 16:1 rate
    JPEG_DEFAULT = 0  # loading (see JPEG_FAST);
    #                 # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
    JPEG_FAST = 0x0001  # load the file as fast as possible,
    #                   # sacrificing some quality
    JPEG_ACCURATE = 0x0002  # load the file with the best quality,
    #                       # sacrificing some speed
    JPEG_CMYK = 0x0004  # load separated CMYK "as is"
    #                   # (use | to combine with other load flags)
    JPEG_EXIFROTATE = 0x0008  # load and rotate according to
    #                         # Exif 'Orientation' tag if available
    JPEG_QUALITYSUPERB = 0x80  # save with superb quality (100:1)
    JPEG_QUALITYGOOD = 0x0100  # save with good quality (75:1)
    JPEG_QUALITYNORMAL = 0x0200  # save with normal quality (50:1)
    JPEG_QUALITYAVERAGE = 0x0400  # save with average quality (25:1)
    JPEG_QUALITYBAD = 0x0800  # save with bad quality (10:1)
    JPEG_PROGRESSIVE = 0x2000  # save as a progressive-JPEG
    #                          # (use | to combine with other save flags)
    JPEG_SUBSAMPLING_411 = 0x1000  # save with high 4x1 chroma
    #                              # subsampling (4:1:1)
    JPEG_SUBSAMPLING_420 = 0x4000  # save with medium 2x2 medium chroma
    #                              # subsampling (4:2:0) - default value
    JPEG_SUBSAMPLING_422 = 0x8000  # save /w low 2x1 chroma subsampling (4:2:2)
    JPEG_SUBSAMPLING_444 = 0x10000  # save with no chroma subsampling (4:4:4)
    JPEG_OPTIMIZE = 0x20000  # on saving, compute optimal Huffman coding tables
    #                        # (can reduce a few percent of file size)
    JPEG_BASELINE = 0x40000  # save basic JPEG, without metadata or any markers
    KOALA_DEFAULT = 0
    LBM_DEFAULT = 0
    MNG_DEFAULT = 0
    PCD_DEFAULT = 0
    PCD_BASE = 1  # load the bitmap sized 768 x 512
    PCD_BASEDIV4 = 2  # load the bitmap sized 384 x 256
    PCD_BASEDIV16 = 3  # load the bitmap sized 192 x 128
    PCX_DEFAULT = 0
    PFM_DEFAULT = 0
    PICT_DEFAULT = 0
    PNG_DEFAULT = 0
    PNG_IGNOREGAMMA = 1  # loading: avoid gamma correction
    PNG_Z_BEST_SPEED = 0x0001  # save using ZLib level 1 compression flag
    #                          # (default value is 6)
    PNG_Z_DEFAULT_COMPRESSION = 0x0006  # save using ZLib level 6 compression
    #                                   # flag (default recommended value)
    PNG_Z_BEST_COMPRESSION = 0x0009  # save using ZLib level 9 compression flag
    #                                # (default value is 6)
    PNG_Z_NO_COMPRESSION = 0x0100  # save without ZLib compression
    PNG_INTERLACED = 0x0200  # save using Adam7 interlacing (use | to combine
    #                        # with other save flags)
    PNM_DEFAULT = 0
    PNM_SAVE_RAW = 0  # Writer saves in RAW format (i.e. P4, P5 or P6)
    PNM_SAVE_ASCII = 1  # Writer saves in ASCII format (i.e. P1, P2 or P3)
    PSD_DEFAULT = 0
    PSD_CMYK = 1  # reads tags for separated CMYK (default is conversion to RGB)
    PSD_LAB = 2  # reads tags for CIELab (default is conversion to RGB)
    RAS_DEFAULT = 0
    RAW_DEFAULT = 0  # load the file as linear RGB 48-bit
    RAW_PREVIEW = 1  # try to load the embedded JPEG preview with included
    #                # Exif Data or default to RGB 24-bit
    RAW_DISPLAY = 2  # load the file as RGB 24-bit
    SGI_DEFAULT = 0
    TARGA_DEFAULT = 0
    TARGA_LOAD_RGB888 = 1  # Convert RGB555 and ARGB8888 -> RGB888.
    TARGA_SAVE_RLE = 2  # Save with RLE compression
    TIFF_DEFAULT = 0
    TIFF_CMYK = 0x0001  # reads/stores tags for separated CMYK
    #                   # (use | to combine with compression flags)
    TIFF_PACKBITS = 0x0100  # save using PACKBITS compression
    TIFF_DEFLATE = 0x0200  # save using DEFLATE (a.k.a. ZLIB) compression
    TIFF_ADOBE_DEFLATE = 0x0400  # save using ADOBE DEFLATE compression
    TIFF_NONE = 0x0800  # save without any compression
    TIFF_CCITTFAX3 = 0x1000  # save using CCITT Group 3 fax encoding
    TIFF_CCITTFAX4 = 0x2000  # save using CCITT Group 4 fax encoding
    TIFF_LZW = 0x4000  # save using LZW compression
    TIFF_JPEG = 0x8000  # save using JPEG compression
    TIFF_LOGLUV = 0x10000  # save using LogLuv compression
    WBMP_DEFAULT = 0
    XBM_DEFAULT = 0
    XPM_DEFAULT = 0
class METADATA_MODELS(object):
    """FreeImage metadata model constants (FIMD_*); each model is a
    namespace of tags (EXIF, IPTC, XMP, ...) within an image file.
    """

    FIMD_COMMENTS = 0
    FIMD_EXIF_MAIN = 1
    FIMD_EXIF_EXIF = 2
    FIMD_EXIF_GPS = 3
    FIMD_EXIF_MAKERNOTE = 4
    FIMD_EXIF_INTEROP = 5
    FIMD_IPTC = 6
    FIMD_XMP = 7
    FIMD_GEOTIFF = 8
    FIMD_ANIMATION = 9
class METADATA_DATATYPE(object):
    """FreeImage metadata tag data-type constants (FIDT_*) and a mapping
    from them to numpy dtypes used when decoding tag values.
    """

    FIDT_BYTE = 1  # 8-bit unsigned integer
    FIDT_ASCII = 2  # 8-bit bytes w/ last byte null
    FIDT_SHORT = 3  # 16-bit unsigned integer
    FIDT_LONG = 4  # 32-bit unsigned integer
    FIDT_RATIONAL = 5  # 64-bit unsigned fraction
    FIDT_SBYTE = 6  # 8-bit signed integer
    FIDT_UNDEFINED = 7  # 8-bit untyped data
    FIDT_SSHORT = 8  # 16-bit signed integer
    FIDT_SLONG = 9  # 32-bit signed integer
    FIDT_SRATIONAL = 10  # 64-bit signed fraction
    FIDT_FLOAT = 11  # 32-bit IEEE floating point
    FIDT_DOUBLE = 12  # 64-bit IEEE floating point
    FIDT_IFD = 13  # 32-bit unsigned integer (offset)
    FIDT_PALETTE = 14  # 32-bit RGBQUAD
    FIDT_LONG8 = 16  # 64-bit unsigned integer
    FIDT_SLONG8 = 17  # 64-bit signed integer
    FIDT_IFD8 = 18  # 64-bit unsigned integer (offset)

    # Tag type -> numpy dtype (structured dtypes for rationals/palettes).
    # Note: FIDT_ASCII is handled separately (decoded to str, not numpy).
    dtypes = {
        FIDT_BYTE: numpy.uint8,
        FIDT_SHORT: numpy.uint16,
        FIDT_LONG: numpy.uint32,
        FIDT_RATIONAL: [('numerator', numpy.uint32),
                        ('denominator', numpy.uint32)],
        FIDT_LONG8: numpy.uint64,
        FIDT_SLONG8: numpy.int64,
        FIDT_IFD8: numpy.uint64,
        FIDT_SBYTE: numpy.int8,
        FIDT_UNDEFINED: numpy.uint8,
        FIDT_SSHORT: numpy.int16,
        FIDT_SLONG: numpy.int32,
        FIDT_SRATIONAL: [('numerator', numpy.int32),
                        ('denominator', numpy.int32)],
        FIDT_FLOAT: numpy.float32,
        FIDT_DOUBLE: numpy.float64,
        FIDT_IFD: numpy.uint32,
        FIDT_PALETTE: [('R', numpy.uint8), ('G', numpy.uint8),
                       ('B', numpy.uint8), ('A', numpy.uint8)],
    }
class Freeimage(object):
    """ Class to represent an interface to the FreeImage library.
    This class is relatively thin. It provides a Pythonic API that converts
    Freeimage objects to Python objects, but that's about it.
    The actual implementation should be provided by the plugins.

    The recommended way to call into the Freeimage library (so that
    errors and warnings show up in the right moment) is to use this
    object as a context manager:

        with imageio.fi as lib:
            lib.FreeImage_GetPalette()
    """

    _API = {
        # All we're doing here is telling ctypes that some of the
        # FreeImage functions return pointers instead of integers. (On
        # 64-bit systems, without this information the pointers get
        # truncated and crashes result). There's no need to list
        # functions that return ints, or the types of the parameters
        # to these or other functions -- that's fine to do implicitly.
        # Note that the ctypes immediately converts the returned void_p
        # back to a python int again! This is really not helpful,
        # because then passing it back to another library call will
        # cause truncation-to-32-bits on 64-bit systems. Thanks, ctypes!
        # So after these calls one must immediately re-wrap the int as
        # a c_void_p if it is to be passed back into FreeImage.
        'FreeImage_AllocateT': (ctypes.c_void_p, None),
        'FreeImage_FindFirstMetadata': (ctypes.c_void_p, None),
        'FreeImage_GetBits': (ctypes.c_void_p, None),
        'FreeImage_GetPalette': (ctypes.c_void_p, None),
        'FreeImage_GetTagKey': (ctypes.c_char_p, None),
        'FreeImage_GetTagValue': (ctypes.c_void_p, None),
        'FreeImage_CreateTag': (ctypes.c_void_p, None),
        'FreeImage_Save': (ctypes.c_void_p, None),
        'FreeImage_Load': (ctypes.c_void_p, None),
        'FreeImage_LoadFromMemory': (ctypes.c_void_p, None),
        'FreeImage_OpenMultiBitmap': (ctypes.c_void_p, None),
        'FreeImage_LoadMultiBitmapFromMemory': (ctypes.c_void_p, None),
        'FreeImage_LockPage': (ctypes.c_void_p, None),
        'FreeImage_OpenMemory': (ctypes.c_void_p, None),
        # 'FreeImage_ReadMemory': (ctypes.c_void_p, None),
        # 'FreeImage_CloseMemory': (ctypes.c_void_p, None),
        'FreeImage_GetVersion': (ctypes.c_char_p, None),
        'FreeImage_GetFIFExtensionList': (ctypes.c_char_p, None),
        'FreeImage_GetFormatFromFIF': (ctypes.c_char_p, None),
        'FreeImage_GetFIFDescription': (ctypes.c_char_p, None),
        'FreeImage_ColorQuantizeEx': (ctypes.c_void_p, None),
        # Pypy wants some extra definitions, so here we go ...
        'FreeImage_IsLittleEndian': (ctypes.c_int, None),
        'FreeImage_SetOutputMessage': (ctypes.c_void_p, None),
        'FreeImage_GetFIFCount': (ctypes.c_int, None),
        'FreeImage_IsPluginEnabled': (ctypes.c_int, None),
        'FreeImage_GetFileType': (ctypes.c_int, None),
        #
        'FreeImage_GetTagType': (ctypes.c_int, None),
        'FreeImage_GetTagLength': (ctypes.c_int, None),
        'FreeImage_FindNextMetadata': (ctypes.c_int, None),
        'FreeImage_FindCloseMetadata': (ctypes.c_void_p, None),
        #
        'FreeImage_GetFIFFromFilename': (ctypes.c_int, None),
        'FreeImage_FIFSupportsReading': (ctypes.c_int, None),
        'FreeImage_FIFSupportsWriting': (ctypes.c_int, None),
        'FreeImage_FIFSupportsExportType': (ctypes.c_int, None),
        'FreeImage_FIFSupportsExportBPP': (ctypes.c_int, None),
        'FreeImage_GetHeight': (ctypes.c_int, None),
        'FreeImage_GetWidth': (ctypes.c_int, None),
        'FreeImage_GetImageType': (ctypes.c_int, None),
        'FreeImage_GetBPP': (ctypes.c_int, None),
        'FreeImage_GetColorsUsed': (ctypes.c_int, None),
        'FreeImage_ConvertTo32Bits': (ctypes.c_void_p, None),
        'FreeImage_GetPitch': (ctypes.c_int, None),
        'FreeImage_Unload': (ctypes.c_void_p, None),
    }

    def __init__(self):
        """Set up lazy library loading, a re-entrant lock, and the
        C-callback used to collect FreeImage's warning/error messages.
        The library itself is not loaded until ``self.lib`` is first used.
        """
        # Initialize freeimage lib as None
        self._lib = None
        # A lock to create thread-safety
        self._lock = threading.RLock()
        # Init log messages lists
        self._messages = []
        # Select functype for error handler (stdcall on Windows, cdecl else)
        if sys.platform.startswith('win'):
            functype = ctypes.WINFUNCTYPE
        else:
            functype = ctypes.CFUNCTYPE

        # Create output message handler; keeps at most 256 messages.
        @functype(None, ctypes.c_int, ctypes.c_char_p)
        def error_handler(fif, message):
            message = message.decode('utf-8')
            self._messages.append(message)
            while (len(self._messages)) > 256:
                self._messages.pop(0)

        # Make sure to keep a ref to function (else ctypes may GC it)
        self._error_handler = error_handler

    @property
    def lib(self):
        """The loaded ctypes library object; loads it on first access.
        Raises RuntimeError (on every access) if loading failed.
        """
        if self._lib is None:
            try:
                self.load_freeimage()
            except OSError as err:
                # Store the error message in place of the lib so the
                # failure is re-raised consistently on later accesses.
                self._lib = 'The freeimage library could not be loaded: '
                self._lib += str(err)
        if isinstance(self._lib, str):
            raise RuntimeError(self._lib)
        return self._lib

    def has_lib(self):
        """Return True if the FreeImage library could be loaded."""
        try:
            self.lib
        except Exception:
            return False
        return True

    def load_freeimage(self):
        """ Try to load the freeimage lib from the system. If not successful,
        try to download the imageio version and try again.
        """
        # Load library and register API
        success = False
        try:
            # Try without forcing a download, but giving preference
            # to the imageio-provided lib (if previously downloaded)
            self._load_freeimage()
            self._register_api()
            # NOTE: string comparison of versions — works for the 3.x
            # range used here.
            if self.lib.FreeImage_GetVersion().decode('utf-8') >= '3.15':
                success = True
        except OSError:
            pass
        if not success:
            # Ensure we have our own lib, try again
            get_freeimage_lib()
            self._load_freeimage()
            self._register_api()
        # Wrap up: install message handler and remember version string
        self.lib.FreeImage_SetOutputMessage(self._error_handler)
        self.lib_version = self.lib.FreeImage_GetVersion().decode('utf-8')

    def _load_freeimage(self):
        """Locate and load the shared library, preferring bundled binaries."""
        # Define names
        lib_names = ['freeimage', 'libfreeimage']
        exact_lib_names = ['FreeImage', 'libfreeimage.dylib',
                           'libfreeimage.so', 'libfreeimage.so.3']
        # Add names of libraries that we provide (that file may not exist)
        res_dirs = resource_dirs()
        plat = get_platform()
        if plat:  # Can be None on e.g. FreeBSD
            fname = FNAME_PER_PLATFORM[plat]
            for dir in res_dirs:
                exact_lib_names.insert(0,
                                       os.path.join(dir, 'freeimage', fname))
        # Load
        try:
            lib, fname = load_lib(exact_lib_names, lib_names, res_dirs)
        except OSError as err:  # pragma: no cover
            err_msg = str(err) + '\nPlease install the FreeImage library.'
            raise OSError(err_msg)
        # Store
        self._lib = lib
        self.lib_fname = fname

    def _register_api(self):
        """Apply the restype/argtypes declarations in _API to the lib."""
        # Albert's ctypes pattern
        for f, (restype, argtypes) in self._API.items():
            func = getattr(self.lib, f)
            func.restype = restype
            func.argtypes = argtypes

    ## Handling of output messages

    def __enter__(self):
        # Serialize library access; messages produced inside the block
        # are surfaced as warnings on exit.
        self._lock.acquire()
        return self.lib

    def __exit__(self, *args):
        self._show_any_warnings()
        self._lock.release()

    def _reset_log(self):
        """ Reset the list of output messages. Call this before
        loading or saving an image with the FreeImage API.
        """
        self._messages = []

    def _get_error_message(self):
        """ Get the output messages produced since the last reset as
        one string. Returns 'No known reason.' if there are no messages.
        Also resets the log.
        """
        if self._messages:
            res = ' '.join(self._messages)
            self._reset_log()
            return res
        else:
            return 'No known reason.'

    def _show_any_warnings(self):
        """ If there were any messages since the last reset, show them
        as a warning. Otherwise do nothing. Also resets the messages.
        """
        if self._messages:
            warn('imageio.freeimage warning: ' + self._get_error_message())
            self._reset_log()

    def get_output_log(self):
        """ Return a list of the last 256 output messages
        (warnings and errors) produced by the FreeImage library.
        """
        # This message log is not cleared/reset, but kept to 256 elements.
        return [m for m in self._messages]

    def getFIF(self, filename, mode, bytes=None):
        """ Get the freeimage Format (FIF) from a given filename.
        If mode is 'r', will try to determine the format by reading
        the file, otherwise only the filename is used.

        This function also tests whether the format supports reading/writing.
        """
        with self as lib:
            # Init
            ftype = -1
            if mode not in 'rw':
                raise ValueError('Invalid mode (must be "r" or "w").')
            # Try getting format from the content. Note that some files
            # do not have a header that allows reading the format from
            # the file.
            if mode == 'r':
                if bytes is not None:
                    fimemory = lib.FreeImage_OpenMemory(
                        ctypes.c_char_p(bytes), len(bytes))
                    ftype = lib.FreeImage_GetFileTypeFromMemory(
                        ctypes.c_void_p(fimemory), len(bytes))
                    lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
                if (ftype == -1) and os.path.isfile(filename):
                    ftype = lib.FreeImage_GetFileType(efn(filename), 0)
            # Try getting the format from the extension
            if ftype == -1:
                ftype = lib.FreeImage_GetFIFFromFilename(efn(filename))
            # Test if ok
            if ftype == -1:
                raise ValueError('Cannot determine format of file "%s"' %
                                 filename)
            elif mode == 'w' and not lib.FreeImage_FIFSupportsWriting(ftype):
                raise ValueError('Cannot write the format of file "%s"' %
                                 filename)
            elif mode == 'r' and not lib.FreeImage_FIFSupportsReading(ftype):
                raise ValueError('Cannot read the format of file "%s"' %
                                 filename)
            else:
                return ftype

    def create_bitmap(self, filename, ftype, flags=0):
        """ create_bitmap(filename, ftype, flags=0)
        Create a wrapped bitmap object.
        """
        return FIBitmap(self, filename, ftype, flags)

    def create_multipage_bitmap(self, filename, ftype, flags=0):
        """ create_multipage_bitmap(filename, ftype, flags=0)
        Create a wrapped multipage bitmap object.
        """
        return FIMultipageBitmap(self, filename, ftype, flags)
class FIBaseBitmap(object):
    """Base wrapper for a FreeImage bitmap handle.

    Owns the native handle (``self._bitmap``, a ctypes void pointer) and
    the cleanup callables that release it; also implements metadata
    reading/writing shared by single- and multi-page bitmaps.
    """

    def __init__(self, fi, filename, ftype, flags):
        # fi: the Freeimage interface object (used as a context manager
        # for every library call); ftype is a FIF format id; flags are
        # IO_FLAGS values passed to load/save.
        self._fi = fi
        self._filename = filename
        self._ftype = ftype
        self._flags = flags
        self._bitmap = None
        self._close_funcs = []

    def __del__(self):
        self.close()

    def close(self):
        """Release the native bitmap via the registered close functions.
        Safe to call multiple times; errors during cleanup are ignored.
        """
        if (self._bitmap is not None) and self._close_funcs:
            for close_func in self._close_funcs:
                try:
                    with self._fi:
                        fun = close_func[0]
                        fun(*close_func[1:])
                except Exception:  # pragma: no cover
                    pass
            self._close_funcs = []
            self._bitmap = None

    def _set_bitmap(self, bitmap, close_func=None):
        """ Function to set the bitmap and specify the function to unload it.
        """
        if self._bitmap is not None:
            pass  # bitmap is converted
        if close_func is None:
            # Default cleanup: plain FreeImage_Unload of this handle.
            close_func = self._fi.lib.FreeImage_Unload, bitmap
        self._bitmap = bitmap
        if close_func:
            self._close_funcs.append(close_func)

    def get_meta_data(self):
        """Read all metadata models from the bitmap into a nested Dict:
        {model_name: {tag_name: value}}. Values are decoded to str for
        ASCII tags, numpy scalars/arrays where a dtype is known, and raw
        bytes otherwise.
        """
        # todo: there is also FreeImage_TagToString, is that useful?
        # and would that work well when reading and then saving?

        # Create a list of (model_name, number) tuples
        models = [(name[5:], number) for name, number in
                  METADATA_MODELS.__dict__.items() if name.startswith('FIMD_')]

        # Prepare
        metadata = Dict()
        tag = ctypes.c_void_p()

        with self._fi as lib:
            # Iterate over all FreeImage meta models
            for model_name, number in models:

                # Find beginning, get search handle
                mdhandle = lib.FreeImage_FindFirstMetadata(number,
                                                           self._bitmap,
                                                           ctypes.byref(tag))
                mdhandle = ctypes.c_void_p(mdhandle)
                if mdhandle:

                    # Iterate over all tags in this model
                    more = True
                    while more:
                        # Get info about tag
                        tag_name = lib.FreeImage_GetTagKey(tag).decode('utf-8')
                        tag_type = lib.FreeImage_GetTagType(tag)
                        byte_size = lib.FreeImage_GetTagLength(tag)
                        char_ptr = ctypes.c_char * byte_size
                        data = char_ptr.from_address(
                            lib.FreeImage_GetTagValue(tag))
                        # Convert in a way compatible with Pypy
                        tag_bytes = binary_type(bytearray(data))
                        # The default value is the raw bytes
                        tag_val = tag_bytes
                        # Convert to a Python value in the metadata dict
                        if tag_type == METADATA_DATATYPE.FIDT_ASCII:
                            tag_val = tag_bytes.decode('utf-8', 'replace')
                        elif tag_type in METADATA_DATATYPE.dtypes:
                            dtype = METADATA_DATATYPE.dtypes[tag_type]
                            if IS_PYPY and isinstance(dtype, (list, tuple)):
                                pass  # pragma: no cover - or we get a segfault
                            else:
                                try:
                                    tag_val = numpy.fromstring(tag_bytes,
                                                               dtype=dtype)
                                    # Scalarize single-element arrays
                                    if len(tag_val) == 1:
                                        tag_val = tag_val[0]
                                except Exception:  # pragma: no cover
                                    pass
                        # Store data in dict
                        subdict = metadata.setdefault(model_name, Dict())
                        subdict[tag_name] = tag_val
                        # Next
                        more = lib.FreeImage_FindNextMetadata(
                            mdhandle, ctypes.byref(tag))

                    # Close search handle for current meta model
                    lib.FreeImage_FindCloseMetadata(mdhandle)

            # Done
            return metadata

    def set_meta_data(self, metadata):
        """Write *metadata* ({model_name: {tag_name: value}}) onto the
        bitmap. Unknown models are silently skipped; tags whose type
        cannot be determined produce a warning and are skipped.
        """
        # Create a dict mapping model_name to number
        models = {}
        for name, number in METADATA_MODELS.__dict__.items():
            if name.startswith('FIMD_'):
                models[name[5:]] = number

        # Create a mapping from numpy.dtype to METADATA_DATATYPE
        def get_tag_type_number(dtype):
            for number, numpy_dtype in METADATA_DATATYPE.dtypes.items():
                if dtype == numpy_dtype:
                    return number
            else:
                return None

        with self._fi as lib:

            for model_name, subdict in metadata.items():

                # Get model number
                number = models.get(model_name, None)
                if number is None:
                    continue  # Unknown model, silent ignore

                for tag_name, tag_val in subdict.items():

                    # Create new tag
                    tag = lib.FreeImage_CreateTag()
                    tag = ctypes.c_void_p(tag)

                    try:
                        # Convert Python value to FI type, val
                        is_ascii = False
                        if isinstance(tag_val, string_types):
                            try:
                                tag_bytes = tag_val.encode('ascii')
                                is_ascii = True
                            except UnicodeError:
                                pass
                        if is_ascii:
                            tag_type = METADATA_DATATYPE.FIDT_ASCII
                            tag_count = len(tag_bytes)
                        else:
                            # Non-string values go through numpy so we can
                            # map their dtype to a FreeImage tag type.
                            if not hasattr(tag_val, 'dtype'):
                                tag_val = numpy.array([tag_val])
                            tag_type = get_tag_type_number(tag_val.dtype)
                            if tag_type is None:
                                warn('imageio.freeimage warning: Could not '
                                     'determine tag type of %r.' % tag_name)
                                continue
                            tag_bytes = tag_val.tostring()
                            tag_count = tag_val.size
                        # Set properties
                        lib.FreeImage_SetTagKey(tag, tag_name.encode('utf-8'))
                        lib.FreeImage_SetTagType(tag, tag_type)
                        lib.FreeImage_SetTagCount(tag, tag_count)
                        lib.FreeImage_SetTagLength(tag, len(tag_bytes))
                        lib.FreeImage_SetTagValue(tag, tag_bytes)
                        # Store tag
                        tag_key = lib.FreeImage_GetTagKey(tag)
                        lib.FreeImage_SetMetadata(number, self._bitmap,
                                                  tag_key, tag)
                    except Exception as err:  # pragma: no cover
                        warn('imagio.freeimage warning: Could not set tag '
                             '%r: %s, %s' % (tag_name,
                                             self._fi._get_error_message(),
                                             str(err)))
                    finally:
                        lib.FreeImage_DeleteTag(tag)
class FIBitmap(FIBaseBitmap):
""" Wrapper for the FI bitmap object.
"""
    def allocate(self, array):
        """Allocate an (empty) native bitmap sized and typed to hold *array*.

        The array's dtype and channel count select the FreeImage type via
        FI_TYPES.fi_types; raises ValueError for unsupported combinations.
        """
        # Prepare array
        assert isinstance(array, numpy.ndarray)
        shape = array.shape
        dtype = array.dtype

        # Get shape and channel info
        r, c = shape[:2]
        if len(shape) == 2:
            n_channels = 1
        elif len(shape) == 3:
            n_channels = shape[2]
        else:
            # NOTE(review): arrays with >3 dims take shape[0] as the channel
            # count — looks unusual; confirm which callers hit this branch.
            n_channels = shape[0]

        # Get fi_type
        try:
            fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)]
            self._fi_type = fi_type
        except KeyError:
            raise ValueError('Cannot write arrays of given type and shape.')

        # Allocate bitmap
        with self._fi as lib:
            bpp = 8 * dtype.itemsize * n_channels
            bitmap = lib.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)
            bitmap = ctypes.c_void_p(bitmap)

            # Check and store
            if not bitmap:  # pragma: no cover
                raise RuntimeError('Could not allocate bitmap for storage: %s'
                                   % self._fi._get_error_message())
            else:
                self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
def load_from_filename(self, filename=None):
if filename is None:
filename = self._filename
with self._fi as lib:
# Create bitmap
bitmap = lib.FreeImage_Load(self._ftype, efn(filename),
self._flags)
bitmap = ctypes.c_void_p(bitmap)
# Check and store
if not bitmap: # pragma: no cover
raise ValueError('Could not load bitmap "%s": %s' %
(self._filename,
self._fi._get_error_message()))
else:
self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
# def load_from_bytes(self, bytes):
# with self._fi as lib:
# # Create bitmap
# fimemory = lib.FreeImage_OpenMemory(
# ctypes.c_char_p(bytes), len(bytes))
# bitmap = lib.FreeImage_LoadFromMemory(
# self._ftype, ctypes.c_void_p(fimemory), self._flags)
# bitmap = ctypes.c_void_p(bitmap)
# lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
#
# # Check
# if not bitmap:
# raise ValueError('Could not load bitmap "%s": %s'
# % (self._filename, self._fi._get_error_message()))
# else:
# self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
def save_to_filename(self, filename=None):
if filename is None:
filename = self._filename
ftype = self._ftype
bitmap = self._bitmap
fi_type = self._fi_type # element type
with self._fi as lib:
# Check if can write
if fi_type == FI_TYPES.FIT_BITMAP:
can_write = lib.FreeImage_FIFSupportsExportBPP(
ftype, lib.FreeImage_GetBPP(bitmap))
else:
can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type)
if not can_write:
raise TypeError('Cannot save image of this format '
'to this file type')
# Save to file
res = lib.FreeImage_Save(ftype, bitmap, efn(filename), self._flags)
# Check
if not res: # pragma: no cover, we do so many checks, this is rare
raise RuntimeError('Could not save file "%s": %s' %
(self._filename,
self._fi._get_error_message()))
# def save_to_bytes(self):
# ftype = self._ftype
# bitmap = self._bitmap
# fi_type = self._fi_type # element type
#
# with self._fi as lib:
# # Check if can write
# if fi_type == FI_TYPES.FIT_BITMAP:
# can_write = lib.FreeImage_FIFSupportsExportBPP(ftype,
# lib.FreeImage_GetBPP(bitmap))
# else:
# can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type)
# if not can_write:
# raise TypeError('Cannot save image of this format '
# 'to this file type')
#
# # Extract the bytes
# fimemory = lib.FreeImage_OpenMemory(0, 0)
# res = lib.FreeImage_SaveToMemory(ftype, bitmap,
# ctypes.c_void_p(fimemory),
# self._flags)
# if res:
# N = lib.FreeImage_TellMemory(ctypes.c_void_p(fimemory))
# result = ctypes.create_string_buffer(N)
# lib.FreeImage_SeekMemory(ctypes.c_void_p(fimemory), 0)
# lib.FreeImage_ReadMemory(result, 1, N, ctypes.c_void_p(fimemory))
# result = result.raw
# lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
#
# # Check
# if not res:
# raise RuntimeError('Could not save file "%s": %s'
# % (self._filename, self._fi._get_error_message()))
#
# # Done
# return result
def get_image_data(self):
dtype, shape, bpp = self._get_type_and_shape()
array = self._wrap_bitmap_bits_in_array(shape, dtype, False)
with self._fi as lib:
isle = lib.FreeImage_IsLittleEndian()
# swizzle the color components and flip the scanlines to go from
# FreeImage's BGR[A] and upside-down internal memory format to
# something more normal
def n(arr):
#return arr[..., ::-1].T # Does not work on numpypy yet
if arr.ndim == 1: # pragma: no cover
return arr[::-1].T
elif arr.ndim == 2: # Always the case here ...
return arr[:, ::-1].T
elif arr.ndim == 3: # pragma: no cover
return arr[:, :, ::-1].T
elif arr.ndim == 4: # pragma: no cover
return arr[:, :, :, ::-1].T
if len(shape) == 3 and isle and dtype.type == numpy.uint8:
b = n(array[0])
g = n(array[1])
r = n(array[2])
if shape[0] == 3:
return numpy.dstack((r, g, b))
elif shape[0] == 4:
a = n(array[3])
return numpy.dstack((r, g, b, a))
else: # pragma: no cover - we check this earlier
raise ValueError('Cannot handle images of shape %s' % shape)
# We need to copy because array does *not* own its memory
# after bitmap is freed.
a = n(array).copy()
return a
def set_image_data(self, array):
# Prepare array
assert isinstance(array, numpy.ndarray)
shape = array.shape
dtype = array.dtype
with self._fi as lib:
isle = lib.FreeImage_IsLittleEndian()
# Calculate shape and channels
r, c = shape[:2]
if len(shape) == 2:
n_channels = 1
w_shape = (c, r)
elif len(shape) == 3:
n_channels = shape[2]
w_shape = (n_channels, c, r)
else:
n_channels = shape[0]
def n(arr): # normalise to freeimage's in-memory format
return arr.T[:, ::-1]
wrapped_array = self._wrap_bitmap_bits_in_array(w_shape, dtype, True)
# swizzle the color components and flip the scanlines to go to
# FreeImage's BGR[A] and upside-down internal memory format
if len(shape) == 3:
R = array[:, :, 0]
G = array[:, :, 1]
B = array[:, :, 2]
if isle:
if dtype.type == numpy.uint8:
wrapped_array[0] = n(B)
wrapped_array[1] = n(G)
wrapped_array[2] = n(R)
elif dtype.type == numpy.uint16:
wrapped_array[0] = n(R)
wrapped_array[1] = n(G)
wrapped_array[2] = n(B)
#
if shape[2] == 4:
A = array[:, :, 3]
wrapped_array[3] = n(A)
else:
wrapped_array[:] = n(array)
if self._need_finish:
self._finish_wrapped_array(wrapped_array)
if len(shape) == 2 and dtype.type == numpy.uint8:
with self._fi as lib:
palette = lib.FreeImage_GetPalette(self._bitmap)
palette = ctypes.c_void_p(palette)
if not palette:
raise RuntimeError('Could not get image palette')
try:
palette_data = GREY_PALETTE.ctypes.data
except Exception: # pragma: no cover - IS_PYPY
palette_data = GREY_PALETTE.__array_interface__['data'][0]
ctypes.memmove(palette, palette_data, 1024)
def _wrap_bitmap_bits_in_array(self, shape, dtype, save):
"""Return an ndarray view on the data in a FreeImage bitmap. Only
valid for as long as the bitmap is loaded (if single page) / locked
in memory (if multipage). This is used in loading data, but
also during saving, to prepare a strided numpy array buffer.
"""
# Get bitmap info
with self._fi as lib:
pitch = lib.FreeImage_GetPitch(self._bitmap)
bits = lib.FreeImage_GetBits(self._bitmap)
# Get more info
height = shape[-1]
byte_size = height * pitch
itemsize = dtype.itemsize
# Get strides
if len(shape) == 3:
strides = (itemsize, shape[0]*itemsize, pitch)
else:
strides = (itemsize, pitch)
# Create numpy array and return
data = (ctypes.c_char*byte_size).from_address(bits)
try:
self._need_finish = False
if TEST_NUMPY_NO_STRIDES:
raise NotImplementedError()
return numpy.ndarray(shape, dtype=dtype, buffer=data,
strides=strides)
except NotImplementedError:
# IS_PYPY - not very efficient. We create a C-contiguous
# numpy array (because pypy does not support Fortran-order)
# and shape it such that the rest of the code can remain.
if save:
self._need_finish = True # Flag to use _finish_wrapped_array
return numpy.zeros(shape, dtype=dtype)
else:
bytes = binary_type(bytearray(data))
array = numpy.fromstring(bytes, dtype=dtype)
# Deal with strides
if len(shape) == 3:
array.shape = shape[2], strides[-1]/shape[0], shape[0]
array2 = array[:shape[2], :shape[1], :shape[0]]
array = numpy.zeros(shape, dtype=array.dtype)
for i in range(shape[0]):
array[i] = array2[:, :, i].T
else:
array.shape = shape[1], strides[-1]
array = array[:shape[1], :shape[0]].T
return array
def _finish_wrapped_array(self, array): # IS_PYPY
""" Hardcore way to inject numpy array in bitmap.
"""
# Get bitmap info
with self._fi as lib:
pitch = lib.FreeImage_GetPitch(self._bitmap)
bits = lib.FreeImage_GetBits(self._bitmap)
bpp = lib.FreeImage_GetBPP(self._bitmap)
# Get channels and realwidth
nchannels = bpp // 8 // array.itemsize
realwidth = pitch // nchannels
# Apply padding for pitch if necessary
extra = realwidth - array.shape[-2]
assert extra >= 0 and extra < 10
# Make sort of Fortran, also take padding (i.e. pitch) into account
newshape = array.shape[-1], realwidth, nchannels
array2 = numpy.zeros(newshape, array.dtype)
if nchannels == 1:
array2[:, :array.shape[-2], 0] = array.T
else:
for i in range(nchannels):
array2[:, :array.shape[-2], i] = array[i, :, :].T
# copy data
data_ptr = array2.__array_interface__['data'][0]
ctypes.memmove(bits, data_ptr, array2.nbytes)
del array2
def _get_type_and_shape(self):
bitmap = self._bitmap
# Get info on bitmap
with self._fi as lib:
w = lib.FreeImage_GetWidth(bitmap)
h = lib.FreeImage_GetHeight(bitmap)
self._fi_type = fi_type = lib.FreeImage_GetImageType(bitmap)
if not fi_type:
raise ValueError('Unknown image pixel type')
# Determine required props for numpy array
bpp = None
dtype = FI_TYPES.dtypes[fi_type]
if fi_type == FI_TYPES.FIT_BITMAP:
with self._fi as lib:
bpp = lib.FreeImage_GetBPP(bitmap)
has_pallette = lib.FreeImage_GetColorsUsed(bitmap)
if has_pallette:
# Examine the palette. If it is grayscale, we return as such
if has_pallette == 256:
palette = lib.FreeImage_GetPalette(bitmap)
palette = ctypes.c_void_p(palette)
p = (ctypes.c_uint8*(256*4)).from_address(palette.value)
p = numpy.frombuffer(p, numpy.uint32)
if (GREY_PALETTE == p).all():
extra_dims = []
return numpy.dtype(dtype), extra_dims + [w, h], bpp
# Convert bitmap and call this method again
newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap)
newbitmap = ctypes.c_void_p(newbitmap)
self._set_bitmap(newbitmap)
return self._get_type_and_shape()
elif bpp == 8:
extra_dims = []
elif bpp == 24:
extra_dims = [3]
elif bpp == 32:
extra_dims = [4]
else: # pragma: no cover
#raise ValueError('Cannot convert %d BPP bitmap' % bpp)
# Convert bitmap and call this method again
newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap)
newbitmap = ctypes.c_void_p(newbitmap)
self._set_bitmap(newbitmap)
return self._get_type_and_shape()
else:
extra_dims = FI_TYPES.extra_dims[fi_type]
# Return dtype and shape
return numpy.dtype(dtype), extra_dims + [w, h], bpp
def quantize(self, quantizer=0, palettesize=256):
""" Quantize the bitmap to make it 8-bit (paletted). Returns a new
FIBitmap object.
Only for 24 bit images.
"""
with self._fi as lib:
# New bitmap
bitmap = lib.FreeImage_ColorQuantizeEx(self._bitmap, quantizer,
palettesize, 0, None)
bitmap = ctypes.c_void_p(bitmap)
# Check and return
if not bitmap:
raise ValueError('Could not quantize bitmap "%s": %s' %
(self._filename,
self._fi._get_error_message()))
else:
new = FIBitmap(self._fi, self._filename, self._ftype,
self._flags)
new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
new._fi_type = self._fi_type
return new
# def convert_to_32bit(self):
# """ Convert to 32bit image.
# """
# with self._fi as lib:
# # New bitmap
# bitmap = lib.FreeImage_ConvertTo32Bits(self._bitmap)
# bitmap = ctypes.c_void_p(bitmap)
#
# # Check and return
# if not bitmap:
# raise ValueError('Could not convert bitmap to 32bit "%s": %s' %
# (self._filename,
# self._fi._get_error_message()))
# else:
# new = FIBitmap(self._fi, self._filename, self._ftype,
# self._flags)
# new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
# new._fi_type = self._fi_type
# return new
class FIMultipageBitmap(FIBaseBitmap):
    """ Wrapper for the multipage FI bitmap object.

    Wraps a FreeImage multi-bitmap handle (e.g. animated GIF, multi-page
    TIFF). Individual pages are exposed as FIBitmap objects.
    """
    def load_from_filename(self, filename=None):
        # Open an existing multi-page image read-only.
        if filename is None:  # pragma: no cover
            filename = self._filename
        # Prepare
        create_new = False
        read_only = True
        keep_cache_in_memory = False
        # Try opening
        with self._fi as lib:
            # Create bitmap
            multibitmap = lib.FreeImage_OpenMultiBitmap(self._ftype,
                                                        efn(filename),
                                                        create_new, read_only,
                                                        keep_cache_in_memory,
                                                        self._flags)
            multibitmap = ctypes.c_void_p(multibitmap)
            # Check (NULL handle means the open failed)
            if not multibitmap:  # pragma: no cover
                err = self._fi._get_error_message()
                raise ValueError('Could not open file "%s" as multi-image: %s'
                                 % (self._filename, err))
            else:
                # Close function flushes/closes the multibitmap on cleanup.
                self._set_bitmap(multibitmap,
                                 (lib.FreeImage_CloseMultiBitmap, multibitmap))
    # def load_from_bytes(self, bytes):
    #     with self._fi as lib:
    #         # Create bitmap
    #         fimemory = lib.FreeImage_OpenMemory(
    #             ctypes.c_char_p(bytes), len(bytes))
    #         multibitmap = lib.FreeImage_LoadMultiBitmapFromMemory(
    #             self._ftype, ctypes.c_void_p(fimemory), self._flags)
    #         multibitmap = ctypes.c_void_p(multibitmap)
    #         #lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
    #         self._mem = fimemory
    #         self._bytes = bytes
    #         # Check
    #         if not multibitmap:
    #             raise ValueError('Could not load multibitmap "%s": %s'
    #                     % (self._filename, self._fi._get_error_message()))
    #         else:
    #             self._set_bitmap(multibitmap,
    #                              (lib.FreeImage_CloseMultiBitmap, multibitmap))
    def save_to_filename(self, filename=None):
        # Create a new multi-page image on disk for writing; pages are
        # added later via append_bitmap().
        if filename is None:  # pragma: no cover
            filename = self._filename
        # Prepare
        create_new = True
        read_only = False
        keep_cache_in_memory = False
        # Open the file
        # todo: Set flags at close func
        with self._fi as lib:
            multibitmap = lib.FreeImage_OpenMultiBitmap(self._ftype,
                                                        efn(filename),
                                                        create_new, read_only,
                                                        keep_cache_in_memory,
                                                        0)
            multibitmap = ctypes.c_void_p(multibitmap)
            # Check
            if not multibitmap:  # pragma: no cover
                msg = ('Could not open file "%s" for writing multi-image: %s'
                       % (self._filename, self._fi._get_error_message()))
                raise ValueError(msg)
            else:
                self._set_bitmap(multibitmap,
                                 (lib.FreeImage_CloseMultiBitmap, multibitmap))
    def __len__(self):
        # Number of pages in the multi-bitmap.
        with self._fi as lib:
            return lib.FreeImage_GetPageCount(self._bitmap)
    def get_page(self, index):
        """ Return the sub-bitmap for the given page index.
        Please close the returned bitmap when done.
        """
        with self._fi as lib:
            # Create low-level bitmap in freeimage
            bitmap = lib.FreeImage_LockPage(self._bitmap, index)
            bitmap = ctypes.c_void_p(bitmap)
            if not bitmap:  # pragma: no cover
                raise ValueError('Could not open sub-image %i in %r: %s' %
                                 (index, self._filename,
                                  self._fi._get_error_message()))
            # Get bitmap object to wrap this bitmap
            bm = FIBitmap(self._fi, self._filename, self._ftype, self._flags)
            # Closing the page unlocks it (False: do not mark as changed).
            bm._set_bitmap(bitmap, (lib.FreeImage_UnlockPage, self._bitmap,
                                    bitmap, False))
            return bm
    def append_bitmap(self, bitmap):
        """ Add a sub-bitmap to the multi-page bitmap.
        """
        with self._fi as lib:
            # no return value
            lib.FreeImage_AppendPage(self._bitmap, bitmap._bitmap)
# Create instance -- module-level singleton interface to the FreeImage
# library (Freeimage is presumably defined earlier in this file).
fi = Freeimage()
| bsd-2-clause |
sebastian-code/system_overview | system_report.py | 1 | 9502 | #! /usr/bin/python3
# -*- coding:utf-8 -*-
import platform
import os
import subprocess
# cat /proc/cpuinfo | grep 'model name' | head -n 1 | sed 's/model name.*: //g'
def nom_proc():
    """Return the processor model name from /proc/cpuinfo.

    Replaces the original four-process cat|grep|head|sed pipeline with a
    direct read of /proc/cpuinfo. This avoids spawning subprocesses, the
    fragile ``str(bytes)[2:-3]`` repr-slicing hack (which mangled any
    non-ASCII model name), and an unhandled crash when the external
    commands are unavailable. Returns '' if the field cannot be found.
    """
    try:
        with open('/proc/cpuinfo') as cpuinfo:
            for linea in cpuinfo:
                if linea.startswith('model name'):
                    # Field format is 'model name\t: <value>'.
                    return linea.split(':', 1)[1].strip()
    except (OSError, IOError):
        pass
    return ''
# cat /proc/cpuinfo | grep 'cpu MHz' | head -n 1 | sed 's/cpu MHz.*: //g'
def vel_proc():
    """Return the current CPU clock speed (MHz) from /proc/cpuinfo.

    Replaces the original four-process cat|grep|head|sed pipeline with a
    direct read of /proc/cpuinfo (no subprocesses, no fragile
    ``str(bytes)[2:-3]`` repr slicing). Returns '' if the field is not
    present (e.g. on non-x86 kernels that omit 'cpu MHz').
    """
    try:
        with open('/proc/cpuinfo') as cpuinfo:
            for linea in cpuinfo:
                if linea.startswith('cpu MHz'):
                    return linea.split(':', 1)[1].strip()
    except (OSError, IOError):
        pass
    return ''
# cat /proc/cpuinfo | grep 'cpu cores' | head -n 1 | sed 's/cpu MHz.*: //g'
def num_nucleos():
    """Return the number of CPU cores ('cpu cores') from /proc/cpuinfo.

    Replaces the original four-process cat|grep|head|sed pipeline with a
    direct read of /proc/cpuinfo (no subprocesses, no fragile
    ``str(bytes)[2:-3]`` repr slicing). Returns '' if the field is not
    present.
    """
    try:
        with open('/proc/cpuinfo') as cpuinfo:
            for linea in cpuinfo:
                if linea.startswith('cpu cores'):
                    return linea.split(':', 1)[1].strip()
    except (OSError, IOError):
        pass
    return ''
# lspci | grep VGA | sed 's/.*VGA compatible controller://g'
def vga_modelo():
    """Return the VGA controller model string reported by lspci.

    NOTE(review): the trailing slice ``[3:-12]`` trims the bytes-repr
    wrapper plus a fixed-length suffix (presumably ' (rev ..)') --
    tuned for one lspci output format; verify on other hardware.
    """
    p1 = subprocess.Popen('lspci', stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['grep', 'VGA'], stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(['sed', 's/.*VGA compatible controller://g'],
                          stdin=p2.stdout, stdout=subprocess.PIPE)
    # Close upstream pipe ends so SIGPIPE propagates if p3 exits early.
    p1.stdout.close()
    p2.stdout.close()
    return str(p3.communicate()[0])[3:-12]
# lsmod | grep 'fglrx | nvidia | i915 | i965 | intel_agp | r200 | r300 | r600 | swrast | svga | radeon | noveau'
def vga_driver():
    """Return loaded kernel modules matching known graphics drivers.

    Output is the lsmod lines joined with '<br />' for HTML embedding.
    """
    p1 = subprocess.Popen('lsmod', stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['grep', 'fglrx\\|nvidia\\|i915\\|i965\\|intel_agp\\|r200\\|r300\\|r600\\|swrast\\|svga\\|radeon\\|noveau'],
                          stdin=p1.stdout, stdout=subprocess.PIPE)
    p1.stdout.close()
    # str(bytes)[2:-3] strips the b'...' repr wrapper; '\\n' is the
    # escaped newline inside that repr.
    st = str(p2.communicate()[0])[2:-3].replace('\\n', '<br />')
    return st
# glxinfo 2>&1 | grep -i 'direct rendering'
def vga_rendering():
    """Return the 'direct rendering' status line from glxinfo."""
    p1 = subprocess.Popen(['glxinfo'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['grep', '-i', 'direct rendering'],
                          stdin=p1.stdout, stdout=subprocess.PIPE)
    # Close upstream pipe end so SIGPIPE propagates if p2 exits early.
    p1.stdout.close()
    return str(p2.communicate()[0])[2:-3]
# xrandr
def vga_displays():
    """Return the xrandr display summary, newlines rendered as <br />."""
    proceso = subprocess.Popen('xrandr', stdout=subprocess.PIPE)
    salida = str(proceso.communicate()[0])[2:-3]
    return salida.replace('\\n', '<br />')
# lspci | grep Audio | sed 's/.*Audio device://g'
def snd_modelo():
    """Return the audio device model string reported by lspci.

    NOTE(review): the slice ``[3:-3]`` strips the bytes-repr wrapper and
    leading space -- verify against actual lspci output.
    """
    p1 = subprocess.Popen('lspci', stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['grep', 'Audio'], stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(['sed', 's/.*Audio device://g'],
                          stdin=p2.stdout, stdout=subprocess.PIPE)
    # Close upstream pipe ends so SIGPIPE propagates if p3 exits early.
    p1.stdout.close()
    p2.stdout.close()
    return str(p3.communicate()[0])[3:-3]
# lsmod | grep 'snd'
def snd_driver():
    """Return loaded kernel modules whose name contains 'snd',
    joined with '<br />' for HTML embedding."""
    p1 = subprocess.Popen('lsmod', stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['grep', 'snd'],
                          stdin=p1.stdout, stdout=subprocess.PIPE)
    p1.stdout.close()
    # str(bytes)[2:-3] strips the b'...' repr wrapper.
    st = str(p2.communicate()[0])[2:-3].replace('\\n', '<br />')
    return st
# grep MemTotal /proc/meminfo | sed 's/MemTotal: //g'
def mem_query(arg):
    """Return the /proc/meminfo value for field `arg` (e.g. 'MemTotal').

    :param arg: meminfo field name, used both as grep pattern and in the
                sed substitution that strips the field prefix.
    """
    p1 = subprocess.Popen(['grep', '{}'.format(arg), '/proc/meminfo'],
                          stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['sed', 's/{}: //g'.format(arg)], stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    # BUG FIX: the upstream pipe end was never closed here (unlike every
    # sibling helper), which prevents SIGPIPE propagation to grep and
    # leaks the file descriptor until GC.
    p1.stdout.close()
    return str(p2.communicate()[0])[2:-3]
# df -h
def particionado():
    """Return the `df -h` partition table, newlines rendered as <br />."""
    proceso = subprocess.Popen(['df', '-h'], stdout=subprocess.PIPE)
    salida = str(proceso.communicate()[0])[2:-3]
    return salida.replace('\\n', '<br />')
# ifconfig
def redes():
    """Return the ifconfig interface listing, newlines as <br />."""
    proceso = subprocess.Popen('ifconfig', stdout=subprocess.PIPE)
    salida = str(proceso.communicate()[0])[2:-3]
    return salida.replace('\\n', '<br />')
# cat /etc/apt/sources.list
def sw_sources():
    """Return the APT sources list contents, newlines as <br />."""
    proceso = subprocess.Popen(['cat', '/etc/apt/sources.list'],
                               stdout=subprocess.PIPE)
    salida = str(proceso.communicate()[0])[2:-3]
    return salida.replace('\\n', '<br />')
# dpkg-query --show | sed 's/\n/<\/br>/g'
def sw_instalado():
    """Return the installed-package list (dpkg-query) as HTML text."""
    p1 = subprocess.Popen(['dpkg-query', '--show'], stdout=subprocess.PIPE)
    # BUG FIX: the original literal contained the invalid escape
    # sequence '\/' (a DeprecationWarning today, a SyntaxError in future
    # Python). The raw string below passes byte-identical text to sed.
    p2 = subprocess.Popen(['sed', r's/\n/<\/br>/g'],
                          stdin=p1.stdout, stdout=subprocess.PIPE)
    p1.stdout.close()
    st = str(p2.communicate()[0])[2:-3].replace('\\n', '<br />')
    return st
# lsmod
def kernel():
    """Return the full lsmod module listing, newlines as <br />."""
    proceso = subprocess.Popen('lsmod', stdout=subprocess.PIPE)
    salida = str(proceso.communicate()[0])[2:-3]
    return salida.replace('\\n', '<br />')
def reporte(arg):
    """Append `arg` to the module-global report file `archivo_reporte`,
    which is opened in the __main__ block below."""
    archivo_reporte.write(arg)
def header():
    """Write the HTML prologue and the user@host heading of the report."""
    reporte('''<!DOCTYPE HTML>
    <html>
    <head>
    <title>Reporte del Sistema Local Linux</title>
    <style>
    tr > td:first-child { font-weight:bold; }
    </style>
    </head>
    <body>''')
    reporte('''
    <h2>Nombre del usuario e identificador de red</h2>
    <h2>{}@{}</h2>'''.format(os.getlogin(), platform.node()))
def linea_tabla(arg1, arg2):
    """Append one two-cell HTML table row to the report."""
    fila = '\n    <tr><td>{0}</td><td>{1}</td></tr>'.format(arg1, arg2)
    reporte(fila)
def footer():
    """Close the HTML document opened by header()."""
    reporte('''
    </body>
    </html>
    ''')
def informacion_basica():
    """Write the general system-information section of the report."""
    reporte('''
    <h3>Informacion general del Sistema</h3>
    <table border=0>''')
    linea_tabla('Nombre del Host:', platform.node())
    # BUG FIX: platform.linux_distribution() was deprecated in Python 3.5
    # and removed in 3.8, so the original code crashes on modern
    # interpreters. Fall back to generic platform data in that case.
    try:
        distro = (platform.linux_distribution()[0],
                  platform.linux_distribution()[1])
    except AttributeError:
        distro = (platform.system(), platform.release())
    linea_tabla('Distribucion:', distro)
    linea_tabla('Kernel:', platform.release())
    linea_tabla('Arquitectura:', platform.machine())
    reporte('</table>')
def informacion_cpu():
    """Write the processor section of the report."""
    reporte('''
    <h3>Configuracion de Hardware</h3>
    <h4>Procesador</h4>
    <table border=0>''')
    linea_tabla('Nombre del Procesador:', nom_proc())
    linea_tabla('Velocidad del Procesador:', vel_proc())
    linea_tabla('Numero de Nucleos:', num_nucleos())
    linea_tabla('Procesos en Paralelo:', os.cpu_count())
    reporte('</table>')
def informacion_graph():
    """Write the graphics-hardware section of the report."""
    reporte('''
    <h4>Graficos</h4>
    <table border=0>
    ''')
    linea_tabla('Modelo:', vga_modelo())
    linea_tabla('Driver:', '''<pre>{}</pre>'''.format(vga_driver()))
    linea_tabla('Rendering:', vga_rendering())
    linea_tabla('Displays:', '<pre>{}</pre>'.format(vga_displays()))
    reporte('</table>')
def informacion_snd():
    """Write the sound-hardware section of the report."""
    # BUG FIX: the original literal read '<table border=0' without the
    # closing '>', emitting malformed HTML (all sibling sections include
    # the '>').
    reporte('''
    <h4>Sonidos</h4>
    <table border=0>
    ''')
    linea_tabla('Modelo:', snd_modelo())
    linea_tabla('Driver:', '<pre>{}</pre>'.format(snd_driver()))
    reporte('</table>')
def informacion_mem():
    """Write the RAM/swap section of the report."""
    reporte('''
    <h4>Memoria</h4>
    <table border=0>
    ''')
    linea_tabla('RAM Total:', mem_query('MemTotal'))
    linea_tabla('RAM Libre:', mem_query('MemFree'))
    linea_tabla('Swap Total:', mem_query('SwapTotal'))
    linea_tabla('Swap Libre:', mem_query('SwapFree'))
    reporte('</table>')
def informacion_part():
    """Write the disk-partition (df -h) section of the report."""
    reporte('''
    <h4>Particiones</h4>
    <table border=0>
    ''')
    linea_tabla('', '<pre>{}</pre>'.format(particionado()))
    reporte('</table>')
def informacion_redes():
    """Write the network-interfaces (ifconfig) section of the report."""
    reporte('''
    <h4>Interfaces de Red</h4>
    <table border=0>
    ''')
    linea_tabla('', '<pre>{}</pre>'.format(redes()))
    reporte('</table>')
def informacion_sw():
    """Write the software-sources and installed-packages sections."""
    reporte('''
    <h3>Informacion del Software</h3>
    <h4>Fuentes del Software</h4>
    <table border=0>''')
    linea_tabla('', '<pre>{}</pre>'.format(sw_sources()))
    reporte('</table>')
    reporte('''
    <h4>Software Instalado</h4>
    <table border=0>''')
    linea_tabla('', '<pre>{}</pre>'.format(sw_instalado()))
    reporte('</table>')
def informacion_kernel():
    """Write the loaded-kernel-modules (lsmod) section of the report."""
    reporte('''
    <h3>Informacion del Kernel</h3>
    <h4>Resumen de Modulos del Kernel</h4>
    <table border=0>''')
    linea_tabla('', '<pre>{}</pre>'.format(kernel()))
    reporte('</table>')
import time


if __name__ == '__main__':
    # Write the report into the user's home directory, stamped with the
    # current date and time.
    os.chdir(os.path.expanduser('~'))
    # FIX: a single strftime call avoids sampling the clock twice, which
    # could mix the date of one second with the time of the next around
    # midnight.
    fecha = time.strftime('%Y-%m-%d_%H-%M-%S')
    # FIX: a context manager guarantees the report file is flushed and
    # closed even if one of the section writers raises.
    with open('sys_report_{}.html'.format(fecha), 'a+') as archivo_reporte:
        header()
        informacion_basica()
        informacion_cpu()
        informacion_graph()
        informacion_snd()
        informacion_mem()
        informacion_part()
        informacion_redes()
        informacion_sw()
        informacion_kernel()
        footer()
| mit |
eemirtekin/edx-platform | common/djangoapps/util/migrations/0002_default_rate_limit_config.py | 102 | 4097 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: enable rate limiting by default."""
    def forwards(self, orm):
        """Ensure that rate limiting is enabled by default. """
        orm['util.RateLimitConfiguration'].objects.create(enabled=True)
    def backwards(self, orm):
        # Intentionally irreversible: the configuration row created by
        # forwards() is simply left in place.
        pass
    # Frozen ORM model definitions captured by South at the time this
    # migration was written; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'util.ratelimitconfiguration': {
            'Meta': {'object_name': 'RateLimitConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
    complete_apps = ['util']
    symmetrical = True
| agpl-3.0 |
kimjaejoong/nova | nova/image/glance.py | 3 | 25512 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import itertools
import random
import sys
import time
import glanceclient
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _, _LE, _LW
import nova.image.download as image_xfers
# Configuration options for talking to the Glance image service; they
# are registered under the [glance] group below.
glance_opts = [
    cfg.StrOpt('host',
               default='$my_ip',
               help='Default glance hostname or IP address'),
    cfg.IntOpt('port',
               default=9292,
               help='Default glance port'),
    cfg.StrOpt('protocol',
               default='http',
               choices=('http', 'https'),
               help='Default protocol to use when connecting to glance. '
                    'Set to https for SSL.'),
    cfg.ListOpt('api_servers',
                help='A list of the glance api servers available to nova. '
                     'Prefix with https:// for ssl-based glance api servers. '
                     '([hostname|ip]:port)'),
    cfg.BoolOpt('api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                     'glance'),
    cfg.IntOpt('num_retries',
               default=0,
               help='Number of retries when uploading / downloading an image '
                    'to / from glance.'),
    cfg.ListOpt('allowed_direct_url_schemes',
                default=[],
                help='A list of url scheme that can be downloaded directly '
                     'via the direct_url. Currently supported schemes: '
                     '[file].'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts, 'glance')
# Options owned by other modules that this module reads via CONF.
CONF.import_opt('auth_strategy', 'nova.api.auth')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_group('ssl', 'nova.openstack.common.sslutils')
def generate_glance_url():
    """Return the base URL of the configured glance endpoint."""
    host = CONF.glance.host
    if netutils.is_valid_ipv6(host):
        # Literal IPv6 addresses must be bracketed inside a URL.
        host = '[' + host + ']'
    return '{0}://{1}:{2:d}'.format(CONF.glance.protocol, host,
                                    CONF.glance.port)
def generate_image_url(image_ref):
    """Return the full glance URL for the given image reference."""
    return '{0}/images/{1}'.format(generate_glance_url(), image_ref)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, host, port)
:raises ValueError
"""
o = urlparse.urlparse(image_href)
port = o.port or 80
host = o.netloc.rsplit(':', 1)[0]
image_id = o.path.split('/')[-1]
use_ssl = (o.scheme == 'https')
return (image_id, host, port, use_ssl)
def generate_identity_headers(context, status='Confirmed'):
    """Build the keystone identity headers derived from a request context."""
    headers = {'X-Identity-Status': status}
    headers['X-Auth-Token'] = getattr(context, 'auth_token', None)
    headers['X-User-Id'] = getattr(context, 'user', None)
    headers['X-Tenant-Id'] = getattr(context, 'tenant', None)
    headers['X-Roles'] = ','.join(context.roles)
    headers['X-Service-Catalog'] = jsonutils.dumps(context.service_catalog)
    return headers
def _create_glance_client(context, host, port, use_ssl, version=1):
    """Instantiate a new glanceclient.Client object.

    :param context: request context supplying the auth token
    :param host: glance host (bracketed automatically if IPv6)
    :param port: glance port
    :param use_ssl: if True, use https and the configured SSL material
    :param version: glance API version for the client
    """
    params = {}
    if use_ssl:
        scheme = 'https'
        # https specific params
        params['insecure'] = CONF.glance.api_insecure
        params['ssl_compression'] = False
        if CONF.ssl.cert_file:
            params['cert_file'] = CONF.ssl.cert_file
        if CONF.ssl.key_file:
            params['key_file'] = CONF.ssl.key_file
        if CONF.ssl.ca_file:
            params['cacert'] = CONF.ssl.ca_file
    else:
        scheme = 'http'
    if CONF.auth_strategy == 'keystone':
        # NOTE(isethi): Glanceclient <= 0.9.0.49 accepts only
        # keyword 'token', but later versions accept both the
        # header 'X-Auth-Token' and 'token'
        params['token'] = context.auth_token
        params['identity_headers'] = generate_identity_headers(context)
    if netutils.is_valid_ipv6(host):
        # if so, it is ipv6 address, need to wrap it with '[]'
        host = '[%s]' % host
    endpoint = '%s://%s:%s' % (scheme, host, port)
    return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
    """Shuffle a list of CONF.glance.api_servers and return an iterator
    that will cycle through the list, looping around to the beginning
    if necessary.
    """
    if CONF.glance.api_servers is None:
        configured_servers = ['%s:%s' % (CONF.glance.host, CONF.glance.port)]
    else:
        configured_servers = CONF.glance.api_servers
    api_servers = []
    for entry in configured_servers:
        # Assume plain http when no scheme was given.
        if '//' not in entry:
            entry = 'http://' + entry
        parsed = urlparse.urlparse(entry)
        host = parsed.netloc.rsplit(':', 1)[0]
        # Unwrap bracketed IPv6 literals.
        if host[0] == '[' and host[-1] == ']':
            host = host[1:-1]
        api_servers.append((host, parsed.port or 80,
                            parsed.scheme == 'https'))
    random.shuffle(api_servers)
    return itertools.cycle(api_servers)
class GlanceClientWrapper(object):
    """Glance client wrapper class that implements retries.

    When constructed with an explicit host, a single static client is
    reused for all calls; otherwise a fresh client is created per call,
    cycling through the configured API servers.
    """
    def __init__(self, context=None, host=None, port=None, use_ssl=False,
                 version=1):
        if host is not None:
            self.client = self._create_static_client(context,
                                                     host, port,
                                                     use_ssl, version)
        else:
            self.client = None
        self.api_servers = None
    def _create_static_client(self, context, host, port, use_ssl, version):
        """Create a client that we'll use for every call."""
        self.host = host
        self.port = port
        self.use_ssl = use_ssl
        self.version = version
        return _create_glance_client(context,
                                     self.host, self.port,
                                     self.use_ssl, self.version)
    def _create_onetime_client(self, context, version):
        """Create a client that will be used for one call."""
        if self.api_servers is None:
            # Lazily build the shuffled, cycling server iterator.
            self.api_servers = get_api_servers()
        self.host, self.port, self.use_ssl = next(self.api_servers)
        return _create_glance_client(context,
                                     self.host, self.port,
                                     self.use_ssl, version)
    def call(self, context, version, method, *args, **kwargs):
        """Call a glance client method. If we get a connection error,
        retry the request according to CONF.glance.num_retries.
        """
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        retries = CONF.glance.num_retries
        if retries < 0:
            LOG.warning(_LW("Treating negative config value (%(retries)s) for "
                            "'glance.num_retries' as 0."),
                        {'retries': retries})
            retries = 0
        num_attempts = retries + 1
        for attempt in range(1, num_attempts + 1):
            # A static client (if any) is reused; otherwise each attempt
            # targets the next API server in the cycle.
            client = self.client or self._create_onetime_client(context,
                                                                version)
            try:
                return getattr(client.images, method)(*args, **kwargs)
            except retry_excs as e:
                host = self.host
                port = self.port
                if attempt < num_attempts:
                    extra = "retrying"
                else:
                    extra = 'done trying'
                error_msg = (_("Error contacting glance server "
                               "'%(host)s:%(port)s' for '%(method)s', "
                               "%(extra)s.") %
                             {'host': host, 'port': port,
                              'method': method, 'extra': extra})
                LOG.exception(error_msg)
                if attempt == num_attempts:
                    # Out of retries: surface the failure to the caller.
                    raise exception.GlanceConnectionFailed(
                        host=host, port=port, reason=six.text_type(e))
                time.sleep(1)
class GlanceImageService(object):
    """Provides storage and retrieval of disk image objects within Glance."""

    def __init__(self, client=None):
        self._client = client or GlanceClientWrapper()
        # NOTE(jbresnah) build the table of download handlers at the beginning
        # so that operators can catch errors at load time rather than whenever
        # a user attempts to use a module. Note this cannot be done in glance
        # space when this python module is loaded because the download module
        # may require configuration options to be parsed.
        self._download_handlers = {}
        download_modules = image_xfers.load_transfer_modules()

        for scheme, mod in six.iteritems(download_modules):
            # Only schemes explicitly allowed by the operator are wired up.
            if scheme not in CONF.glance.allowed_direct_url_schemes:
                continue
            try:
                self._download_handlers[scheme] = mod.get_download_handler()
            except Exception as ex:
                LOG.error(_LE('When loading the module %(module_str)s the '
                              'following error occurred: %(ex)s'),
                          {'module_str': str(mod), 'ex': ex})

    def detail(self, context, **kwargs):
        """Calls out to Glance for a list of detailed image information."""
        params = _extract_query_params(kwargs)
        try:
            images = self._client.call(context, 1, 'list', **params)
        except Exception:
            _reraise_translated_exception()

        _images = []
        for image in images:
            # Filter out images the caller may not see.
            if _is_image_available(context, image):
                _images.append(_translate_from_glance(image))
        return _images

    def show(self, context, image_id, include_locations=False,
             show_deleted=True):
        """Returns a dict with image data for the given opaque image id.

        :param context: The context object to pass to image client
        :param image_id: The UUID of the image
        :param include_locations: (Optional) include locations in the returned
                                  dict of information if the image service API
                                  supports it. If the image service API does
                                  not support the locations attribute, it will
                                  still be included in the returned dict, as an
                                  empty list.
        :param show_deleted: (Optional) show the image even the status of
                             image is deleted.
        """
        # Locations are only exposed by the v2 API.
        version = 1
        if include_locations:
            version = 2
        try:
            image = self._client.call(context, version, 'get', image_id)
        except Exception:
            _reraise_translated_image_exception(image_id)

        if not show_deleted and getattr(image, 'deleted', False):
            raise exception.ImageNotFound(image_id=image_id)

        if not _is_image_available(context, image):
            raise exception.ImageNotFound(image_id=image_id)

        image = _translate_from_glance(image,
                                       include_locations=include_locations)
        if include_locations:
            # Fold the v2 'direct_url' into the locations list so callers
            # only have one place to look.
            locations = image.get('locations', None) or []
            du = image.get('direct_url', None)
            if du:
                locations.append({'url': du, 'metadata': {}})
            image['locations'] = locations
        return image

    def _get_transfer_module(self, scheme):
        # Returns the download handler registered for *scheme*, or None.
        try:
            return self._download_handlers[scheme]
        except KeyError:
            return None
        except Exception:
            LOG.error(_LE("Failed to instantiate the download handler "
                          "for %(scheme)s"), {'scheme': scheme})
        return

    def download(self, context, image_id, data=None, dst_path=None):
        """Calls out to Glance for data and writes data."""
        # Try a direct (out-of-band) transfer first when allowed and a
        # destination path is given; fall back to streaming via the API.
        if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
            image = self.show(context, image_id, include_locations=True)
            for entry in image.get('locations', []):
                loc_url = entry['url']
                loc_meta = entry['metadata']
                o = urlparse.urlparse(loc_url)
                xfer_mod = self._get_transfer_module(o.scheme)
                if xfer_mod:
                    try:
                        xfer_mod.download(context, o, dst_path, loc_meta)
                        msg = _("Successfully transferred "
                                "using %s") % o.scheme
                        LOG.info(msg)
                        return
                    except Exception:
                        # Best effort: log and try the next location/API.
                        LOG.exception(_LE("Download image error"))
        try:
            image_chunks = self._client.call(context, 1, 'data', image_id)
        except Exception:
            _reraise_translated_image_exception(image_id)

        # Only close the sink if we opened it ourselves.
        close_file = False
        if data is None and dst_path:
            data = open(dst_path, 'wb')
            close_file = True

        if data is None:
            # No sink at all: hand the chunk iterator to the caller.
            return image_chunks
        else:
            try:
                for chunk in image_chunks:
                    data.write(chunk)
            finally:
                if close_file:
                    data.close()

    def create(self, context, image_meta, data=None):
        """Store the image data and return the new image object."""
        sent_service_image_meta = _translate_to_glance(image_meta)
        if data:
            sent_service_image_meta['data'] = data
        try:
            recv_service_image_meta = self._client.call(
                context, 1, 'create', **sent_service_image_meta)
        except glanceclient.exc.HTTPException:
            _reraise_translated_exception()
        return _translate_from_glance(recv_service_image_meta)

    def update(self, context, image_id, image_meta, data=None,
               purge_props=True):
        """Modify the given image with the new data."""
        image_meta = _translate_to_glance(image_meta)
        image_meta['purge_props'] = purge_props
        # NOTE(bcwaldon): id is not an editable field, but it is likely to be
        # passed in by calling code. Let's be nice and ignore it.
        image_meta.pop('id', None)
        if data:
            image_meta['data'] = data
        try:
            image_meta = self._client.call(context, 1, 'update',
                                           image_id, **image_meta)
        except Exception:
            _reraise_translated_image_exception(image_id)
        else:
            return _translate_from_glance(image_meta)

    def delete(self, context, image_id):
        """Delete the given image.

        :raises: ImageNotFound if the image does not exist.
        :raises: NotAuthorized if the user is not an owner.
        :raises: ImageNotAuthorized if the user is not authorized.
        """
        try:
            self._client.call(context, 1, 'delete', image_id)
        except glanceclient.exc.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        except glanceclient.exc.HTTPForbidden:
            raise exception.ImageNotAuthorized(image_id=image_id)
        return True
def _extract_query_params(params):
    """Return only the query parameters Glance's list API understands.

    Keys with falsy values are dropped; a 'filters' dict is always present
    and gets is_public='none' unless the caller set it.
    """
    accepted_params = ('filters', 'marker', 'limit',
                       'page_size', 'sort_key', 'sort_dir')
    query = {name: params[name] for name in accepted_params if params.get(name)}
    # ensure filters is a dict
    filters = query.setdefault('filters', {})
    # NOTE(vish): don't filter out private images
    filters.setdefault('is_public', 'none')
    return query
def _is_image_available(context, image):
    """Check image availability.

    This check is needed in case Nova and Glance are deployed
    without authentication turned on.
    """
    # An auth token implies an authenticated request, so the noauth
    # checks below do not apply.
    if getattr(context, 'auth_token', None):
        return True

    def _is_image_public(img):
        # NOTE(jaypipes) V2 Glance API replaced the is_public attribute
        # with a visibility attribute. We do this here to prevent the
        # glanceclient for a V2 image model from throwing an
        # exception from warlock when trying to access an is_public
        # attribute.
        if hasattr(img, 'visibility'):
            return str(img.visibility).lower() == 'public'
        return img.is_public

    if context.is_admin or _is_image_public(image):
        return True

    properties = image.properties
    project_id = context.project_id

    # Ownership by project: 'owner_id' takes precedence over 'project_id'.
    for owner_key in ('owner_id', 'project_id'):
        if project_id and owner_key in properties:
            return str(properties[owner_key]) == str(project_id)

    # Fall back to per-user ownership.
    if 'user_id' not in properties:
        return False
    return str(properties['user_id']) == str(context.user_id)
def _translate_to_glance(image_meta):
    """Convert nova-side image metadata into the form Glance accepts."""
    # Serialize complex properties, then strip attributes Glance owns.
    return _remove_read_only(_convert_to_string(image_meta))
def _translate_from_glance(image, include_locations=False):
    """Convert a Glance image object into a plain nova image-meta dict."""
    meta = _extract_attributes(image,
                               include_locations=include_locations)
    meta = _convert_timestamps_to_datetimes(meta)
    return _convert_from_string(meta)
def _convert_timestamps_to_datetimes(image_meta):
    """Returns image with timestamp fields converted to datetime objects."""
    for field in ('created_at', 'updated_at', 'deleted_at'):
        value = image_meta.get(field)
        if value:
            image_meta[field] = timeutils.parse_isotime(value)
    return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
    """Deserialize properties[attr] in place if it is still a JSON string."""
    prop = properties[attr]
    if isinstance(prop, six.string_types):
        properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
    """Serialize properties[attr] in place unless it is already a string."""
    prop = properties[attr]
    if not isinstance(prop, six.string_types):
        properties[attr] = jsonutils.dumps(prop)
# Property names whose values are (de)serialized as JSON in glance metadata.
_CONVERT_PROPS = ('block_device_mapping', 'mappings')


def _convert(method, metadata):
    """Apply *method* to each convertible property of a deep copy of
    *metadata* and return the copy (the input is never mutated)."""
    converted = copy.deepcopy(metadata)
    properties = converted.get('properties')
    if properties:
        for prop_name in _CONVERT_PROPS:
            if prop_name in properties:
                method(properties, prop_name)
    return converted
def _convert_from_string(metadata):
    """Return a copy of metadata with JSON-string properties deserialized."""
    return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
    """Return a copy of metadata with complex properties JSON-serialized."""
    return _convert(_json_dumps, metadata)
def _extract_attributes(image, include_locations=False):
    """Flatten a Glance image object into a plain dict of known attributes.

    The iteration order of IMAGE_ATTRIBUTES is load-bearing: 'deleted_at'
    is only computed after 'deleted' has been stored in the output, and
    'checksum' after 'status'.
    """
    # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
    # a get(), resulting in a useless request back to glance. This list is
    # therefore sorted, with dependent attributes as the end
    # 'deleted_at' depends on 'deleted'
    # 'checksum' depends on 'status' == 'active'
    IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
                        'container_format', 'status', 'id',
                        'name', 'created_at', 'updated_at',
                        'deleted', 'deleted_at', 'checksum',
                        'min_disk', 'min_ram', 'is_public',
                        'direct_url', 'locations']

    queued = getattr(image, 'status') == 'queued'
    queued_exclude_attrs = ['disk_format', 'container_format']
    include_locations_attrs = ['direct_url', 'locations']
    output = {}

    for attr in IMAGE_ATTRIBUTES:
        if attr == 'deleted_at' and not output['deleted']:
            output[attr] = None
        elif attr == 'checksum' and output['status'] != 'active':
            output[attr] = None
        # image may not have 'name' attr
        elif attr == 'name':
            output[attr] = getattr(image, attr, None)
        # NOTE(liusheng): queued image may not have these attributes and 'name'
        elif queued and attr in queued_exclude_attrs:
            output[attr] = getattr(image, attr, None)
        # NOTE(mriedem): Only get location attrs if including locations.
        elif attr in include_locations_attrs:
            if include_locations:
                output[attr] = getattr(image, attr, None)
        else:
            # NOTE(xarses): Anything that is caught with the default value
            # will result in a additional lookup to glance for said attr.
            # Notable attributes that could have this issue:
            # disk_format, container_format, name, deleted, checksum
            output[attr] = getattr(image, attr, None)

    output['properties'] = getattr(image, 'properties', {})

    return output
def _remove_read_only(image_meta):
    """Return a deep copy of image_meta without Glance's read-only fields."""
    output = copy.deepcopy(image_meta)
    for read_only_attr in ('status', 'updated_at', 'created_at', 'deleted_at'):
        # pop() with a default is a no-op when the key is absent.
        output.pop(read_only_attr, None)
    return output
def _reraise_translated_image_exception(image_id):
    """Transform the exception for the image but keep its traceback intact."""
    exc_type, exc_value, exc_trace = sys.exc_info()
    new_exc = _translate_image_exception(image_id, exc_value)
    # six.reraise accepts an exception *instance* as its first argument.
    six.reraise(new_exc, None, exc_trace)
def _reraise_translated_exception():
    """Transform the exception but keep its traceback intact."""
    exc_type, exc_value, exc_trace = sys.exc_info()
    new_exc = _translate_plain_exception(exc_value)
    six.reraise(new_exc, None, exc_trace)
def _translate_image_exception(image_id, exc_value):
    """Map a glanceclient error to the nova exception for *image_id*.

    Unknown exception types are returned unchanged.
    """
    if isinstance(exc_value, (glanceclient.exc.Forbidden,
                              glanceclient.exc.Unauthorized)):
        return exception.ImageNotAuthorized(image_id=image_id)
    if isinstance(exc_value, glanceclient.exc.NotFound):
        return exception.ImageNotFound(image_id=image_id)
    if isinstance(exc_value, glanceclient.exc.BadRequest):
        return exception.Invalid(six.text_type(exc_value))
    return exc_value
def _translate_plain_exception(exc_value):
    """Map a glanceclient error to the generic nova exception.

    Unknown exception types are returned unchanged.
    """
    if isinstance(exc_value, (glanceclient.exc.Forbidden,
                              glanceclient.exc.Unauthorized)):
        return exception.Forbidden(six.text_type(exc_value))
    if isinstance(exc_value, glanceclient.exc.NotFound):
        return exception.NotFound(six.text_type(exc_value))
    if isinstance(exc_value, glanceclient.exc.BadRequest):
        return exception.Invalid(six.text_type(exc_value))
    return exc_value
def get_remote_image_service(context, image_href):
    """Create an image_service and parse the id from the given image_href.

    The image_href param can be an href of the form
    'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
    or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
    image_href is a standalone id, then the default image service is returned.

    :param image_href: href that describes the location of an image
    :returns: a tuple of the form (image_service, image_id)
    :raises: exception.InvalidImageRef for a malformed href
    """
    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
    # standalone image ID
    if '/' not in str(image_href):
        image_service = get_default_image_service()
        return image_service, image_href

    try:
        (image_id, glance_host, glance_port, use_ssl) = \
            _parse_image_ref(image_href)
        # Pin the wrapper to the host embedded in the href.
        glance_client = GlanceClientWrapper(context=context,
                                            host=glance_host,
                                            port=glance_port,
                                            use_ssl=use_ssl)
    except ValueError:
        raise exception.InvalidImageRef(image_href=image_href)

    image_service = GlanceImageService(client=glance_client)
    return image_service, image_id
def get_default_image_service():
    """Return a GlanceImageService using the configured API server rotation."""
    return GlanceImageService()
class UpdateGlanceImage(object):
    """Helper that streams new data/metadata into an existing Glance image."""

    def __init__(self, context, image_id, metadata, stream):
        self.context = context
        self.image_id = image_id
        self.metadata = metadata
        self.image_stream = stream

    def start(self):
        """Resolve the image service for the href and push the stream,
        preserving properties not present in the new metadata."""
        image_service, image_id = (
            get_remote_image_service(self.context, self.image_id))
        image_service.update(self.context, image_id, self.metadata,
                             self.image_stream, purge_props=False)
| apache-2.0 |
ccoss/fas | fas/feeds.py | 4 | 2066 | # -*- coding: utf-8 -*-
''' Provides feeds interface to FAS '''
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <ricky@fedoraproject.org>
# Mike McGrath <mmcgrath@redhat.com>
#
import urllib
from xml.dom import minidom
class Koji:
    ''' Provide fas feeds for koji '''

    def __init__(self, user_name,
                 url='http://publictest8/koji/recentbuilds?user='):
        # Fetch and parse the user's recent-builds feed over HTTP.
        # NOTE(review): the default URL points at a test host — presumably
        # callers override it; confirm before relying on the default.
        build_feed = minidom.parse(urllib.urlopen(url + user_name))
        try:
            self.user_link = build_feed.getElementsByTagName(
                'link')[0].childNodes[0].data
            # Maps each build's link to its 'title' and 'pubDate'.
            self.builds = {}
            for build in build_feed.getElementsByTagName('item'):
                link = build.getElementsByTagName('link')[0].childNodes[0].data
                self.builds[link] = {}
                self.builds[link]['title'] = build.getElementsByTagName(
                    'title')[0].childNodes[0].data
                self.builds[link]['pubDate'] = build.getElementsByTagName(
                    'pubDate')[0].childNodes[0].data
        except IndexError:
            # Empty/malformed feed: bail out quietly.  self.builds (and
            # possibly self.user_link) may be left unset in this case.
            return
| gpl-2.0 |
TileHalo/servo | tests/wpt/harness/wptrunner/update/state.py | 196 | 4417 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import cPickle as pickle
# Directory containing this module; used to locate the state file below.
here = os.path.abspath(os.path.split(__file__)[0])
class State(object):
    # Pickled state (doubling as a lock file) stored next to this module.
    filename = os.path.join(here, ".wpt-update.lock")

    def __new__(cls, logger):
        # Prefer restoring a previously saved state over creating a new one.
        rv = cls.load(logger)
        if rv is not None:
            logger.debug("Existing state found")
            return rv

        logger.debug("No existing state found")
        # object.__new__ takes no extra arguments when __new__ is
        # overridden; passing the logger here raises TypeError on
        # Python 3 (and relied on deprecated behaviour on Python 2).
        return object.__new__(cls)

    def __init__(self, logger):
        """Object containing state variables created when running Steps.

        On write the state is serialized to disk, such that it can be restored in
        the event that the program is interrupted before all steps are complete.
        Note that this only works well if the values are immutable; mutating an
        existing value will not cause the data to be serialized.

        Variables are set and get as attributes e.g. state_obj.spam = "eggs".

        :param parent: Parent State object or None if this is the root object.
        """
        # An instance restored by __new__/load() already carries its data;
        # skip reinitialisation so the loaded state is preserved.
        if hasattr(self, "_data"):
            return

        self._data = [{}]
        self._logger = logger
        self._index = 0

    def __getstate__(self):
        # The logger is not picklable; drop it here and restore it in load().
        rv = self.__dict__.copy()
        del rv["_logger"]
        return rv

    @classmethod
    def load(cls, logger):
        """Load saved state from a file"""
        try:
            with open(cls.filename) as f:
                try:
                    rv = pickle.load(f)
                    logger.debug("Loading data %r" % (rv._data,))
                    rv._logger = logger
                    rv._index = 0
                    return rv
                except EOFError:
                    logger.warning("Found empty state file")
        except IOError:
            # No state file (or unreadable): caller gets None.
            logger.debug("IOError loading stored state")

    def push(self, init_values):
        """Push a new clean state dictionary

        :param init_values: List of variable names in the current state dict to copy
                            into the new state dict."""

        return StateContext(self, init_values)

    def save(self):
        """Write the state to disk"""
        with open(self.filename, "w") as f:
            pickle.dump(self, f)

    def is_empty(self):
        # True only for a freshly-created, never-written state.
        return len(self._data) == 1 and self._data[0] == {}

    def clear(self):
        """Remove all state and delete the stored copy."""
        try:
            os.unlink(self.filename)
        except OSError:
            pass

        self._data = [{}]

    def __setattr__(self, key, value):
        # Underscore-prefixed names are internal plumbing; everything else
        # is a state variable and is persisted immediately on assignment.
        if key.startswith("_"):
            object.__setattr__(self, key, value)
        else:
            self._data[self._index][key] = value
            self.save()

    def __getattr__(self, key):
        if key.startswith("_"):
            raise AttributeError
        try:
            return self._data[self._index][key]
        except KeyError:
            raise AttributeError

    def __contains__(self, key):
        return key in self._data[self._index]

    def update(self, items):
        """Add a dictionary of {name: value} pairs to the state"""
        self._data[self._index].update(items)
        self.save()

    def keys(self):
        return self._data[self._index].keys()
class StateContext(object):
    """Context manager that pushes a fresh state dict on enter and pops it
    on exit (created via State.push)."""

    def __init__(self, state, init_values):
        self.state = state
        # Names copied from the current state dict into the pushed one.
        self.init_values = init_values

    def __enter__(self):
        if len(self.state._data) == self.state._index + 1:
            # This is the case where there is no stored state
            new_state = {}
            for key in self.init_values:
                new_state[key] = self.state._data[self.state._index][key]
            self.state._data.append(new_state)
        self.state._index += 1
        self.state._logger.debug("Incremented index to %s" % self.state._index)

    def __exit__(self, *args, **kwargs):
        if len(self.state._data) > 1:
            assert self.state._index == len(self.state._data) - 1
            self.state._data.pop()
            self.state._index -= 1
            self.state._logger.debug("Decremented index to %s" % self.state._index)
            assert self.state._index >= 0
        else:
            raise ValueError("Tried to pop the top state")
| mpl-2.0 |
kensipe/dcos | packages/adminrouter/extra/src/test-harness/tests/test_auth.py | 4 | 11132 | # Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import os
import time
import pytest
import requests
from generic_test_code.common import assert_endpoint_response
from util import GuardedSubprocess, SearchCriteria, auth_type_str
# Representative authenticated Admin Router endpoint used throughout the tests.
EXHIBITOR_PATH = "/exhibitor/foo/bar"
# Note(JP): this test assumes that the IAM is contacted when trying to reach
# /mesos_dns. This is not a good assumption. TODO: rewrite the test so that
# setting the User-Agent header is somehow tested differently.
# class TestAuthzIAMBackendQueryCommon:
# def test_if_master_ar_sets_correct_useragent_while_quering_iam(
# self, master_ar_process_pertest, mocker, valid_user_header):
# mocker.send_command(endpoint_id='http://127.0.0.1:8101',
# func_name='record_requests')
# assert_endpoint_response(
# master_ar_process_pertest,
# '/mesos_dns/v1/reflect/me',
# 200,
# headers=valid_user_header,
# )
# r_reqs = mocker.send_command(endpoint_id='http://127.0.0.1:8101',
# func_name='get_recorded_requests')
# assert len(r_reqs) == 1
# verify_header(r_reqs[0]['headers'], 'User-Agent', 'Master Admin Router')
class TestAuthnJWTValidator:
    """Tests scenarios where authentication token isn't provided or is provided
    in different supported places (cookie, header)"""

    def test_auth_token_not_provided(self, master_ar_process_perclass):
        log_messages = {
            "No auth token in request.": SearchCriteria(1, True),
            }

        assert_endpoint_response(
            master_ar_process_perclass, EXHIBITOR_PATH, 401, assert_error_log=log_messages)

    def test_invalid_auth_token_in_cookie(self, master_ar_process_perclass):
        log_messages = {
            "No auth token in request.": SearchCriteria(0, True),
            "Invalid token. Reason: invalid jwt string":
                SearchCriteria(1, True),
            }

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            401,
            assert_error_log=log_messages,
            cookies={"dcos-acs-auth-cookie": "invalid"},
            )

    # Note(JP): in the future we should simply test that only RS256 works, in
    # both variants.
    # def test_missmatched_auth_token_algo_in_cookie(
    #         self,
    #         master_ar_process_perclass,
    #         mismatch_alg_jwt_generator,
    #         repo_is_ee,
    #         ):
    #     log_messages = {
    #         ("Invalid token. Reason: whitelist unsupported alg: " +
    #          jwt_type_str(not repo_is_ee)): SearchCriteria(1, True),
    #     }

    #     token = mismatch_alg_jwt_generator(uid='user')
    #     assert_endpoint_response(
    #         master_ar_process_perclass,
    #         EXHIBITOR_PATH,
    #         401,
    #         assert_error_log=log_messages,
    #         cookies={"dcos-acs-auth-cookie": token},
    #         )

    def test_valid_auth_token_in_cookie_with_null_uid(
            self,
            master_ar_process_perclass,
            jwt_generator,
            ):
        log_messages = {
            "No auth token in request.": SearchCriteria(0, True),
            "Invalid token. Reason: invalid jwt string":
                SearchCriteria(0, True),
            "Unexpected token payload: missing uid.":
                SearchCriteria(1, True),
            }

        # A syntactically valid token whose payload lacks a usable uid.
        token = jwt_generator(uid=None)

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            401,
            assert_error_log=log_messages,
            cookies={"dcos-acs-auth-cookie": token},
            )

    def test_valid_auth_token_in_cookie(
            self,
            master_ar_process_perclass,
            jwt_generator):
        log_messages = {
            "No auth token in request.": SearchCriteria(0, True),
            "Invalid token. Reason: invalid jwt string":
                SearchCriteria(0, True),
            "UID from the valid DC/OS authentication token: `test`": SearchCriteria(1, True),
            }

        token = jwt_generator(uid='test')

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            200,
            assert_error_log=log_messages,
            cookies={"dcos-acs-auth-cookie": token},
            )

    def test_valid_auth_token(self, master_ar_process_perclass, valid_user_header):
        log_messages = {
            "UID from the valid DC/OS authentication token: `bozydar`":
                SearchCriteria(1, True),
            }

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            200,
            assert_error_log=log_messages,
            headers=valid_user_header,
            )

    def test_valid_auth_token_priority(
            self,
            master_ar_process_perclass,
            valid_user_header,
            jwt_generator,
            ):
        # The Authorization header token must win over the cookie token.
        log_messages = {
            "UID from the valid DC/OS authentication token: `bozydar`":
                SearchCriteria(1, True),
            "UID from the valid DC/OS authentication token: `test`":
                SearchCriteria(0, True),
            }

        token = jwt_generator(uid='test')

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            200,
            assert_error_log=log_messages,
            headers=valid_user_header,
            cookies={"dcos-acs-auth-cookie": token},
            )

    def test_valid_auth_token_without_uid(
            self,
            master_ar_process_perclass,
            jwt_generator,
            ):
        log_messages = {
            "Invalid token. Reason: Missing one of claims - \[ uid \]":
                SearchCriteria(1, True),
            }

        token = jwt_generator(uid='test', skip_uid_claim=True)

        auth_header = {'Authorization': 'token={}'.format(token)}

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            401,
            assert_error_log=log_messages,
            headers=auth_header,
            )

    def test_valid_auth_token_without_exp(
            self,
            master_ar_process_perclass,
            jwt_generator,
            ):
        # We accept "forever tokens"
        token = jwt_generator(uid='test', skip_exp_claim=True)

        auth_header = {'Authorization': 'token={}'.format(token)}

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            200,
            headers=auth_header,
            )

    def test_expired_auth_token(
            self,
            master_ar_process_perclass,
            jwt_generator,
            ):
        log_messages = {
            "Invalid token. Reason: 'exp' claim expired at ":
                SearchCriteria(1, True),
            }

        # Token expired 15 seconds ago.
        token = jwt_generator(uid='test', exp=time.time() - 15)

        auth_header = {'Authorization': 'token={}'.format(token)}

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            401,
            assert_error_log=log_messages,
            headers=auth_header,
            )

    def test_valid_auth_token_with_bearer_header(
            self,
            master_ar_process_perclass,
            jwt_generator,
            ):
        # We accept "forever tokens"
        token = jwt_generator(uid='test')

        auth_header = {'Authorization': 'Bearer {}'.format(token)}

        assert_endpoint_response(
            master_ar_process_perclass,
            EXHIBITOR_PATH,
            200,
            headers=auth_header,
            )
class TestAuthCustomErrorPages:
    """Verify that the custom 401 error page shipped with AR is served."""

    def test_correct_401_page_content(self, master_ar_process_pertest, repo_is_ee):
        url = master_ar_process_pertest.make_url_from_path(EXHIBITOR_PATH)

        resp = requests.get(url)

        assert resp.status_code == 401
        assert resp.headers["Content-Type"] == "text/html; charset=UTF-8"
        assert resp.headers["WWW-Authenticate"] == auth_type_str(repo_is_ee)

        # The served body must match the 401.html file on disk.
        path_401 = os.environ.get('AUTH_ERROR_PAGE_DIR_PATH') + "/401.html"
        with open(path_401, 'rb') as f:
            resp_content = resp.content.decode('utf-8').strip()
            file_content = f.read().decode('utf-8').strip()
            assert resp_content == file_content
class TestAuthPrecedence:
    """Authentication must be enforced before routing outcomes (404/503)
    are produced for each proxied endpoint."""

    def test_if_service_endpoint_auth_precedence_is_enforced(
            self,
            valid_user_header,
            master_ar_process_pertest):
        url = master_ar_process_pertest.make_url_from_path("/service/i/do/not/exist")

        # Unauthenticated: 401 even though the service does not exist.
        resp = requests.get(
            url,
            allow_redirects=False)

        assert resp.status_code == 401

        resp = requests.get(
            url,
            allow_redirects=False,
            headers=valid_user_header)

        assert resp.status_code == 404

    @pytest.mark.parametrize("path", ["/system/v1/agent/{}/logs{}", "/agent/{}{}"])
    def test_if_agent_endpoint_auth_precedence_is_enforced(
            self,
            valid_user_header,
            master_ar_process_pertest,
            path):
        # Nonexistent agent ID with an arbitrary sub-path.
        uri = path.format("bdcd424a-b59e-4df4-b492-b54e38926bd8-S0", "/foo/bar")

        url = master_ar_process_pertest.make_url_from_path(uri)

        resp = requests.get(
            url,
            allow_redirects=False)

        assert resp.status_code == 401

        resp = requests.get(
            url,
            allow_redirects=False,
            headers=valid_user_header)

        assert resp.status_code == 404

    def test_if_mleader_endpoint_auth_precedence_is_enforced(
            self,
            valid_user_header,
            master_ar_process_pertest,
            mocker):
        # We have to remove the leader in order to make AR respond with 404
        # which has a chance of being processed earlier than auth.
        mocker.send_command(endpoint_id='http://127.0.0.1:8080',
                            func_name='remove_leader')

        url = master_ar_process_pertest.make_url_from_path(
            "/system/v1/leader/marathon/foo/bar")

        resp = requests.get(
            url,
            allow_redirects=False)

        assert resp.status_code == 401

        resp = requests.get(
            url,
            allow_redirects=False,
            headers=valid_user_header)

        assert resp.status_code == 503

    def test_if_historyservice_endpoint_auth_precedence_is_enforced(
            self, valid_user_header, mocker, nginx_class):
        ar = nginx_class(host_ip=None)
        url = ar.make_url_from_path('/dcos-history-service/foo/bar')

        with GuardedSubprocess(ar):
            resp = requests.get(url, allow_redirects=False)

            assert resp.status_code == 401

            resp = requests.get(url, allow_redirects=False, headers=valid_user_header)

            assert resp.status_code == 503
| apache-2.0 |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/gunicorn/workers/ggevent.py | 7 | 4871 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import os
import sys
from datetime import datetime
from functools import partial
import time
# workaround on osx, disable kqueue
# NOTE(review): must run before gevent is imported below so the event loop
# picks a different backend; the reason kqueue misbehaves on Darwin predates
# this file — confirm whether it is still needed.
if sys.platform == "darwin":
    os.environ['EVENT_NOKQUEUE'] = "1"
try:
import gevent
except ImportError:
raise RuntimeError("You need gevent installed to use this worker.")
from gevent.pool import Pool
from gevent.server import StreamServer
from gevent import pywsgi
import gunicorn
from gunicorn.workers.async import AsyncWorker
# Server identification string advertised via the SERVER_SOFTWARE WSGI key.
VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)

# Static portion of the WSGI environ shared by every request served by the
# pywsgi-based server (see PyWSGIServer below).
BASE_WSGI_ENV = {
    'GATEWAY_INTERFACE': 'CGI/1.1',
    'SERVER_SOFTWARE': VERSION,
    'SCRIPT_NAME': '',
    'wsgi.version': (1, 0),
    'wsgi.multithread': False,
    'wsgi.multiprocess': False,
    'wsgi.run_once': False
}
class GeventWorker(AsyncWorker):
    """Async worker that serves each gunicorn socket with a gevent server,
    limiting concurrency with a shared greenlet pool."""

    # Subclasses may set these to use a gevent server/handler class instead
    # of the generic StreamServer + self.handle combination.
    server_class = None
    wsgi_handler = None

    @classmethod
    def setup(cls):
        # Monkey-patch the stdlib so blocking calls cooperate with gevent.
        from gevent import monkey
        monkey.noisy = False
        monkey.patch_all()

    def timeout_ctx(self):
        # Keepalive timeout that returns False instead of raising on expiry.
        return gevent.Timeout(self.cfg.keepalive, False)

    def run(self):
        servers = []
        ssl_args = {}

        if self.cfg.is_ssl:
            ssl_args = dict(server_side=True,
                            do_handshake_on_connect=False, **self.cfg.ssl_options)

        # One gevent server (with its own pool) per listening socket.
        for s in self.sockets:
            s.setblocking(1)
            pool = Pool(self.worker_connections)
            if self.server_class is not None:
                server = self.server_class(
                    s, application=self.wsgi, spawn=pool, log=self.log,
                    handler_class=self.wsgi_handler, **ssl_args)
            else:
                hfun = partial(self.handle, s)
                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)

            server.start()
            servers.append(server)

        # Heartbeat loop: notify the arbiter and exit if re-parented.
        pid = os.getpid()
        try:
            while self.alive:
                self.notify()
                if pid == os.getpid() and self.ppid != os.getppid():
                    self.log.info("Parent changed, shutting down: %s", self)
                    break

                gevent.sleep(1.0)

        except KeyboardInterrupt:
            pass

        # NOTE(review): the bare `except:` below swallows *every* error
        # during this best-effort teardown (including SystemExit) —
        # presumably intentional for shutdown robustness; confirm.
        try:
            # Stop accepting requests
            [server.stop_accepting() for server in servers]

            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                accepting = 0
                for server in servers:
                    if server.pool.free_count() != server.pool.size:
                        accepting += 1

                # if no server is accepting a connection, we can exit
                if not accepting:
                    return

                self.notify()
                gevent.sleep(1.0)

            # Force kill all active the handlers
            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
            [server.stop(timeout=1) for server in servers]
        except:
            pass

    def handle_request(self, *args):
        # GreenletExit simply means the connection handler was killed.
        try:
            super(GeventWorker, self).handle_request(*args)
        except gevent.GreenletExit:
            pass

    if gevent.version_info[0] == 0:

        def init_process(self):
            #gevent 0.13 and older doesn't reinitialize dns for us after forking
            #here's the workaround
            import gevent.core
            gevent.core.dns_shutdown(fail_requests=1)
            gevent.core.dns_init()
            super(GeventWorker, self).init_process()
class GeventResponse(object):
    """Lightweight record of a finished response, handed to the access log."""

    status = None
    headers = None
    response_length = None

    def __init__(self, status, headers, clength):
        # Map constructor arguments onto the class-level defaults above.
        self.status, self.headers, self.response_length = (
            status, headers, clength)
class PyWSGIHandler(pywsgi.WSGIHandler):
    """pywsgi request handler that routes access logging through gunicorn's
    logger and enriches the WSGI environ."""

    def log_request(self):
        start = datetime.fromtimestamp(self.time_start)
        finish = datetime.fromtimestamp(self.time_finish)
        response_time = finish - start
        # Package the response details the way gunicorn's access log expects.
        resp = GeventResponse(self.status, self.response_headers,
                              self.response_length)
        req_headers = [h.split(":", 1) for h in self.headers.headers]
        self.server.log.access(resp, req_headers, self.environ, response_time)

    def get_environ(self):
        env = super(PyWSGIHandler, self).get_environ()
        # Expose the raw socket and unparsed request path to applications.
        env['gunicorn.sock'] = self.socket
        env['RAW_URI'] = self.path
        return env
class PyWSGIServer(pywsgi.WSGIServer):
    # Share the static WSGI environ defined at module level.
    base_env = BASE_WSGI_ENV
class GeventPyWSGIWorker(GeventWorker):
    "The Gevent StreamServer based workers."
    # Use gevent's pure-Python WSGI stack (pywsgi) instead of the default
    # HTTP machinery; the handler above wires its logging into gunicorn.
    server_class = PyWSGIServer
    wsgi_handler = PyWSGIHandler
| apache-2.0 |
firewalld/firewalld | src/firewall/core/logger.py | 3 | 31038 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007,2012 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Public API of this module.
__all__ = [ "LogTarget", "FileLog", "Logger", "log" ]
import sys
import types
import time
import inspect
import fnmatch
import syslog
import traceback
import fcntl
import os.path
import os
# ---------------------------------------------------------------------------
# abstract class for logging targets
class LogTarget(object):
    """Abstract base class for all logging targets.

    Subclasses must implement write/flush/close.  ``fd`` holds the
    underlying stream object once the target has one (None until then).
    """

    def __init__(self):
        # No stream attached yet; concrete targets set this up.
        self.fd = None

    def write(self, data, level, logger, is_debug=0):
        """Write already-formatted log data for the given level."""
        raise NotImplementedError("LogTarget.write is an abstract method")

    def close(self):
        """Close the target, releasing any underlying resources."""
        raise NotImplementedError("LogTarget.close is an abstract method")

    def flush(self):
        """Flush buffered data out to the target."""
        raise NotImplementedError("LogTarget.flush is an abstract method")
# ---------------------------------------------------------------------------
# private class for stdout
class _StdoutLog(LogTarget):
    """Logging target writing to sys.stdout (log level is ignored)."""

    def __init__(self):
        LogTarget.__init__(self)
        self.fd = sys.stdout

    def write(self, data, level, logger, is_debug=0):
        # Level and debug flag are irrelevant for plain stream output;
        # flush immediately so messages appear without delay.
        self.fd.write(data)
        self.flush()

    def flush(self):
        self.fd.flush()

    def close(self):
        # Never actually close the interpreter's stdout; just drain it.
        self.flush()
# ---------------------------------------------------------------------------
# private class for stderr
class _StderrLog(_StdoutLog):
    # Identical to the stdout target, but writes to sys.stderr.
    def __init__(self):
        _StdoutLog.__init__(self)
        self.fd = sys.stderr
# ---------------------------------------------------------------------------
# private class for syslog
class _SyslogLog(LogTarget):
    """Logging target forwarding messages to syslog."""

    def __init__(self):
        # Only initialize LogTarget here; fd stays None because syslog
        # needs no file object.
        LogTarget.__init__(self)
        #
        # Derived from: https://github.com/canvon/firewalld/commit/af0edfee1cc1891b7b13f302ca5911b24e9b0f13
        #
        # Explicit openlog works around Python issue 27875 ("Syslogs
        # /usr/sbin/foo as /foo instead of as foo"): use the program's
        # basename as ident, log the PID, and use the "daemon" facility.
        syslog.openlog(os.path.basename(sys.argv[0]),
                       syslog.LOG_PID, syslog.LOG_DAEMON)

    def write(self, data, level, logger, is_debug=0):
        # Map the Logger level onto a syslog priority; unknown levels
        # (e.g. TRACEBACK) fall through to syslog's default priority.
        if is_debug:
            priority = syslog.LOG_DEBUG
        elif level >= logger.INFO1:
            priority = syslog.LOG_INFO
        elif level == logger.WARNING:
            priority = syslog.LOG_WARNING
        elif level == logger.ERROR:
            priority = syslog.LOG_ERR
        elif level == logger.FATAL:
            priority = syslog.LOG_CRIT
        else:
            priority = None
        # syslog terminates entries itself; drop one trailing newline.
        if data.endswith("\n"):
            data = data[:-1]
        if not data:
            return
        if priority is None:
            syslog.syslog(data)
        else:
            syslog.syslog(priority, data)

    def close(self):
        syslog.closelog()

    def flush(self):
        # syslog(3) messages are delivered immediately; nothing to flush.
        pass
# ---------------------------------------------------------------------------
class FileLog(LogTarget):
    """Logging target writing to a file.

    The file is opened lazily on the first write, so constructing a
    FileLog never touches the filesystem by itself.
    """

    def __init__(self, filename, mode="w"):
        LogTarget.__init__(self)
        self.filename = filename
        self.mode = mode

    def open(self):
        """Open the log file with 0640 permissions and close-on-exec set."""
        if self.fd:
            return
        flags = os.O_CREAT | os.O_WRONLY
        if self.mode.startswith('a'):
            flags |= os.O_APPEND
        # NOTE(review): mode "w" does not add os.O_TRUNC, so a shorter
        # rewrite leaves stale bytes from an older, longer file -- confirm
        # this is intended before changing it.
        raw_fd = os.open(self.filename, flags, 0o640)
        # os.open's mode only applies on creation; enforce 0640 on
        # pre-existing files as well.
        os.fchmod(raw_fd, 0o640)
        # Wrap the raw descriptor in a file object.
        self.fd = os.fdopen(raw_fd, self.mode)
        # Don't leak the log file descriptor into child processes.
        fcntl.fcntl(self.fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    def write(self, data, level, logger, is_debug=0):
        if not self.fd:
            self.open()
        self.fd.write(data)
        self.fd.flush()

    def flush(self):
        if self.fd:
            self.fd.flush()

    def close(self):
        if self.fd:
            self.fd.close()
            self.fd = None
# ---------------------------------------------------------------------------
class Logger(object):
    r"""
    Format string:
        %(class)s      Calling class the function belongs to, else empty
        %(date)s       Date using Logger.date_format, see time module
        %(domain)s     Full Domain: %(module)s.%(class)s.%(function)s
        %(file)s       Filename of the module
        %(function)s   Function name, empty in __main__
        %(label)s      Label according to log function call from Logger.label
        %(level)d      Internal logging level
        %(line)d       Line number in module
        %(module)s     Module name
        %(message)s    Log message

    Standard levels:
        FATAL                 Fatal error messages
        ERROR                 Error messages
        WARNING               Warning messages
        INFOx, x in [1..5]    Information
        DEBUGy, y in [1..10]  Debug messages
        NO_INFO               No info output
        NO_DEBUG              No debug output
        INFO_MAX              Maximum info level
        DEBUG_MAX             Maximum debug level

        x and y depend on info_max and debug_max from Logger class
        initialization. See __init__ function.

    Default logging targets:
        stdout        Logs to stdout
        stderr        Logs to stderr
        syslog        Logs to syslog

    Additional arguments for logging functions (fatal, error, warning, info
    and debug):
        nl      Disable newline at the end with nl=0, default is nl=1.
        fmt     Format string for this logging entry, overloads global format
                string. Example: fmt="%(file)s:%(line)d %(message)s"
        nofmt   Only output message with nofmt=1. The nofmt argument wins over
                the fmt argument.

    Example:
        from logger import log
        log.setInfoLogLevel(log.INFO1)
        log.setDebugLogLevel(log.DEBUG1)
        for i in range(1, log.INFO_MAX+1):
            log.setInfoLogLabel(i, "INFO%d: " % i)
        log.setFormat("%(date)s %(module)s:%(line)d [%(domain)s] %(label)s: "
                      "%(level)d %(message)s")
        log.setDateFormat("%Y-%m-%d %H:%M:%S")
        fl = FileLog("/tmp/log", "a")
        log.addInfoLogging("*", fl)
        log.addDebugLogging("*", fl)
        log.addInfoLogging("*", log.syslog, fmt="%(label)s%(message)s")
        log.debug3("debug3")
        log.debug2("debug2")
        log.debug1("debug1")
        log.info2("info2")
        log.info1("info1")
        log.warning("warning\n", nl=0)
        log.error("error\n", nl=0)
        log.fatal("fatal")
        log.info(log.INFO1, "nofmt info", nofmt=1)
    """

    # Sentinel / standard level constants (info levels 1..INFO_MAX and
    # debug levels 1..DEBUG_MAX are generated in __init__).
    ALL = -5
    NOTHING = -4
    FATAL = -3
    TRACEBACK = -2
    ERROR = -1
    WARNING = 0

    # Additional levels are generated in class initialization

    # Shared default targets, created once at class-definition time.
    stdout = _StdoutLog()
    stderr = _StderrLog()
    syslog = _SyslogLog()

    def __init__(self, info_max=5, debug_max=10):
        """ Logger class initialization """
        # Per-domain level settings and labels; separate bookkeeping for
        # the info and the debug hierarchies.
        self._level = { }
        self._debug_level = { }
        self._format = ""
        self._date_format = ""
        self._label = { }
        self._debug_label = { }
        # level -> list of (domain, target, fmt) tuples
        self._logging = { }
        self._debug_logging = { }
        # level -> list of configured domains (cache built by _genDomains)
        self._domains = { }
        self._debug_domains = { }

        # INFO1 is required for standard log level
        if info_max < 1:
            raise ValueError("Logger: info_max %d is too low" % info_max)
        if debug_max < 0:
            raise ValueError("Logger: debug_max %d is too low" % debug_max)

        self.NO_INFO = self.WARNING # = 0
        self.INFO_MAX = info_max
        self.NO_DEBUG = 0
        self.DEBUG_MAX = debug_max

        self.setInfoLogLabel(self.FATAL, "FATAL ERROR: ")
        self.setInfoLogLabel(self.TRACEBACK, "")
        self.setInfoLogLabel(self.ERROR, "ERROR: ")
        self.setInfoLogLabel(self.WARNING, "WARNING: ")

        # generate info levels and infox functions; the immediately-invoked
        # outer lambda binds the current _level by value so each infoN
        # closure logs at its own level.
        for _level in range(1, self.INFO_MAX+1):
            setattr(self, "INFO%d" % _level, _level)
            self.setInfoLogLabel(_level, "")
            setattr(self, "info%d" % (_level),
                    (lambda self, x:
                     lambda message, *args, **kwargs:
                     self.info(x, message, *args, **kwargs))(self, _level)) # pylint: disable=E0602

        # generate debug levels and debugx functions (same closure trick)
        for _level in range(1, self.DEBUG_MAX+1):
            setattr(self, "DEBUG%d" % _level, _level)
            self.setDebugLogLabel(_level, "DEBUG%d: " % _level)
            setattr(self, "debug%d" % (_level),
                    (lambda self, x:
                     lambda message, *args, **kwargs:
                     self.debug(x, message, *args, **kwargs))(self, _level)) # pylint: disable=E0602

        # set initial log levels, formats and targets
        self.setInfoLogLevel(self.INFO1)
        self.setDebugLogLevel(self.NO_DEBUG)
        self.setFormat("%(label)s%(message)s")
        self.setDateFormat("%d %b %Y %H:%M:%S")
        self.setInfoLogging("*", self.stderr, [ self.FATAL, self.ERROR,
                                                self.WARNING ])
        self.setInfoLogging("*", self.stdout,
                            [ i for i in range(self.INFO1, self.INFO_MAX+1) ])
        self.setDebugLogging("*", self.stdout,
                             [ i for i in range(1, self.DEBUG_MAX+1) ])

    def close(self):
        """ Close all logging targets """
        for level in range(self.FATAL, self.DEBUG_MAX+1):
            if level not in self._logging:
                continue
            # entries are (domain, target, fmt); only the target is needed
            for (dummy, target, dummy) in self._logging[level]:
                target.close()

    def getInfoLogLevel(self, domain="*"):
        """ Get info log level. """
        self._checkDomain(domain)
        if domain in self._level:
            return self._level[domain]
        return self.NOTHING

    def setInfoLogLevel(self, level, domain="*"):
        """ Set log level [NOTHING .. INFO_MAX] """
        self._checkDomain(domain)
        # clamp instead of raising
        if level < self.NOTHING:
            level = self.NOTHING
        if level > self.INFO_MAX:
            level = self.INFO_MAX
        self._level[domain] = level

    def getDebugLogLevel(self, domain="*"):
        """ Get debug log level. """
        self._checkDomain(domain)
        if domain in self._debug_level:
            return self._debug_level[domain] + self.NO_DEBUG
        return self.NO_DEBUG

    def setDebugLogLevel(self, level, domain="*"):
        """ Set debug log level [NO_DEBUG .. DEBUG_MAX] """
        self._checkDomain(domain)
        # clamp instead of raising
        if level < 0:
            level = 0
        if level > self.DEBUG_MAX:
            level = self.DEBUG_MAX
        self._debug_level[domain] = level - self.NO_DEBUG

    def getFormat(self):
        return self._format

    def setFormat(self, _format):
        self._format = _format

    def getDateFormat(self):
        return self._date_format

    def setDateFormat(self, _format):
        self._date_format = _format

    def setInfoLogLabel(self, level, label):
        """ Set log label for level. Level can be a single level or an array
        of levels. """
        levels = self._getLevels(level)
        for level in levels:
            self._checkLogLevel(level, min_level=self.FATAL,
                                max_level=self.INFO_MAX)
            self._label[level] = label

    def setDebugLogLabel(self, level, label):
        """ Set log label for level. Level can be a single level or an array
        of levels. """
        levels = self._getLevels(level, is_debug=1)
        for level in levels:
            self._checkLogLevel(level, min_level=self.INFO1,
                                max_level=self.DEBUG_MAX)
            self._debug_label[level] = label

    def setInfoLogging(self, domain, target, level=ALL, fmt=None):
        """ Set info log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._setLogging(domain, target, level, fmt, is_debug=0)

    def setDebugLogging(self, domain, target, level=ALL, fmt=None):
        """ Set debug log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._setLogging(domain, target, level, fmt, is_debug=1)

    def addInfoLogging(self, domain, target, level=ALL, fmt=None):
        """ Add info log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._addLogging(domain, target, level, fmt, is_debug=0)

    def addDebugLogging(self, domain, target, level=ALL, fmt=None):
        """ Add debug log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._addLogging(domain, target, level, fmt, is_debug=1)

    def delInfoLogging(self, domain, target, level=ALL, fmt=None):
        """ Delete info log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._delLogging(domain, target, level, fmt, is_debug=0)

    def delDebugLogging(self, domain, target, level=ALL, fmt=None):
        """ Delete debug log target for domain and level. Level can be a single
        level or an array of levels. Use level ALL to set for all levels.
        If no format is specified, the default format will be used. """
        self._delLogging(domain, target, level, fmt, is_debug=1)

    def isInfoLoggingHere(self, level):
        """ Is there currently any info logging for this log level (and
        domain)? """
        return self._isLoggingHere(level, is_debug=0)

    def isDebugLoggingHere(self, level):
        """ Is there currently any debug logging for this log level (and
        domain)? """
        return self._isLoggingHere(level, is_debug=1)

    ### log functions

    def fatal(self, _format, *args, **kwargs):
        """ Fatal error log. """
        self._checkKWargs(kwargs)
        kwargs["is_debug"] = 0
        self._log(self.FATAL, _format, *args, **kwargs)

    def error(self, _format, *args, **kwargs):
        """ Error log. """
        self._checkKWargs(kwargs)
        kwargs["is_debug"] = 0
        self._log(self.ERROR, _format, *args, **kwargs)

    def warning(self, _format, *args, **kwargs):
        """ Warning log. """
        self._checkKWargs(kwargs)
        kwargs["is_debug"] = 0
        self._log(self.WARNING, _format, *args, **kwargs)

    def info(self, level, _format, *args, **kwargs):
        """ Information log using info level [1..info_max].
        There are additional infox functions according to info_max from
        __init__"""
        self._checkLogLevel(level, min_level=1, max_level=self.INFO_MAX)
        self._checkKWargs(kwargs)
        kwargs["is_debug"] = 0
        self._log(level+self.NO_INFO, _format, *args, **kwargs)

    def debug(self, level, _format, *args, **kwargs):
        """ Debug log using debug level [1..debug_max].
        There are additional debugx functions according to debug_max
        from __init__"""
        self._checkLogLevel(level, min_level=1, max_level=self.DEBUG_MAX)
        self._checkKWargs(kwargs)
        kwargs["is_debug"] = 1
        self._log(level, _format, *args, **kwargs)

    def exception(self):
        """ Log the current exception's traceback at TRACEBACK level. """
        # NOTE(review): args=[] / kwargs={} are passed as *keyword*
        # arguments and therefore land in _log's **kwargs, not in its
        # *args -- confirm this quirk is intentional.
        self._log(self.TRACEBACK, traceback.format_exc(), args=[], kwargs={})

    ### internal functions

    def _checkLogLevel(self, level, min_level, max_level):
        # raise if the level is outside [min_level, max_level]
        if level < min_level or level > max_level:
            raise ValueError("Level %d out of range, should be [%d..%d]." % \
                             (level, min_level, max_level))

    def _checkKWargs(self, kwargs):
        # only nl, fmt and nofmt are valid logging keyword arguments
        if not kwargs:
            return
        for key in kwargs.keys():
            if key not in [ "nl", "fmt", "nofmt" ]:
                raise ValueError("Key '%s' is not allowed as argument for logging." % key)

    def _checkDomain(self, domain):
        if not domain or domain == "":
            raise ValueError("Domain '%s' is not valid." % domain)

    def _getLevels(self, level, is_debug=0):
        """ Generate log level array. """
        if level != self.ALL:
            if isinstance(level, list) or isinstance(level, tuple):
                levels = level
            else:
                levels = [ level ]
            for level in levels:
                if is_debug:
                    self._checkLogLevel(level, min_level=1,
                                        max_level=self.DEBUG_MAX)
                else:
                    self._checkLogLevel(level, min_level=self.FATAL,
                                        max_level=self.INFO_MAX)
        else:
            # NOTE(review): range() excludes the stop value, so the ALL
            # case omits DEBUG_MAX / INFO_MAX here, unlike the explicit
            # "+1" ranges used in __init__ -- confirm whether intended.
            if is_debug:
                levels = [ i for i in range(self.DEBUG1, self.DEBUG_MAX) ]
            else:
                levels = [ i for i in range(self.FATAL, self.INFO_MAX) ]
        return levels

    def _getTargets(self, target):
        """ Generate target array. """
        if isinstance(target, list) or isinstance(target, tuple):
            targets = target
        else:
            targets = [ target ]
        for _target in targets:
            if not issubclass(_target.__class__, LogTarget):
                raise ValueError("'%s' is no valid logging target." % \
                                 _target.__class__.__name__)
        return targets

    def _genDomains(self, is_debug=0):
        # private method for self._domains array creation, speeds up
        """ Generate dict with domain by level. """
        if is_debug:
            _domains = self._debug_domains
            _logging = self._debug_logging
            _range = ( 1, self.DEBUG_MAX+1 )
        else:
            _domains = self._domains
            _logging = self._logging
            _range = ( self.FATAL, self.INFO_MAX+1 )
        if len(_domains) > 0:
            _domains.clear()
        for level in range(_range[0], _range[1]):
            if level not in _logging:
                continue
            for (domain, dummy, dummy) in _logging[level]:
                # NOTE(review): this tests the domain string against
                # _domains' keys (which are level ints), so it is always
                # true and duplicates are possible -- confirm intent.
                if domain not in _domains:
                    _domains.setdefault(level, [ ]).append(domain)

    def _setLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # replace any existing (domain, target, fmt) entries per level
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for level in levels:
            for target in targets:
                _logging[level] = [ (domain, target, fmt) ]
        self._genDomains(is_debug)

    def _addLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # append (domain, target, fmt) entries, keeping existing ones
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for level in levels:
            for target in targets:
                _logging.setdefault(level, [ ]).append((domain, target, fmt))
        self._genDomains(is_debug)

    def _delLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # remove matching entries; raises only for explicit levels
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for _level in levels:
            for target in targets:
                if _level not in _logging:
                    continue
                if (domain, target, fmt) in _logging[_level]:
                    _logging[_level].remove( (domain, target, fmt) )
                    if len(_logging[_level]) == 0:
                        del _logging[_level]
                    continue
                # NOTE(review): "mathing" typo in the message text is kept;
                # fixing it would change runtime output.
                if level != self.ALL:
                    raise ValueError("No mathing logging for " \
                                     "level %d, domain %s, target %s and format %s." % \
                                     (_level, domain, target.__class__.__name__, fmt))
        self._genDomains(is_debug)

    def _isLoggingHere(self, level, is_debug=0):
        # True if a configured target's domain matches the caller's domain
        _dict = self._genDict(level, is_debug)
        if not _dict:
            return False
        point_domain = _dict["domain"] + "."
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        # do we need to log?
        for (domain, dummy, dummy) in _logging[level]:
            if domain == "*" or \
               point_domain.startswith(domain) or \
               fnmatch.fnmatchcase(_dict["domain"], domain):
                return True
        return False

    def _getClass(self, frame):
        """ Function to get calling class. Returns class or None. """
        # get class by first function argument, if there are any
        if frame.f_code.co_argcount > 0:
            selfname = frame.f_code.co_varnames[0]
            if selfname in frame.f_locals:
                _self = frame.f_locals[selfname]
                obj = self._getClass2(_self.__class__, frame.f_code)
                if obj:
                    return obj
        module = inspect.getmodule(frame.f_code)
        code = frame.f_code
        # function in module?
        # NOTE(review): "func_code" is the Python-2 attribute name while
        # "__code__" is Python 3 -- this mixed usage and types.ClassType
        # below only work on Python 2; confirm target interpreter.
        if code.co_name in module.__dict__:
            if hasattr(module.__dict__[code.co_name], "func_code") and \
               module.__dict__[code.co_name].__code__ == code:
                return None
        # class in module
        for (dummy, obj) in module.__dict__.items():
            if isinstance(obj, types.ClassType):
                if hasattr(obj, code.co_name):
                    value = getattr(obj, code.co_name)
                    if isinstance(value, types.FunctionType):
                        if value.__code__ == code:
                            return obj
        # nothing found
        return None

    def _getClass2(self, obj, code):
        """ Internal function to get calling class. Returns class or None. """
        # depth-first search of the class and its bases for a method whose
        # code object matches the running frame's code
        for value in obj.__dict__.values():
            if isinstance(value, types.FunctionType):
                if value.__code__ == code:
                    return obj
        for base in obj.__bases__:
            _obj = self._getClass2(base, code)
            if _obj:
                return _obj
        return None

    # internal log class
    def _log(self, level, _format, *args, **kwargs):
        # dispatch one message to every matching target, once per target
        is_debug = 0
        if "is_debug" in kwargs:
            is_debug = kwargs["is_debug"]
        nl = 1
        if "nl" in kwargs:
            nl = kwargs["nl"]
        nofmt = 0
        if "nofmt" in kwargs:
            nofmt = kwargs["nofmt"]
        _dict = self._genDict(level, is_debug)
        if not _dict:
            return
        if len(args) > 1:
            _dict['message'] = _format % args
        elif len(args) == 1: # needed for _format % _dict
            _dict['message'] = _format % args[0]
        else:
            _dict['message'] = _format
        point_domain = _dict["domain"] + "."
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        used_targets = [ ]
        # log to target(s); note that _format is rebound here to the
        # per-entry format from the logging configuration
        for (domain, target, _format) in _logging[level]:
            if target in used_targets:
                continue
            if domain == "*" \
               or point_domain.startswith(domain+".") \
               or fnmatch.fnmatchcase(_dict["domain"], domain):
                if not _format:
                    _format = self._format
                if "fmt" in kwargs:
                    _format = kwargs["fmt"]
                if nofmt:
                    target.write(_dict["message"], level, self, is_debug)
                else:
                    target.write(_format % _dict, level, self, is_debug)
                if nl: # newline
                    target.write("\n", level, self, is_debug)
                used_targets.append(target)

    # internal function to generate the dict, needed for logging
    def _genDict(self, level, is_debug=0):
        """ Internal function. """
        # Returns the format-substitution dict for the caller, or None
        # when nothing is configured to log at this level/domain.
        check_domains = [ ]
        simple_match = False

        if is_debug:
            _dict = self._debug_level
            _domains = self._debug_domains
            _label = self._debug_label
        else:
            _dict = self._level
            _domains = self._domains
            _label = self._label

        # no debug
        for domain in _dict:
            if domain == "*":
                # '*' matches everything: simple match
                if _dict[domain] >= level:
                    simple_match = True
                    if len(check_domains) > 0:
                        check_domains = [ ]
                    break
            else:
                if _dict[domain] >= level:
                    check_domains.append(domain)

        if not simple_match and len(check_domains) < 1:
            return None

        if level not in _domains:
            return None

        f = inspect.currentframe()

        # go outside of logger module as long as there is a lower frame
        while f and f.f_back and f.f_globals["__name__"] == self.__module__:
            f = f.f_back

        if not f:
            raise ValueError("Frame information not available.")

        # get module name
        module_name = f.f_globals["__name__"]

        # simple module match test for all entries of check_domain
        point_module = module_name + "."
        for domain in check_domains:
            if point_module.startswith(domain):
                # found domain in module name
                check_domains = [ ]
                break

        # get code
        co = f.f_code

        # optimization: bail out early if domain can not match at all
        _len = len(module_name)
        for domain in _domains[level]:
            i = domain.find("*")
            if i == 0:
                continue
            elif i > 0:
                d = domain[:i]
            else:
                d = domain
            if _len >= len(d):
                if not module_name.startswith(d):
                    return None
            else:
                if not d.startswith(module_name):
                    return None

        # generate _dict for format output
        level_str = ""
        if level in _label:
            level_str = _label[level]
        _dict = { 'file': co.co_filename,
                  'line': f.f_lineno,
                  'module': module_name,
                  'class': '',
                  'function': co.co_name,
                  'domain': '',
                  'label' : level_str,
                  'level' : level,
                  'date' : time.strftime(self._date_format, time.localtime()) }
        if _dict["function"] == "?":
            _dict["function"] = ""

        # domain match needed?
        domain_needed = False
        for domain in _domains[level]:
            # standard domain, matches everything
            if domain == "*":
                continue
            # domain is needed
            domain_needed = True
            break

        # do we need to get the class object?  (frame inspection for the
        # class is expensive, so only do it when the format or a domain
        # filter actually needs it)
        if self._format.find("%(domain)") >= 0 or \
           self._format.find("%(class)") >= 0 or \
           domain_needed or \
           len(check_domains) > 0:
            obj = self._getClass(f)
            if obj:
                _dict["class"] = obj.__name__

        # build domain string: module[.class][.function]
        _dict["domain"] = "" + _dict["module"]
        if _dict["class"] != "":
            _dict["domain"] += "." + _dict["class"]
        if _dict["function"] != "":
            _dict["domain"] += "." + _dict["function"]

        if len(check_domains) < 1:
            return _dict

        point_domain = _dict["domain"] + "."
        for domain in check_domains:
            if point_domain.startswith(domain) or \
               fnmatch.fnmatchcase(_dict["domain"], domain):
                return _dict

        return None
# ---------------------------------------------------------------------------
# Global logging object, shared module-level singleton used by the whole
# program (import as "from ... import log").
log = Logger()
# ---------------------------------------------------------------------------
"""
# Example
if __name__ == '__main__':
log.setInfoLogLevel(log.INFO2)
log.setDebugLogLevel(log.DEBUG5)
for i in range(log.INFO1, log.INFO_MAX+1):
log.setInfoLogLabel(i, "INFO%d: " % i)
for i in range(log.DEBUG1, log.DEBUG_MAX+1):
log.setDebugLogLabel(i, "DEBUG%d: " % i)
log.setFormat("%(date)s %(module)s:%(line)d %(label)s"
"%(message)s")
log.setDateFormat("%Y-%m-%d %H:%M:%S")
fl = FileLog("/tmp/log", "a")
log.addInfoLogging("*", fl)
log.delDebugLogging("*", log.stdout)
log.setDebugLogging("*", log.stdout, [ log.DEBUG1, log.DEBUG2 ] )
log.addDebugLogging("*", fl)
# log.addInfoLogging("*", log.syslog, fmt="%(label)s%(message)s")
# log.addDebugLogging("*", log.syslog, fmt="%(label)s%(message)s")
log.debug10("debug10")
log.debug9("debug9")
log.debug8("debug8")
log.debug7("debug7")
log.debug6("debug6")
log.debug5("debug5")
log.debug4("debug4")
log.debug3("debug3")
log.debug2("debug2", fmt="%(file)s:%(line)d %(message)s")
log.debug1("debug1", nofmt=1)
log.info5("info5")
log.info4("info4")
log.info3("info3")
log.info2("info2")
log.info1("info1")
log.warning("warning\n", nl=0)
log.error("error ", nl=0)
log.error("error", nofmt=1)
log.fatal("fatal")
log.info(log.INFO1, "nofmt info", nofmt=1)
log.info(log.INFO2, "info2 fmt", fmt="%(file)s:%(line)d %(message)s")
try:
a = b
except Exception as e:
log.exception()
"""
# vim:ts=4:sw=4:showmatch:expandtab
| gpl-2.0 |
LogicalDash/kivy | kivy/tests/test_urlrequest.py | 5 | 5098 | '''
UrlRequest tests
================
'''
import unittest
try:
# py3k
import _thread
except ImportError:
# py27
import thread as _thread
from kivy.network.urlrequest import UrlRequest
from time import sleep
from base64 import b64encode
from kivy.clock import Clock
import os
class UrlRequestTest(unittest.TestCase):
    """Exercise UrlRequest callbacks against live HTTP endpoints.

    Each callback records a ``(thread id, event name, args)`` tuple into
    ``self.queue`` so the tests can verify both the event sequence and
    that callbacks are dispatched on the main thread.  Set the NONETWORK
    environment variable to skip these network-dependent tests.
    """

    def _on_success(self, req, *args):
        self.queue.append((_thread.get_ident(), 'success', args))

    def _on_redirect(self, req, *args):
        self.queue.append((_thread.get_ident(), 'redirect', args))

    def _on_error(self, req, *args):
        self.queue.append((_thread.get_ident(), 'error', args))

    def _on_progress(self, req, *args):
        self.queue.append((_thread.get_ident(), 'progress', args))

    def _wait_request(self, req):
        """Tick the Clock until the request finishes, then assert it did.

        Doesn't use req.wait() so the Clock keeps dispatching callbacks;
        gives up after 50 * 0.5s = ~25 seconds.
        """
        for i in range(50):
            Clock.tick()
            sleep(.5)
            if req.is_finished:
                break
        self.assertTrue(req.is_finished)

    def _check_queue(self):
        """Common assertions on the recorded callback queue."""
        # we should have 2 progress minimum and one success
        self.assertTrue(len(self.queue) >= 3)

        # ensure the callbacks were called from this thread (main).
        tid = _thread.get_ident()
        self.assertEqual(self.queue[0][0], tid)
        self.assertEqual(self.queue[-2][0], tid)
        self.assertEqual(self.queue[-1][0], tid)

        # progress first and second-to-last, then a success or redirect
        self.assertEqual(self.queue[0][1], 'progress')
        self.assertEqual(self.queue[-2][1], 'progress')
        self.assertIn(self.queue[-1][1], ('success', 'redirect'))

        # progress starts at zero bytes and ends fully downloaded
        self.assertEqual(self.queue[0][2][0], 0)
        self.assertEqual(self.queue[-2][2][0], self.queue[-2][2][1])

    def test_callbacks(self):
        if os.environ.get('NONETWORK'):
            return
        self.queue = []
        req = UrlRequest('http://google.com',
                         on_success=self._on_success,
                         on_progress=self._on_progress,
                         on_error=self._on_error,
                         on_redirect=self._on_redirect,
                         debug=True)
        self._wait_request(req)
        self._check_queue()

    def test_auth_header(self):
        if os.environ.get('NONETWORK'):
            return
        self.queue = []
        head = {
            "Authorization": "Basic {}".format(b64encode(
                "{}:{}".format('user', 'passwd').encode('utf-8')
            ).decode('utf-8'))
        }
        req = UrlRequest(
            'http://httpbin.org/basic-auth/user/passwd',
            on_success=self._on_success,
            on_progress=self._on_progress,
            on_error=self._on_error,
            on_redirect=self._on_redirect,
            req_headers=head,
            debug=True
        )
        self._wait_request(req)
        self._check_queue()
        # the final callback must carry the authenticated JSON payload
        self.assertEqual(
            self.queue[-1][2],
            ({'authenticated': True, 'user': 'user'}, )
        )

    def test_auth_auto(self):
        if os.environ.get('NONETWORK'):
            return
        self.queue = []
        req = UrlRequest(
            'http://user:passwd@httpbin.org/basic-auth/user/passwd',
            on_success=self._on_success,
            on_progress=self._on_progress,
            on_error=self._on_error,
            on_redirect=self._on_redirect,
            debug=True
        )
        self._wait_request(req)
        self._check_queue()
        self.assertEqual(
            self.queue[-1][2],
            ({'authenticated': True, 'user': 'user'}, )
        )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
JGarcia-Panach/odoo | doc/conf.py | 184 | 8222 | # -*- coding: utf-8 -*-
import sys, os
import sphinx
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Directory containing this conf.py.
DIR = os.path.dirname(__file__)
# Make the project-local Sphinx extensions under _extensions/ importable.
sys.path.append(
    os.path.abspath(
        os.path.join(DIR, '_extensions')))

# autodoc: make the odoo package itself importable from the repository root.
sys.path.append(os.path.abspath(os.path.join(DIR, '..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'github_link',
'odoo',
'html_domain',
'exercise_admonition',
'patchqueue'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odoo'
copyright = u'Odoo S.A.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.0'
# The full version, including alpha/beta/rc tags.
release = '8.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'odoo'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'odoo'
odoo_cover_default = 'banners/installing_odoo.jpg'
odoo_cover_external = {
'https://odoo.com/documentation/functional/accounting.html' : 'banners/m_accounting.jpg',
'https://odoo.com/documentation/functional/double-entry.html' : 'banners/m_1.jpg',
'https://odoo.com/documentation/functional/valuation.html' : 'banners/m_2.jpg',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_extensions']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = u''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# FIXME: no sidebar on index?
html_sidebars = {
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# LaTeX output: A4 paper, table of contents limited to two levels deep.
latex_elements = {
    'papersize': r'a4paper',
    'preamble': u'''\\setcounter{tocdepth}{2}
''',
}
# default must be set otherwise ifconfig blows up
todo_include_todos = False
# Cross-project reference targets resolved by sphinx.ext.intersphinx.
intersphinx_mapping = {
    'python': ('https://docs.python.org/2/', None),
    'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
    'django': ('https://django.readthedocs.org/en/latest/', None),
}
# GitHub coordinates used by the theme (e.g. "edit on GitHub" links).
github_user = 'odoo'
github_project = 'odoo'
# monkeypatch PHP lexer to not require <?php
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
def setup(app):
    """Sphinx extension entry point: declare config values and register
    the html-page-context handlers defined below."""
    app.add_config_value('canonical_root', None, 'env')
    app.add_config_value('canonical_branch', 'master', 'env')
    app.add_config_value('versions', '', 'env')
    app.add_config_value('google_analytics_key', '', 'env')
    # Registration order is preserved: canonicalize, versionize, analytics.
    for handler in (canonicalize, versionize, analytics):
        app.connect('html-page-context', handler)
def canonicalize(app, pagename, templatename, context, doctree):
    """ Adds a 'canonical' URL for the current document in the rendering
    context. Requires the ``canonical_root`` setting being set. The canonical
    branch is ``master`` but can be overridden using ``canonical_branch``.
    """
    root = app.config.canonical_root
    if root:
        branch = app.config.canonical_branch
        context['canonical'] = _build_url(root, branch, pagename)
def versionize(app, pagename, templatename, context, doctree):
    """ Adds a version switcher below the menu, requires ``canonical_root``
    and ``versions`` (an ordered, comma-separated list of all possible
    versions).
    """
    cfg = app.config
    if not (cfg.canonical_root and cfg.versions):
        return
    # Link every version except the one currently being built.
    alternates = []
    for candidate in cfg.versions.split(','):
        if candidate == cfg.version:
            continue
        alternates.append((candidate, _build_url(cfg.canonical_root, candidate, pagename)))
    context['versions'] = alternates
def analytics(app, pagename, templatename, context, doctree):
    """Expose the Google Analytics key to templates when it is configured."""
    key = app.config.google_analytics_key
    if key:
        context['google_analytics_key'] = key
def _build_url(root, branch, pagename):
return "{canonical_url}{canonical_branch}/{canonical_page}".format(
canonical_url=root,
canonical_branch=branch,
canonical_page=(pagename + '.html').replace('index.html', '')
.replace('index/', ''),
)
| agpl-3.0 |
haoyuchen1992/CourseBuilder | appengine_config.py | 4 | 3741 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import logging
import os
import sys
# Whether we are running in the production environment.
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
# Set this flag to true to enable bulk downloads of Javascript/CSS files in lib
BUNDLE_LIB_FILES = True
# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)
# make all Windows and Linux paths have the same separator '/'
BUNDLE_ROOT = BUNDLE_ROOT.replace('\\', '/')
# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
class _Library(object):
"""DDO that represents a Python library contained in a .zip file."""
def __init__(self, zipfile, relative_path=None):
self._relative_path = relative_path
self._zipfile = zipfile
@property
def file_path(self):
"""Path to the library's file on disk."""
return os.path.join(BUNDLE_ROOT, 'lib', self._zipfile)
@property
def full_path(self):
"""Full path for imports, containing archive-relative paths if any."""
path = self.file_path
if self._relative_path:
path = os.path.join(path, self._relative_path)
return path
# Third-party library zip files.
THIRD_PARTY_LIBS = [
_Library('babel-0.9.6.zip'),
_Library('html5lib-0.95.zip'),
_Library('httplib2-0.8.zip', relative_path='httplib2-0.8/python2'),
_Library('gaepytz-2011h.zip'),
_Library(
'google-api-python-client-1.1.zip',
relative_path='google-api-python-client-1.1'),
_Library('mapreduce-r645.zip'),
# .zip repackaged from .tar.gz download.
_Library('mrs-mapreduce-0.9.zip', relative_path='mrs-mapreduce-0.9'),
# .zip repackaged from .tar.gz download.
_Library('python-gflags-2.0.zip', relative_path='python-gflags-2.0'),
_Library('pyparsing-1.5.7.zip'),
]
def gcb_force_default_encoding(encoding):
    """Force default encoding to a specific value."""
    # Eclipse silently sets default encoding to 'utf-8', while GAE forces
    # 'ascii'. We need to control this directly for consistency.
    if sys.getdefaultencoding() != encoding:
        # Python 2 only: reload() is a builtin there, and reloading sys
        # re-exposes setdefaultencoding(), which site.py deletes at startup.
        reload(sys)
        sys.setdefaultencoding(encoding)
def gcb_init_third_party():
    """Make every bundled third-party library importable via sys.path."""
    for library in THIRD_PARTY_LIBS:
        # Fail fast and loudly when a bundle is missing from lib/.
        if not os.path.exists(library.file_path):
            raise Exception('Library does not exist: %s' % library.file_path)
        sys.path.insert(0, library.full_path)
def gcb_appstats_enabled():
    """True when the GCB_APPSTATS_ENABLED env var is exactly 'True'."""
    return os.environ.get('GCB_APPSTATS_ENABLED') == 'True'
def webapp_add_wsgi_middleware(app):
    """Wrap the WSGI app with AppStats recording middleware if requested."""
    if not gcb_appstats_enabled():
        return app
    logging.info('Enabling AppStats.')
    # Imported lazily so the appstats machinery only loads when enabled.
    # pylint: disable-msg=g-import-not-at-top
    from google.appengine.ext.appstats import recording
    # pylint: enable-msg=g-import-not-at-top
    return recording.appstats_wsgi_middleware(app)
gcb_init_third_party()
| apache-2.0 |
suprzer0/aoc2016 | day2/tests.py | 1 | 1060 | from io import StringIO
import unittest
from solve import load_data, solve_p1, solve_p2
class LoadDataTestCase(unittest.TestCase):
    """load_data() should split the raw instruction stream into lines."""
    EXAMPLE = "ULL\nRRDDD\nLURDL\nUUUUD\n"
    def test_example(self):
        stream = StringIO(self.EXAMPLE)
        self.assertEqual(load_data(stream),
                         ['ULL', 'RRDDD', 'LURDL', 'UUUUD'])
class Part1TestCase(unittest.TestCase):
    """solve_p1() should produce the code for the square 3x3 keypad."""
    EXAMPLE = "ULL\nRRDDD\nLURDL\nUUUUD\n"
    def test_example(self):
        instructions = load_data(StringIO(self.EXAMPLE))
        self.assertEqual(solve_p1(instructions), '1985')
class Part2TestCase(unittest.TestCase):
    """solve_p2() should produce the code for the diamond-shaped keypad."""
    EXAMPLE = "ULL\nRRDDD\nLURDL\nUUUUD\n"
    def test_example(self):
        instructions = load_data(StringIO(self.EXAMPLE))
        self.assertEqual(solve_p2(instructions), '5DB3')
if __name__ == '__main__':
unittest.main()
| mit |
popas90/filebarn | filebarn/models.py | 1 | 1149 | from filebarn import db
from werkzeug.security import generate_password_hash, check_password_hash
class User(db.Model):
    """Account record implementing the Flask-Login user interface.

    Passwords are write-only: assigning ``password`` stores a salted
    Werkzeug hash in ``password_hash``; the clear text is never kept.
    """
    __tablename__ = 'users'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Unique login name, indexed for lookup at sign-in.
    username = db.Column(db.String(64), index=True, unique=True)
    # Salted hash from generate_password_hash(); never the raw password.
    password_hash = db.Column(db.String(128))
    # Unique contact address.
    email = db.Column(db.String(120), index=True, unique=True)
    def __repr__(self):
        """Debug representation showing username and email."""
        return '<User {0}, email {1}>'.format(self.username, self.email)
    @property
    def password(self):
        """Reading the password back is deliberately forbidden."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # Hash on assignment so the clear-text value never persists.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    # Flask-Login protocol: every stored user is authenticated, active
    # and not anonymous.
    @property
    def is_authenticated(self):
        return True
    @property
    def is_active(self):
        return True
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        """Return the primary key as text, as Flask-Login requires."""
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
| mit |
pprett/statsmodels | statsmodels/examples/tsa/arma_plots.py | 4 | 2478 | '''Plot acf and pacf for some ARMA(1,1)
'''
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn of the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
| bsd-3-clause |
gvanhorn38/active_neurofinder | baselearners/train_full.py | 1 | 18123 | # Trains network on the full neurofinder dataset
## IMPORT
import json
import numpy as np
from scipy.misc import imread
from glob import glob
import os
import random
import neurofinder as nf
from keras.layers import Input, Convolution2D, MaxPooling2D, Convolution3D, MaxPooling3D, merge, Dropout, BatchNormalization, Reshape, Dense
from keras.optimizers import SGD
from keras.models import Model
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
## UTILITY FUNCTIONS
def load_images(im_dir):
    """Load every .tiff frame in *im_dir* (sorted by name) into one array."""
    frame_paths = sorted(glob(os.path.join(im_dir, '*.tiff')))
    frames = [imread(path) for path in frame_paths]
    return np.array(frames)
def load_regions(reg_dir):
    """Parse the ground-truth regions.json found in *reg_dir* (training only)."""
    with open(os.path.join(reg_dir, 'regions.json')) as handle:
        return json.load(handle)
def tomask(coords, dims):
    """Rasterize one region's coordinate list into a binary mask.

    Args:
        coords: iterable of (row, col) pixel pairs.
        dims: (height, width) of the output mask.
    Returns:
        Float array of shape *dims* with 1 at every listed pixel.
    """
    mask = np.zeros(dims)
    if coords:
        # zip(*coords) separates rows from cols; wrapping in tuple() makes
        # it a valid fancy index under Python 3 (zip returns an iterator
        # there) and avoids NumPy's non-tuple sequence-index deprecation.
        # The emptiness guard preserves the original no-op for [] (an
        # empty tuple index would instead fill the whole array).
        mask[tuple(zip(*coords))] = 1
    return mask
def get_mask(imgs, regions):
    """Combine every labelled region into one binary ground-truth mask."""
    dims = imgs.shape[1:]
    per_region = np.array([tomask(r['coordinates'], dims) for r in regions])
    combined = per_region.sum(axis=0)
    # Overlapping regions may sum above 1; clamp back to a 0/1 mask.
    combined[combined >= 1.0] = 1.0
    return combined
def subtract_background(imgs):
    """Remove the per-pixel temporal mean; returns float32 frames."""
    background = np.mean(imgs, axis=0).astype(np.float32)
    return np.subtract(imgs.astype(np.float32), background)
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
## PATCH FUNCTIONS
def create_patches(imgs, patch_size, stride=(1, 1, 1), dataset_num=0):
    """Enumerate the coordinates of every patch of *patch_size* in *imgs*.

    Args:
        imgs: (frames, height, width) image stack.
        patch_size: (patch_frames, patch_height, patch_width).
        stride: (frame, height, width) step between patches.  The default
            is now a tuple rather than a mutable list (same values).
        dataset_num: dataset index recorded in column 0 of every row.
    Returns:
        (num_patches, 4) array of [dataset_num, frame, row, col] rows,
        ordered row-major over (row, col) with frame varying fastest.
    """
    total_frames, total_height, total_width = imgs.shape
    patch_frames, patch_height, patch_width = patch_size
    f_stride, h_stride, w_stride = stride
    # The original counted with Python-2-only xrange() but looped with
    # range(); using the same range objects for both keeps the count and
    # the loops consistent and works on Python 3.
    f_steps = range(0, total_frames - patch_frames + 1, f_stride)
    h_steps = range(0, total_height - patch_height + 1, h_stride)
    w_steps = range(0, total_width - patch_width + 1, w_stride)
    patch_coords = np.zeros((len(f_steps) * len(h_steps) * len(w_steps), 4))
    patch_num = 0
    for h in h_steps:
        for w in w_steps:
            for f in f_steps:
                patch_coords[patch_num] = [dataset_num, f, h, w]
                patch_num += 1
    return patch_coords
def split_patches(patch_coords, mask, patch_size):
    """Partition patch coordinates by the label of each patch's centre pixel.

    Args:
        patch_coords: (N, 4) array of [dataset_num, frame, row, col].
        mask: (height, width) binary ground-truth mask.
        patch_size: (patch_frames, patch_height, patch_width).
    Returns:
        (neuron_patch_coords, non_neuron_patch_coords) row subsets.
    """
    patch_frames, patch_height, patch_width = patch_size
    neuron_rows = []
    non_neuron_rows = []
    for patch_num in range(patch_coords.shape[0]):
        dataset_num, f, h, w = [int(coord) for coord in patch_coords[patch_num]]
        # Floor division keeps the centre offset an integer index; plain /
        # yields a float under Python 3, which NumPy rejects as an index.
        if mask[h + patch_height // 2, w + patch_width // 2] == 1.0:
            neuron_rows.append(patch_num)
        else:
            non_neuron_rows.append(patch_num)
    return patch_coords[neuron_rows], patch_coords[non_neuron_rows]
def extract_patches(coords, total_imgs, patch_size):
    """Cut the image patches named by *coords* out of their datasets.

    Returns a (num_patches, 1, frames, height, width) array; axis 1 is the
    single channel expected by the network.
    """
    pf, ph, pw = patch_size
    num_patches = coords.shape[0]
    patches = np.zeros((num_patches, 1, pf, ph, pw))
    for idx in range(num_patches):
        dataset_num, f, h, w = [int(coord) for coord in coords[idx]]
        stack = total_imgs[dataset_num]
        patches[idx, 0] = stack[f:f + pf, h:h + ph, w:w + pw]
    return patches
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
## MODEL
def create_model(input_size=[2900,15,15]):
    """Build and compile the 3-D conv net that classifies one patch.

    The network alternates spatial (1x3x3) and temporal (kx1x1)
    convolutions with matching poolings, collapses the remaining time axis,
    and ends in two dense layers with a 2-way softmax
    (non-neuron, neuron).  Trained with SGD + momentum on categorical
    cross-entropy.

    Args:
        input_size: [frames, height, width] of one patch.  The t/4, h/4,
            w/4 pooling arithmetic below assumes all three are divisible
            by 4 (integer division under Python 2 — TODO confirm before
            running under Python 3, where / is float division).

    NOTE(review): the mutable list default is shared across calls; it is
    never mutated here, but a tuple would be safer.
    """
    t,h,w = input_size
    input_sequence = Input(shape=(1,t,h,w)) # (channels,frames,height,width)
    # conv1: spatial convolution (3 x 3), spatial pooling (2 x 2)
    conv_1 = Convolution3D(50,1,3,3,activation='relu',border_mode='same')
    conv1 = conv_1(input_sequence)
    bn1 = BatchNormalization(axis=1)(conv1)
    pool_1 = MaxPooling3D(pool_size=(1,2,2),strides=(1,2,2))
    pool1 = pool_1(bn1) # output size: t, h/2, w/2
    # conv2: temporal convolution (4), temporal pooling (2)
    conv_2 = Convolution3D(50,5,1,1,activation='relu',border_mode='same')
    conv2 = conv_2(pool1)
    bn2 = BatchNormalization(axis=1)(conv2)
    pool_2 = MaxPooling3D(pool_size=(2,1,1),strides=(2,1,1))
    pool2 = pool_2(bn2) # output size: t/2, h/2, w/2
    drop3 = Dropout(0.5)(pool2)
    # conv3: spatial convolution (3 x 3), spatial pooling (2 x 2)
    conv_3 = Convolution3D(50,1,3,3,activation='relu',border_mode='same')
    conv3 = conv_3(drop3)
    bn3 = BatchNormalization(axis=1)(conv3)
    pool_3 = MaxPooling3D(pool_size=(1,2,2),strides=(1,2,2))
    pool3 = pool_3(bn3) # output size: t/2, h/4, w/4
    # conv4: temporal convolution (4), temporal pooling (2)
    conv_4 = Convolution3D(50,4,1,1,activation='relu',border_mode='same')
    conv4 = conv_4(pool3)
    bn4 = BatchNormalization(axis=1)(conv4)
    pool_4 = MaxPooling3D(pool_size=(2,1,1),strides=(2,1,1))
    pool4 = pool_4(bn4) # output size: t/4, h/4, w/4
    # Global temporal pooling: collapse the remaining t/4 frames to one.
    pool_5 = MaxPooling3D(pool_size=(t/4,1,1),strides=(t/4,1,1))
    pool5 = pool_5(pool4) # output size: 1, h/4, w/4
    drop5 = Dropout(0.5)(pool5)
    # fully connected layers
    reshape6 = Reshape((50*(h/4)*(w/4),))(drop5)
    fc_6 = Dense(1000,activation = 'relu')
    fc6 = fc_6(reshape6)
    fc_7 = Dense(2,activation='softmax')
    fc7 = fc_7(fc6)
    model = Model(input=input_sequence,output=fc7)
    sgd = SGD(lr=0.1,decay=1e-6,momentum=0.9,nesterov=True)
    model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
    return model
def shuffle_data(patch_coords):
    """Return *patch_coords* with its rows in a uniformly random order."""
    # list() is required: random.shuffle() mutates in place, and range()
    # is an immutable object under Python 3 (in Python 2 it was a list).
    order = list(range(patch_coords.shape[0]))
    random.shuffle(order)
    return patch_coords[order]
def train_model(model,train_patch_coords,val_patch_coords,train_imgs,val_imgs,patch_size,batch_size=1,val_batch_size=500,display_int=20,val_int=200):
    """Train *model* on class-balanced neuron/non-neuron patch batches.

    Each batch example is drawn from the neuron or non-neuron pool with
    probability 0.5, cycling through each pool in order (wrapping with %).
    Every ``display_int`` batches the mean loss since the last display is
    printed; every ``val_int`` batches a balanced validation pass of
    ``val_batch_size`` examples reports overall/neuron/non-neuron accuracy.

    NOTE(review): the ``while True`` loop has no break, so the trailing
    ``return model`` is unreachable — training runs until interrupted.
    """
    # separate sets for neurons/non-neurons
    neuron_train,non_neuron_train = train_patch_coords
    neuron_val,non_neuron_val = val_patch_coords
    num_neuron_train = neuron_train.shape[0]
    num_non_neuron_train = non_neuron_train.shape[0]
    num_neuron_val = neuron_val.shape[0]
    num_non_neuron_val = non_neuron_val.shape[0]
    # indices for neuron/non-neuron training/validation sets
    neuron_train_it = 0
    non_neuron_train_it = 0
    neuron_val_it = 0
    non_neuron_val_it = 0
    num_batches = 0
    display_loss = [] # hold the losses over display_int iterations
    while True:
        # create the batch
        batch_coords = np.zeros((batch_size,4))
        batch_labels = np.zeros((batch_size,2))  # one-hot: col 0 non-neuron, col 1 neuron
        for i in range(batch_size):
            if random.random() > 0.5: # pull in a neuron
                batch_coords[i,:] = neuron_train[neuron_train_it:neuron_train_it+1]
                batch_labels[i,1] = 1.0
                neuron_train_it = (neuron_train_it+1) % num_neuron_train
            else: # pull in a non-neuron
                batch_coords[i,:] = non_neuron_train[non_neuron_train_it:non_neuron_train_it+1]
                batch_labels[i,0] = 1.0
                non_neuron_train_it = (non_neuron_train_it+1) % num_non_neuron_train
        batch_examples = extract_patches(batch_coords,train_imgs,patch_size)
        # train the model on the batch
        loss = model.train_on_batch(batch_examples,batch_labels)
        display_loss.append(loss)
        # display the loss
        if num_batches % display_int == 0:
            print 'Iteration: ' + str(num_batches) + ', Loss: ' + str(np.mean(display_loss)) # show average loss since last display
            display_loss = []
        # validation
        if num_batches % val_int == 0:
            neuron_acc = 0
            non_neuron_acc = 0
            num_neurons = 0
            for i in range(val_batch_size):
                if random.random() > 0.5: # neuron example
                    coords = neuron_val[neuron_val_it:neuron_val_it+1]
                    example = extract_patches(coords,val_imgs,patch_size)
                    prediction = model.predict(example,batch_size=1)
                    if prediction[0,1] > prediction[0,0]: # correct prediction
                        neuron_acc += 1
                    num_neurons += 1
                    neuron_val_it = (neuron_val_it+1) % num_neuron_val
                else: # non-neuron example
                    coords = non_neuron_val[non_neuron_val_it:non_neuron_val_it+1]
                    example = extract_patches(coords,val_imgs,patch_size)
                    prediction = model.predict(example,batch_size=1)
                    if prediction[0,0] > prediction[0,1]: # correct prediction
                        non_neuron_acc += 1
                    non_neuron_val_it = (non_neuron_val_it+1) % num_non_neuron_val
            overall_acc = (neuron_acc + non_neuron_acc)*1.0/val_batch_size
            # NOTE(review): a validation round that happens to draw zero
            # neuron (or zero non-neuron) examples would divide by zero here.
            neuron_acc /= 1.0*num_neurons
            non_neuron_acc /= 1.0*(val_batch_size - num_neurons)
            print 'Iteration: ' + str(num_batches) + ', Overall Accuracy: ' + str(overall_acc) + ', Neuron Accuracy: ' + str(neuron_acc) + ', Non-Neuron Accuracy: ' + str(non_neuron_acc)
        num_batches += 1
    return model
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
# PARAMETERS
PATCH_SIZE = [512,15,15]
PATCH_STRIDE = [100,3,3]
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
## LOAD THE DATA
#training_sets = ['00.00','00.01','00.03','00.04','00.05','00.06','00.07','00.09','00.10','00.11','01.00','01.01','02.01','03.00']
training_sets = ['00.00']
training_images = []
training_labels = []
print 'Loading Training Data'
for dataset in training_sets:
print 'Dataset ' + dataset
images = load_images(os.path.join('data/neurofinder.' + dataset,'images'))
preped_images = subtract_background(images)
regions = load_regions(os.path.join('data/neurofinder.' + dataset,'regions'))
mask = get_mask(images,regions)
training_images.append(preped_images)
training_labels.append(mask)
print '...done'
val_sets = ['00.02']
val_images = []
val_labels = []
print 'Loading Validation Data'
for dataset in val_sets:
print 'Dataset ' + dataset
images = load_images(os.path.join('data/neurofinder.' + dataset,'images'))
preped_images = subtract_background(images)
regions = load_regions(os.path.join('data/neurofinder.' + dataset,'regions'))
mask = get_mask(images,regions)
val_images.append(images)
val_labels.append(mask)
print '...done'
'''
testing_sets = ['00.00.test','00.01.test','01.00.test','01.01.test','02.00.test','02.01.test','03.00.test']
testing_images = {}
print 'Loading Testing Data'
for dataset in testing_sets:
print 'Dataset ' + dataset
images = load_images(os.path.join('data/neurofinder.' + dataset,'images'))
preped_images = subtract_background(images)
testing_images.append(images)
print '...done'
'''
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
## CREATE PATCHES
training_neuron_patches = []
training_non_neuron_patches = []
print 'Creating Training Patches'
for dataset in training_sets:
print 'Dataset ' + dataset
dataset_num = training_sets.index(dataset)
images = training_images[dataset_num]
mask = training_labels[dataset_num]
patch_coords = create_patches(images,PATCH_SIZE,PATCH_STRIDE,dataset_num) # create the patch coordinates
neuron_patch_coords,non_neuron_patch_coords = split_patches(patch_coords,mask,PATCH_SIZE) # split them according to label
neuron_patch_coords = shuffle_data(neuron_patch_coords)
non_neuron_patch_coords = shuffle_data(non_neuron_patch_coords)
if training_neuron_patches == []:
training_neuron_patches = neuron_patch_coords
training_non_neuron_patches = non_neuron_patch_coords
else:
training_neuron_patches = np.append(training_neuron_patches,neuron_patch_coords,axis=0)
training_non_neuron_patches = np.append(training_non_neuron_patches,non_neuron_patch_coords,axis=0)
training_patches = [training_neuron_patches,training_non_neuron_patches]
print '...done'
val_neuron_patches = []
val_non_neuron_patches = []
print 'Creating Validation Patches'
for dataset in val_sets:
print 'Dataset ' + dataset
dataset_num = val_sets.index(dataset)
images = val_images[dataset_num]
mask = val_labels[dataset_num]
patch_coords = create_patches(images,PATCH_SIZE,PATCH_STRIDE,dataset_num) # create the patch coordinates
neuron_patch_coords,non_neuron_patch_coords = split_patches(patch_coords,mask,PATCH_SIZE) # split them according to label
neuron_patch_coords = shuffle_data(neuron_patch_coords)
non_neuron_patch_coords = shuffle_data(non_neuron_patch_coords)
if val_neuron_patches == []:
val_neuron_patches = neuron_patch_coords
val_non_neuron_patches = non_neuron_patch_coords
else:
val_neuron_patches = np.append(val_neuron_patches,neuron_patch_coords,axis=0)
val_non_neuron_patches = np.append(val_non_neuron_patches,non_neuron_patch_coords,axis=0)
val_patches = [val_neuron_patches,val_non_neuron_patches]
print '...done'
'''
testing_patches = []
print 'Creating Testing Patches'
for dataset in testing_sets:
print 'Dataset ' + dataset
dataset_num = testing_sets.index(dataset)
images = testing_images[dataset_num]
patch_coords = create_patches(images,PATCH_SIZE,PATCH_STRIDE,dataset_num) # create the patch coordinates
if testing_patches == []:
testing_patches = patch_coords
else:
testing_patches = np.append(testing_patches,patch_coords,axis=0)
print '...done'
'''
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
###########################################################################################################################################
print 'Creating model'
model = create_model(PATCH_SIZE)
print 'Training model'
model = train_model(model,training_patches,val_patches,training_images,val_images,PATCH_SIZE,batch_size=5)
| mit |
TheTypoMaster/calligra | 3rdparty/google-breakpad/src/tools/gyp/test/mac/gyptest-copies.py | 258 | 1437 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that 'copies' with app bundles are handled correctly.
"""
import TestGyp
import os
import sys
import time
# The whole test is macOS-only: it exercises .framework bundle copies.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp('framework.gyp', chdir='framework')
  test.build('framework.gyp', 'copy_target', chdir='framework')
  # Check that the copy succeeded.
  test.built_file_must_exist(
      'Test Framework.framework/foo/Dependency Bundle.framework',
      chdir='framework')
  test.built_file_must_exist(
      'Test Framework.framework/foo/Dependency Bundle.framework/Versions/A',
      chdir='framework')
  test.built_file_must_exist(
      'Test Framework.framework/Versions/A/Libraries/empty.c',
      chdir='framework')
  # Check that rebuilding the target a few times works.
  dep_bundle = test.built_file_path('Dependency Bundle.framework',
                                    chdir='framework')
  mtime = os.path.getmtime(dep_bundle)
  atime = os.path.getatime(dep_bundle)
  # Bump the source bundle's timestamps each round so the build system
  # sees it as changed and must redo the copy.
  for i in range(3):
    os.utime(dep_bundle, (atime + i * 1000, mtime + i * 1000))
    test.build('framework.gyp', 'copy_target', chdir='framework')
  # Check that actions ran.
  test.built_file_must_exist('action_file', chdir='framework')
  test.pass_test()
| gpl-2.0 |
faloi/pygobstones | examples/PixelArt/misc/generator.py | 3 | 4067 | #!/usr/bin/python
#
# Copyright (C) 2011-2013 Ary Pablo Batista <arypbatista@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from PIL import Image
def generate(filename, options, matrixGenerator):
    """Emit the text of a Gobstones program that paints *filename*'s pixels.

    *matrixGenerator* maps the file name to a matrix of (r, g, b, a)
    pixel tuples.  Each pixel becomes a Pintar() call; the cursor walks
    east across a row and returns to the west border / steps south
    between rows.  *options* is unused here (consumed by the generators).
    """
    indent = " "
    output = """program {
    IrAInicioT(Este, Sur)\n"""
    # One Pintar per pixel, with a Mover(Este) between adjacent columns.
    for row in matrixGenerator(filename):
        output += (indent + "Mover(Este)\n").join([indent + ("Pintar(%s, %s, %s, %s)\n" % pixel) for pixel in row])
        output += indent + """
    IrAlBorde(Oeste)
    MoverSiPuede(Sur)
    """
    return output + "}"
def rgbaPixelDigestor(pixel):
    """Split a comma-separated 'r,g,b,a' pixel string into a string tuple."""
    return tuple(pixel.split(","))
def hexPixelDigestor(pixel):
    """Decode a hex pixel string ('rrggbb...') into a tuple of byte values."""
    pairs = (pixel[i:i + 2] for i in range(0, len(pixel), 2))
    return tuple(int(pair, 16) for pair in pairs)
def fileMatrixGenerator(options, pixelDigestor):
    """Build a reader turning a text file into a pixel matrix.

    One matrix row per line; pixels are separated by
    options["pixel-separator"] when set (and non-empty-list), else '|'.
    Each pixel string is passed through *pixelDigestor*.
    """
    def mat_(filename):
        # Same fallback rule as before: missing key or [] means '|'.
        pixel_separator = options.get("pixel-separator", [])
        if pixel_separator == []:
            pixel_separator = "|"
        # with-statement closes the handle promptly (the original leaked
        # the open file until garbage collection).
        with open(filename) as handle:
            lines = handle.readlines()
        return [[pixelDigestor(pixel) for pixel in line[:-1].split(pixel_separator)] for line in lines]
    return mat_
def imagePixelDigestor(image, x, y):
    """Return *image*'s pixel value at column *x*, row *y* (RGBA tuple,
    since callers convert the image with .convert("RGBA") first)."""
    return image.getpixel((x,y))
def imageMatrixGenerator(options, pixelDigestor):
    """Build a reader that converts an image file into a pixel matrix.

    The returned callable opens *filename* with PIL, converts it to RGBA
    and applies *pixelDigestor* to every (image, x, y) position, one list
    per image row.  *options* is unused for image input.
    """
    def mat_(filename):
        im = (Image.open(filename)).convert("RGBA")
        width, height = im.size
        # range(), not the Python-2-only xrange(), so this also runs on
        # Python 3; behavior is identical on Python 2.
        return [[pixelDigestor(im, x, y) for x in range(width)]
                for y in range(height)]
    return mat_
def main(args, options):
    """Print the Gobstones program for args[0] using the selected reader."""
    source = args[0]
    if options["image"]:
        builder = imageMatrixGenerator(options, imagePixelDigestor)
    elif options["hex"]:
        builder = fileMatrixGenerator(options, hexPixelDigestor)
    else:
        builder = fileMatrixGenerator(options, rgbaPixelDigestor)
    print(generate(source, options, builder))
"""
Argument parsing
"""
def default_options(option_switches):
    """Build the default option dict implied by a switch specification.

    '--foo' defaults to False, '--no-foo' defaults to True (stored under
    'foo'), and a value-taking switch like '--bar X' defaults to [].
    """
    defaults = {}
    for spec in option_switches:
        parts = spec.split(' ')
        name = parts[0][2:]
        negated = name[:3] == 'no-'
        if negated:
            name = name[3:]
        defaults[name] = negated if len(parts) == 1 else []
    return defaults
def parse_options(option_switches, args, max_args=None):
    """Parse command-line *args* (argv-style, args[0] ignored) against specs.

    Returns (positional_arguments, options) on success, or False when more
    than *max_args* positional arguments appear or a switch is missing its
    parameter values.
    """
    # Seed every option with its default (inlined from default_options):
    # False for plain flags, True for "--no-" flags, [] for parameterized ones.
    opt = {}
    for spec in option_switches:
        parts = spec.split(' ')
        name = parts[0][2:]
        negated = name.startswith('no-')
        if negated:
            name = name[3:]
        opt[name] = negated if len(parts) == 1 else []
    arguments = []
    total = len(args)
    i = 1
    while i < total:
        matched = None
        for spec in option_switches:
            candidate = spec.split(' ')
            if candidate[0] == args[i]:
                matched = candidate
                break
        if matched is None:
            # Not a recognized switch: treat it as a positional argument.
            if len(arguments) == max_args:
                return False
            arguments.append(args[i])
            i += 1
            continue
        name = matched[0][2:]
        if len(matched) == 1:
            # Boolean flag; a "--no-" switch sets its option to False.
            if name.startswith('no-'):
                opt[name[3:]] = False
            else:
                opt[name] = True
            i += 1
        else:
            # The switch expects len(matched) - 1 parameter values.
            i += 1
            for _ in matched[1:]:
                if i >= total:
                    return False
                opt[name].append(args[i])
                i += 1
    return arguments, opt
# Recognized command-line switches; "NAME X" means the switch takes one value.
SWITCHES = [
    '--image',
    '--csv',
    '--hex',
    '--rgba',
    '--pixel-separator X'
]
def usage():
    """Print the accepted command-line switches, one per indented line."""
    switch_lines = "\n\t".join(SWITCHES)
    print("Argument needed:")
    print("\t" + switch_lines)
main(*parse_options(SWITCHES, sys.argv)) | gpl-3.0 |
behzadnouri/scipy | tools/cythonize.py | 67 | 6228 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'scipy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'scipy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run Cython on *fromfile*, writing the generated C/C++ to *tofile*.

    Verifies the installed Cython version when importable, then invokes the
    ``cython`` executable; if no executable is on the path, falls back to
    invoking the Cython compiler through the current Python interpreter.

    Raises:
        Exception: on a version too old, or a non-zero Cython exit status.
        OSError: when Cython cannot be invoked at all.
    """
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.22'):
            raise Exception('Building SciPy requires Cython >= 0.22')
    except ImportError:
        # Cython may still be runnable as an executable even when the module
        # is not importable here; defer the failure to the calls below.
        pass

    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        # C++ output requested by the caller's target extension.
        flags += ['--cplus']

    try:
        try:
            r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ["-o", tofile, fromfile])
            if r != 0:
                raise Exception("Cython either isn't installed or it failed.")
    except OSError:
        raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
    """Expand a .pyx.in Tempita template, then cythonize the result.

    The rendered template is written next to the input with a .pyx suffix,
    and compilation is delegated to process_pyx.

    Raises:
        Exception: when neither Cython's bundled Tempita nor a standalone
            tempita package is importable.
    """
    try:
        try:
            # Prefer the Tempita bundled with Cython.
            from Cython import Tempita as tempita
        except ImportError:
            import tempita
    except ImportError:
        raise Exception('Building SciPy requires Tempita: '
                        'pip install --user Tempita')
    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile, encoding=sys.getdefaultencoding())
    pyxcontent = template.substitute()
    assert fromfile.endswith('.pyx.in')
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as f:
        f.write(pyxcontent)
    process_pyx(pyxfile, tofile)
# Maps a source-file extension to the function that generates its C/C++ file.
rules = {
    # fromext : function
    '.pyx': process_pyx,
    '.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
    """Read the hash database: {source path: (input sha1, output sha1)}.

    Returns an empty dict when the database file does not exist.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as db:
            for record in db:
                path, inhash, outhash = record.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the hash database as sorted "path inhash outhash" lines."""
    records = ["%s %s %s\n" % (path, pair[0], pair[1])
               for path, pair in sorted(hash_db.items())]
    with open(filename, 'w') as db:
        db.writelines(records)
def sha1_of_file(filename):
    """Return the hex SHA-1 digest of the file's contents.

    Reads in fixed-size chunks so arbitrarily large files do not have to be
    held in memory at once (the original read the whole file in one call).
    """
    h = hashlib.sha1()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
#
# Main program
#
def normpath(path):
    """Normalize *path* to forward slashes and strip a leading './'."""
    normalized = path.replace(os.sep, '/')
    return normalized[2:] if normalized.startswith('./') else normalized
def get_hash(frompath, topath):
    """Return (sha1 of source, sha1 of target) — None when target is absent."""
    to_hash = None
    if os.path.exists(topath):
        to_hash = sha1_of_file(topath)
    from_hash = sha1_of_file(frompath)
    return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate *tofile* from *fromfile* when the recorded hashes changed.

    Skips work if the (input, output) hash pair matches the database entry;
    otherwise runs *processor_function*(fromfile, tofile) with the working
    directory set to *path*, then refreshes the stored hash pair in hash_db
    (mutated in place).
    """
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    current_hash = get_hash(fullfrompath, fulltopath)
    if current_hash == hash_db.get(normpath(fullfrompath), None):
        print('%s has not changed' % fullfrompath)
        return

    orig_cwd = os.getcwd()
    try:
        os.chdir(path)
        print('Processing %s' % fullfrompath)
        processor_function(fromfile, tofile)
    finally:
        # Always restore the working directory, even when processing fails.
        os.chdir(orig_cwd)
    # changed target file, recompute hash
    current_hash = get_hash(fullfrompath, fulltopath)
    # store hash in db
    hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
    """Walk *root_dir* and (re)generate C/C++ sources for all Cython files.

    Loads the hash database, processes every file matching a rule extension,
    and saves the updated database when done. A plain .pyx is skipped when a
    sibling .pyx.in template exists — the template rule produces it.
    """
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    # Honor an in-file "# distutils: language = c++" directive
                    # by emitting .cxx instead of .c.
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                    m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                    if m:
                        toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    process(cur_dir, fromfile, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main():
    """Cythonize everything under sys.argv[1] (default: DEFAULT_ROOT)."""
    root_dir = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_ROOT
    find_process_files(root_dir)


if __name__ == '__main__':
    main()
| bsd-3-clause |
gndpig/hadoop | src/contrib/hod/hodlib/Hod/nodePool.py | 182 | 3753 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines nodepool and nodeset as abstract interface for batch system"""
# -*- python -*-
from hodlib.GridServices.service import *
class NodeSet:
    """A group of nodes handed out as a single allocation unit."""

    # Lifecycle states of a node set.
    PENDING, COMMITTED, COMPLETE = range(3)

    def __init__(self, id, numNodes, preferredList, isPreemptee):
        self.id = id
        self.numNodes = numNodes
        self.preferredList = preferredList
        self.isPreemptee = isPreemptee
        self.cmdDescSet = []

    def getId(self):
        """Return the unique id of this node set."""
        return self.id

    def registerCommand(self, cmdDesc):
        """Record a command description to run on this node set."""
        self.cmdDescSet.append(cmdDesc)

    def getAddrList(self):
        """Return the node host names; may be empty if not allocated yet."""
        raise NotImplementedError

    def _getNumNodes(self):
        return self.numNodes

    def _isPreemptee(self):
        return self.isPreemptee

    def _getPreferredList(self):
        return self.preferredList

    def _getCmdSet(self):
        return self.cmdDescSet
class NodePool:
    """Maintains a collection of node sets as they get allocated.

    Also the abstract base class for all concrete node pool kinds; most
    operations must be supplied by subclasses.
    """

    def __init__(self, nodePoolDesc, cfg, log):
        self.nodePoolDesc = nodePoolDesc
        self._cfg = cfg
        self._log = log
        self.nodeSetDict = {}
        self.nextNodeSetId = 0

    def newNodeSet(self, numNodes, preferred=[], isPreemptee=True, id=None):
        """Create a node set, honoring the requested properties if possible."""
        raise NotImplementedError

    def submitNodeSet(self, nodeSet, walltime=None, qosLevel=None,
                      account=None, resourcelist=None):
        """Submit the node set request to the pool; returns False on error."""
        raise NotImplementedError

    def pollNodeSet(self, nodeSet):
        """Report the current status of a node set."""
        raise NotImplementedError

    def getWorkers(self):
        """Return the hosts that comprise this node pool."""
        raise NotImplementedError

    def runWorkers(self, nodeSet=None, args=[]):
        """Run the workers of a node set."""
        raise NotImplementedError

    def freeNodeSet(self, nodeset):
        """Release a node set back to the pool."""
        raise NotImplementedError

    def finalize(self):
        """Clean up all node sets."""
        raise NotImplementedError

    def getServiceId(self):
        raise NotImplementedError

    def getJobInfo(self, jobId=None):
        raise NotImplementedError

    def deleteJob(self, jobId):
        """Delete a job, given its id."""
        raise NotImplementedError

    def isJobFeasible(self):
        """Check whether the job can run under any user/job limits."""
        raise NotImplementedError

    def updateWorkerInfo(self, workerInfoMap, jobId):
        """Record information about the workers started by this pool."""
        raise NotImplementedError

    def getAccountString(self):
        """Return the accounting string for this job."""
        raise NotImplementedError

    def getNextNodeSetId(self):
        """Hand out monotonically increasing node-set ids, starting at 0."""
        next_id = self.nextNodeSetId
        self.nextNodeSetId += 1
        return next_id
| apache-2.0 |
Lingotek/client | python2/ltk/actions/reference_action.py | 2 | 11464 | from ltk.actions.action import *
class ReferenceAction(Action):
def __init__(self, path):
Action.__init__(self, path)
    def reference_add_action(self, filename, doc_id):
        """Interactively attach reference material to a document.

        NOTE(review): this Python-2 variant appears mangled by the commented
        "Python 3" migration blocks — the body below prompts but never uses
        the inputs, and the trailing try/for section (a leftover tail of the
        commented-out download action) references names (prompt_message reuse,
        chosen_list, tablemap, document_id, path) whose definitions are all
        commented out, so it would raise NameError at runtime. Compare with
        the Python-3 source before relying on this method.
        """
        if self._check_filename(filename, doc_id):
            material = []
            while True:
                while True:
                    prompt_message = "Reference Material file: "
                    # Python 2
                    file_input = raw_input(prompt_message)
                    # End Python 2
                    # Python 3
                    # file_input = input(prompt_message)
                    # if not file_input:
                    # logger.warning("You must enter a path to reference material")
                    # continue
                    # ref_file = os.path.abspath(os.path.expanduser(file_input))
                    # if os.path.isfile(ref_file):
                    # break
                    # else:
                    # logger.error(ref_file+" could not be found")
                    # prompt_message = "Reference Material Name: "
                    # Python 2
                    name_input = raw_input(prompt_message)
                    # End Python 2
                    # Python 3
                    # name_input = input(prompt_message)
                    # prompt_message = "Reference Material Description: "
                    # Python 2
                    desc_input = raw_input(prompt_message)
                    # End Python 2
                    # Python 3
                    # desc_input = input(prompt_message)
                    # reference = {'file': ref_file}
                    # if name_input:
                    # reference['name'] = name_input
                    # else:
                    # reference['name'] = os.path.basename(ref_file)
                    # if desc_input:
                    # reference['description'] = desc_input
                    # material.append(reference)
                    # if not yes_no_prompt("Would you like to add another reference material?", default_yes=False):
                    # break
                    # if doc_id:
                    # document_id = filename
                    # else:
                    # doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
                    # if not doc_entity:
                    # logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
                    # return
                    # document_id = doc_entity['id']
                    # for reference in material:
                    # response = self.api.document_add_reference(document_id, reference)
                    # if response.status_code == 404:
                    # logger.warning("The reference material could not be added because the document could not be found in Lingotek. The document may still be in the process of uploading.")
                    # elif response.status_code != 202:
                    # logger.info("The reference material could not be added")
                    # logger.error(response.json()['messages'])
                    # else:
                    # logger.info("{0} ({1}) has been added to the document".format(reference['name'], response.json()['properties']['id']))
                    #
                    #
                    # def reference_list_action(self, filename, doc_id):
                    # if self._check_filename(filename, doc_id):
                    # if doc_id:
                    # document_id = filename
                    # else:
                    # doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
                    # if not doc_entity:
                    # logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
                    # return
                    # document_id = doc_entity['id']
                    # self._list_reference_material(document_id)
                    #
                    # def reference_download_action(self, filename, doc_id, get_all, path):
                    # if not path:
                    # path = self.path
                    # if self._check_filename(filename, doc_id):
                    # if doc_id:
                    # document_id = filename
                    # else:
                    # doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
                    # if not doc_entity:
                    # logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
                    # return
                    # document_id = doc_entity['id']
                    # table = self._list_reference_material(document_id)
                    # tablemap = {}
                    # for row in table:
                    # tablemap.update({row[0]: {'name': row[1], 'id': row[2]}})
                    # if len(tablemap) > 0:
                    # chosen_list = []
                    # if get_all:
                    # chosen_list = tablemap.values()
                    # while not len(chosen_list) > 0:
                    # prompt_message = 'Reference materials to download: (Separate indices by comma) '
                    # Python 2
                    choice = raw_input(prompt_message)
                    # End Python 2
                    # Python 3
                    # choice = input(prompt_message)
                    # End Python 3
                    try:
                        choices = (choice.replace(", ",",")).split(",")
                        for index in choices:
                            chosen_list.append(tablemap[int(index)])
                    except ValueError:
                        logger.error('Some unexpected, non-integer value was included')
                        chosen_list = []
                    except KeyError:
                        logger.error('An index not in the list was included')
                        chosen_list = []
                    for reference in chosen_list:
                        response = self.api.document_download_reference(document_id, reference['id'])
                        if response.status_code == 404:
                            logger.error("{0} ({1}) not found".format(reference['name'], reference['id']))
                        elif response.status_code == 200:
                            self._download_reference(response, path, reference['name'])
                        else:
                            logger.info("{0} ({1}) could not be downloaded".format(reference['name'], reference['id']))
                            logger.error(response.json()['messages'])
    def reference_remove_action(self, filename, doc_id, remove_all):
        """Interactively detach reference material from a document.

        filename: tracked file path, or a Lingotek document id when doc_id is
            truthy.
        remove_all: skip the index prompt and remove every listed reference.
        """
        if self._check_filename(filename, doc_id):
            if doc_id:
                document_id = filename
            else:
                doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
                if not doc_entity:
                    logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
                    return
                document_id = doc_entity['id']
            table = self._list_reference_material(document_id)
            # Map the printed index to each reference's name/id for selection.
            tablemap = {}
            for row in table:
                tablemap.update({row[0]: {'name': row[1], 'id': row[2]}})
            if len(tablemap) > 0:
                chosen_list = []
                if remove_all:
                    chosen_list = tablemap.values()
                while not len(chosen_list) > 0:
                    prompt_message = 'Reference materials to remove: (Separate indices by comma) '
                    # Python 2
                    choice = raw_input(prompt_message)
                    # End Python 2
                    # Python 3
                    # choice = input(prompt_message)
                    # End Python 3
                    try:
                        choices = (choice.replace(", ",",")).split(",")
                        for index in choices:
                            chosen_list.append(tablemap[int(index)])
                    except ValueError:
                        logger.error('Some unexpected, non-integer value was included')
                        chosen_list = []
                    except KeyError:
                        logger.error('An index not in the list was included')
                        chosen_list = []
                for reference in chosen_list:
                    response = self.api.document_remove_reference(document_id, reference['id'])
                    if response.status_code == 404:
                        logger.error("{0} ({1}) not found".format(reference['name'], reference['id']))
                    elif response.status_code == 204:
                        logger.info("{0} ({1}) deleted".format(reference['name'], reference['id']))
                    else:
                        logger.info("{0} ({1}) could not be deleted".format(reference['name'], reference['id']))
                        logger.error(response.json()['messages'])
    def _check_filename(self, filename, doc_id):
        """Validate that *filename* names exactly one tracked file.

        Returns True when doc_id is set (the API will validate the id), or
        when the path is a single file already added to the project;
        otherwise logs the reason and returns False.
        """
        if doc_id:
            # if document ID is specified, no need to validate the filename. Just send the ID and let the API handle the error if the ID doesn't exist
            return True
        if os.path.isfile(filename):
            foundfile = self.get_doc_filenames_in_path(filename)
            if len(foundfile) == 0:
                logger.warning(filename+" has not been added yet.")
                return False
            elif len(foundfile) == 1:
                return True
            else:
                logger.warning("Only manage reference material on one file at a time")
                return False
        elif os.path.isdir(filename):
            logger.error(filename+" is not a file")
            return False
        else:
            logger.error(filename+" could not be found")
            return False
    def _list_reference_material(self, document_id):
        """Print and return the reference material attached to *document_id*.

        Returns a list of [index, name, id(, description)] rows, or an empty
        list when the document is missing, the request fails, or there is no
        reference material.
        """
        response = self.api.document_list_reference(document_id)
        if response.status_code == 404:
            logger.warning("The document could not be found in Lingotek.")
            return []
        elif response.status_code != 200:
            logger.info("The reference material list could not be retrieved")
            logger.error(response.json()['messages'])
            return []
        else:
            if response.json()['properties']['size'] > 0:
                materials = response.json()['entities']
                index = 0
                table = []
                for material in materials:
                    row = [index, material['properties']['name'], material['properties']['id']]
                    # Description column is optional per material.
                    if 'description' in material['properties'] and material['properties']['description']:
                        row.append(material['properties']['description'])
                    table.append(row)
                    index += 1
                print(tabulate(table, headers=['','Name','ID','Description']))
                return table
            else:
                print("There is no reference material attached to this document")
                return []
    def _download_reference(self, response, path, name):
        """Stream a reference-material download into *path*/*name*.

        Prompts before overwriting an existing file; logs IO errors and
        returns instead of raising.
        """
        filepath = os.path.join(path, name)
        if os.path.isfile(filepath):
            if not yes_no_prompt("There is already a file {0}. Would you like to overwrite it?".format(filepath), default_yes=False):
                return
        try:
            with open(filepath, 'wb') as file:
                # Stream in 1 KiB chunks rather than buffering the whole body.
                for chunk in response.iter_content(1024):
                    file.write(chunk)
        except IOError as e:
            print(e.errno)
            print(e)
            return
logger.info("Downloaded {0}".format(filepath)) | mit |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/gslib/boto_resumable_upload.py | 23 | 24348 | # -*- coding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Boto translation layer for resumable uploads.
See http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry interrupted uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
Unlike the boto implementation of resumable upload handler, this class does
not directly interact with tracker files.
Originally Google wrote and contributed this code to the boto project,
then copied that code back into gsutil on the release of gsutil 4.0 which
supports both boto and non-boto codepaths for resumable uploads. Any bug
fixes made to this file should also be integrated to resumable_upload_handler.py
in boto, where applicable.
TODO: gsutil-beta: Add a similar comment to the boto code.
"""
from __future__ import absolute_import
import errno
import httplib
import random
import re
import socket
import time
import urlparse
from boto import UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from gslib.exception import InvalidUrlError
from gslib.util import GetMaxRetryDelay
from gslib.util import GetNumRetries
from gslib.util import XML_PROGRESS_CALLBACKS
class BotoResumableUpload(object):
    """Upload helper class for resumable uploads via boto."""

    # Read/send granularity for upload body bytes.
    BUFFER_SIZE = 8192
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)

    # (start, end) response indicating service has nothing (upload protocol uses
    # inclusive numbering).
    SERVICE_HAS_NOTHING = (0, -1)

    def __init__(self, tracker_callback, logger,
                 resume_url=None, num_retries=None):
        """Constructor. Instantiate once for each uploaded file.

        Args:
          tracker_callback: Callback function that takes a string argument. Used
                            by caller to track this upload across upload
                            interruption.
          logger: logging.logger instance to use for debug messages.
          resume_url: If present, attempt to resume the upload at this URL.
          num_retries: Number of times to retry the upload making no progress.
                       This count resets every time we make progress, so the
                       upload can span many more than this number of retries.
        """
        if resume_url:
            # Validates the URL and derives host/path state from it.
            self._SetUploadUrl(resume_url)
        else:
            self.upload_url = None
        self.num_retries = num_retries
        self.service_has_bytes = 0  # Byte count at last service check.
        # Save upload_start_point in instance state so caller can find how
        # much was transferred by this ResumableUploadHandler (across retries).
        self.upload_start_point = None
        self.tracker_callback = tracker_callback
        self.logger = logger
    def _SetUploadUrl(self, url):
        """Saves URL and resets upload state.

        Called when we start a new resumable upload or get a new tracker
        URL for the upload.

        Args:
          url: URL string for the upload.

        Raises:
          InvalidUrlError: if URL is syntactically invalid.
        """
        parse_result = urlparse.urlparse(url)
        if (parse_result.scheme.lower() not in ['http', 'https'] or
                not parse_result.netloc):
            raise InvalidUrlError('Invalid upload URL (%s)' % url)
        self.upload_url = url
        self.upload_url_host = parse_result.netloc
        # Keep the query string, as the upload id is carried there.
        self.upload_url_path = '%s?%s' % (
            parse_result.path, parse_result.query)
        # A new URL means any previously observed service progress is stale.
        self.service_has_bytes = 0
def _BuildContentRangeHeader(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
    def _QueryServiceState(self, conn, file_length):
        """Queries service to find out state of given upload.

        Note that this method really just makes special case use of the
        fact that the upload service always returns the current start/end
        state whenever a PUT doesn't complete.

        Args:
          conn: HTTPConnection to use for the query.
          file_length: Total length of the file.

        Returns:
          HTTP response from sending request.

        Raises:
          ResumableUploadException: if problem querying service.
        """
        # Send an empty PUT so that service replies with this resumable
        # transfer's state.
        put_headers = {}
        put_headers['Content-Range'] = (
            self._BuildContentRangeHeader('*', file_length))
        put_headers['Content-Length'] = '0'
        return AWSAuthConnection.make_request(
            conn, 'PUT', path=self.upload_url_path, auth_path=self.upload_url_path,
            headers=put_headers, host=self.upload_url_host)
    def _QueryServicePos(self, conn, file_length):
        """Queries service to find out what bytes it currently has.

        Args:
          conn: HTTPConnection to use for the query.
          file_length: Total length of the file.

        Returns:
          (service_start, service_end), where the values are inclusive.
          For example, (0, 2) would mean that the service has bytes 0, 1, *and* 2.

        Raises:
          ResumableUploadException: if problem querying service.

        NOTE(review): uses Python-2-only ``long``.
        """
        resp = self._QueryServiceState(conn, file_length)
        if resp.status == 200:
            # To handle the boundary condition where the service has the complete
            # file, we return (service_start, file_length-1). That way the
            # calling code can always simply read up through service_end. (If we
            # didn't handle this boundary condition here, the caller would have
            # to check whether service_end == file_length and read one fewer byte
            # in that case.)
            return (0, file_length - 1)  # Completed upload.
        if resp.status != 308:
            # This means the service didn't have any state for the given
            # upload ID, which can happen (for example) if the caller saved
            # the upload URL to a file and then tried to restart the transfer
            # after that upload ID has gone stale. In that case we need to
            # start a new transfer (and the caller will then save the new
            # upload URL to the tracker file).
            raise ResumableUploadException(
                'Got non-308 response (%s) from service state query' %
                resp.status, ResumableTransferDisposition.START_OVER)
        got_valid_response = False
        range_spec = resp.getheader('range')
        if range_spec:
            # Parse 'bytes=<from>-<to>' range_spec.
            m = re.search(r'bytes=(\d+)-(\d+)', range_spec)
            if m:
                service_start = long(m.group(1))
                service_end = long(m.group(2))
                got_valid_response = True
        else:
            # No Range header, which means the service does not yet have
            # any bytes. Note that the Range header uses inclusive 'from'
            # and 'to' values. Since Range 0-0 would mean that the service
            # has byte 0, omitting the Range header is used to indicate that
            # the service doesn't have any bytes.
            return self.SERVICE_HAS_NOTHING
        if not got_valid_response:
            raise ResumableUploadException(
                'Couldn\'t parse upload service state query response (%s)' %
                str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
        if conn.debug >= 1:
            self.logger.debug('Service has: Range: %d - %d.', service_start,
                              service_end)
        return (service_start, service_end)
    def _StartNewResumableUpload(self, key, headers=None):
        """Starts a new resumable upload.

        Args:
          key: Boto Key representing the object to upload.
          headers: Headers to use in the upload requests.
                   NOTE(review): the default ``headers=None`` would fail in the
                   loop below (iterating None raises TypeError); callers appear
                   to always pass a dict — confirm.

        Raises:
          ResumableUploadException: if any errors occur.
        """
        conn = key.bucket.connection
        if conn.debug >= 1:
            self.logger.debug('Starting new resumable upload.')
        self.service_has_bytes = 0

        # Start a new resumable upload by sending a POST request with an
        # empty body and the "X-Goog-Resumable: start" header. Include any
        # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
        # (and raise an exception if they tried to pass one, since it's
        # a semantic error to specify it at this point, and if we were to
        # include one now it would cause the service to expect that many
        # bytes; the POST doesn't include the actual file bytes We set
        # the Content-Length in the subsequent PUT, based on the uploaded
        # file size.
        post_headers = {}
        for k in headers:
            if k.lower() == 'content-length':
                raise ResumableUploadException(
                    'Attempt to specify Content-Length header (disallowed)',
                    ResumableTransferDisposition.ABORT)
            post_headers[k] = headers[k]
        post_headers[conn.provider.resumable_upload_header] = 'start'

        resp = conn.make_request(
            'POST', key.bucket.name, key.name, post_headers)
        # Get upload URL from response 'Location' header.
        body = resp.read()

        # Check for various status conditions.
        if resp.status in [429, 500, 503]:
            # Retry after a delay.
            raise ResumableUploadException(
                'Got status %d from attempt to start resumable upload. '
                'Will wait/retry' % resp.status,
                ResumableTransferDisposition.WAIT_BEFORE_RETRY)
        elif resp.status != 200 and resp.status != 201:
            raise ResumableUploadException(
                'Got status %d from attempt to start resumable upload. '
                'Aborting' % resp.status,
                ResumableTransferDisposition.ABORT)

        # Else we got 200 or 201 response code, indicating the resumable
        # upload was created.
        upload_url = resp.getheader('Location')
        if not upload_url:
            raise ResumableUploadException(
                'No resumable upload URL found in resumable initiation '
                'POST response (%s)' % body,
                ResumableTransferDisposition.WAIT_BEFORE_RETRY)
        self._SetUploadUrl(upload_url)
        self.tracker_callback(upload_url)
    def _UploadFileBytes(self, conn, http_conn, fp, file_length,
                         total_bytes_uploaded, cb, num_cb, headers):
        """Attempts to upload file bytes.

        Makes a single attempt using an existing resumable upload connection.

        Args:
          conn: HTTPConnection from the boto Key.
          http_conn: Separate HTTPConnection for the transfer.
          fp: File pointer containing bytes to upload.
          file_length: Total length of the file.
          total_bytes_uploaded: The total number of bytes uploaded.
          cb: Progress callback function that takes (progress, total_size).
          num_cb: Granularity of the callback (maximum number of times the
              callback will be called during the file transfer). If negative,
              perform callback with each buffer read.
          headers: Headers to be used in the upload requests.

        Returns:
          (etag, generation, metageneration) from service upon success.

        Raises:
          ResumableUploadException: if any problems occur.
        """
        buf = fp.read(self.BUFFER_SIZE)
        if cb:
            # The cb_count represents the number of full buffers to send between
            # cb executions.
            if num_cb > 2:
                cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            cb(total_bytes_uploaded, file_length)

        # Build resumable upload headers for the transfer. Don't send a
        # Content-Range header if the file is 0 bytes long, because the
        # resumable upload protocol uses an *inclusive* end-range (so, sending
        # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
        put_headers = headers.copy() if headers else {}
        if file_length:
            if total_bytes_uploaded == file_length:
                range_header = self._BuildContentRangeHeader(
                    '*', file_length)
            else:
                range_header = self._BuildContentRangeHeader(
                    '%d-%d' % (total_bytes_uploaded, file_length - 1),
                    file_length)
            put_headers['Content-Range'] = range_header
        # Set Content-Length to the total bytes we'll send with this PUT.
        put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
        http_request = AWSAuthConnection.build_base_http_request(
            conn, 'PUT', path=self.upload_url_path, auth_path=None,
            headers=put_headers, host=self.upload_url_host)
        http_conn.putrequest('PUT', http_request.path)
        for k in put_headers:
            http_conn.putheader(k, put_headers[k])
        http_conn.endheaders()

        # Turn off debug on http connection so upload content isn't included
        # in debug stream.
        http_conn.set_debuglevel(0)
        while buf:
            http_conn.send(buf)
            total_bytes_uploaded += len(buf)
            if cb:
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(total_bytes_uploaded, file_length)
                    i = 0
            buf = fp.read(self.BUFFER_SIZE)
        http_conn.set_debuglevel(conn.debug)
        if cb:
            # Final progress report once sending completes.
            cb(total_bytes_uploaded, file_length)
        if total_bytes_uploaded != file_length:
            # Abort (and delete the tracker file) so if the user retries
            # they'll start a new resumable upload rather than potentially
            # attempting to pick back up later where we left off.
            raise ResumableUploadException(
                'File changed during upload: EOF at %d bytes of %d byte file.' %
                (total_bytes_uploaded, file_length),
                ResumableTransferDisposition.ABORT)
        resp = http_conn.getresponse()
        # Restore http connection debug level.
        http_conn.set_debuglevel(conn.debug)
        if resp.status == 200:
            # Success.
            return (resp.getheader('etag'),
                    resp.getheader('x-goog-generation'),
                    resp.getheader('x-goog-metageneration'))
        # Retry timeout (408) and status 429, 500 and 503 errors after a delay.
        elif resp.status in [408, 429, 500, 503]:
            disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
        else:
            # Catch all for any other error codes.
            disposition = ResumableTransferDisposition.ABORT
        raise ResumableUploadException('Got response code %d while attempting '
                                       'upload (%s)' %
                                       (resp.status, resp.reason), disposition)
def _AttemptResumableUpload(self, key, fp, file_length, headers, cb,
                            num_cb):
    """Attempts a resumable upload.

    Makes a single upload attempt: resumes an existing resumable upload
    session if one is recorded (self.upload_url), otherwise starts a new
    one, then streams the remaining bytes over a fresh HTTP connection.

    Args:
      key: Boto key representing object to upload.
      fp: File pointer containing upload bytes.
      file_length: Total length of the upload.
      headers: Headers to be used in upload requests.
      cb: Progress callback function that takes (progress, total_size).
      num_cb: Granularity of the callback (maximum number of times the
          callback will be called during the file transfer). If negative,
          perform callback with each buffer read.

    Returns:
      (etag, generation, metageneration) from service upon success.

    Raises:
      ResumableUploadException if any problems occur.
    """
    (service_start, service_end) = self.SERVICE_HAS_NOTHING
    conn = key.bucket.connection
    if self.upload_url:
      # Try to resume existing resumable upload.
      try:
        (service_start, service_end) = (
            self._QueryServicePos(conn, file_length))
        self.service_has_bytes = service_start
        if conn.debug >= 1:
          self.logger.debug('Resuming transfer.')
      except ResumableUploadException, e:
        # Resume failed (e.g. the session expired); fall back to starting
        # a brand-new resumable upload session.
        if conn.debug >= 1:
          self.logger.debug('Unable to resume transfer (%s).', e.message)
        self._StartNewResumableUpload(key, headers)
    else:
      self._StartNewResumableUpload(key, headers)
    # upload_start_point allows the code that instantiated the
    # ResumableUploadHandler to find out the point from which it started
    # uploading (e.g., so it can correctly compute throughput).
    if self.upload_start_point is None:
      self.upload_start_point = service_end
    # NOTE(review): SERVICE_HAS_NOTHING presumably makes service_end == -1,
    # so a fresh upload starts at byte 0 -- confirm against class constant.
    total_bytes_uploaded = service_end + 1
    # Start reading from the file based upon the number of bytes that the
    # server has so far.
    if total_bytes_uploaded < file_length:
      fp.seek(total_bytes_uploaded)
    conn = key.bucket.connection
    # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
    # pool connections) because httplib requires a new HTTP connection per
    # transaction. (Without this, calling http_conn.getresponse() would get
    # "ResponseNotReady".)
    http_conn = conn.new_http_connection(self.upload_url_host, conn.port,
                                         conn.is_secure)
    http_conn.set_debuglevel(conn.debug)
    # Make sure to close http_conn at end so if a local file read
    # failure occurs partway through service will terminate current upload
    # and can report that progress on next attempt.
    try:
      return self._UploadFileBytes(conn, http_conn, fp, file_length,
                                   total_bytes_uploaded, cb, num_cb,
                                   headers)
    except (ResumableUploadException, socket.error):
      # Ask the service how much it actually received before deciding
      # whether this failure is retryable.
      resp = self._QueryServiceState(conn, file_length)
      if resp.status == 400:
        raise ResumableUploadException(
            'Got 400 response from service state query after failed resumable '
            'upload attempt. This can happen for various reasons, including '
            'specifying an invalid request (e.g., an invalid canned ACL) or '
            'if the file size changed between upload attempts',
            ResumableTransferDisposition.ABORT)
      else:
        raise
    finally:
      http_conn.close()
def HandleResumableUploadException(self, e, debug):
    """Logs a ResumableUploadException and re-raises non-retryable ones.

    Args:
      e: The ResumableUploadException that was caught.
      debug: Debug level; messages are logged when >= 1.

    Dispositions ABORT_CUR_PROCESS, ABORT and START_OVER are re-raised to
    the caller; any other disposition (e.g. WAIT_BEFORE_RETRY) is only
    logged, letting the caller's retry loop continue.
    """
    if e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS:
      if debug >= 1:
        self.logger.debug('Caught non-retryable ResumableUploadException (%s); '
                          'aborting but retaining tracker file', e.message)
      raise
    elif e.disposition == ResumableTransferDisposition.ABORT:
      if debug >= 1:
        self.logger.debug('Caught non-retryable ResumableUploadException (%s); '
                          'aborting and removing tracker file', e.message)
      raise
    elif e.disposition == ResumableTransferDisposition.START_OVER:
      raise
    else:
      # Retryable disposition: swallow the exception after logging.
      if debug >= 1:
        self.logger.debug(
            'Caught ResumableUploadException (%s) - will retry', e.message)
def TrackProgressLessIterations(self, service_had_bytes_before_attempt,
                                debug=0):
    """Tracks the number of iterations without progress.

    Performs randomized exponential backoff.

    Args:
      service_had_bytes_before_attempt: Number of bytes the service had prior
          to this upload attempt.
      debug: debug level 0..3

    Raises:
      ResumableUploadException (ABORT_CUR_PROCESS) once more than
      self.num_retries consecutive attempts have made no progress.
    """
    # At this point we had a re-tryable failure; see if made progress.
    if self.service_has_bytes > service_had_bytes_before_attempt:
      self.progress_less_iterations = 0  # If progress, reset counter.
    else:
      self.progress_less_iterations += 1
    if self.progress_less_iterations > self.num_retries:
      # Don't retry any longer in the current process.
      raise ResumableUploadException(
          'Too many resumable upload attempts failed without '
          'progress. You might try this upload again later',
          ResumableTransferDisposition.ABORT_CUR_PROCESS)
    # Use binary exponential backoff to desynchronize client requests.
    # Sleep time doubles (on average) per failure, capped at the
    # configured maximum retry delay.
    sleep_time_secs = min(random.random() * (2**self.progress_less_iterations),
                          GetMaxRetryDelay())
    if debug >= 1:
      self.logger.debug('Got retryable failure (%d progress-less in a row).\n'
                        'Sleeping %3.1f seconds before re-trying',
                        self.progress_less_iterations, sleep_time_secs)
    time.sleep(sleep_time_secs)
def SendFile(self, key, fp, size, headers, canned_acl=None, cb=None,
             num_cb=XML_PROGRESS_CALLBACKS):
    """Upload a file to a key into a bucket on GS, resumable upload protocol.

    Retries upload attempts in a loop for as long as each attempt makes
    forward progress, delegating backoff/abort policy to
    HandleResumableUploadException and TrackProgressLessIterations.

    Args:
      key: `boto.s3.key.Key` or subclass representing the upload destination.
      fp: File pointer to upload
      size: Size of the file to upload.
      headers: The headers to pass along with the PUT request
      canned_acl: Optional canned ACL to apply to object.
      cb: Callback function that will be called to report progress on
          the upload. The callback should accept two integer parameters, the
          first representing the number of bytes that have been successfully
          transmitted to GS, and the second representing the total number of
          bytes that need to be transmitted.
      num_cb: (optional) If a callback is specified with the cb parameter, this
          parameter determines the granularity of the callback by defining
          the maximum number of times the callback will be called during the
          file transfer. Providing a negative integer will cause your
          callback to be called with each buffer read.

    Raises:
      ResumableUploadException if a problem occurs during the transfer.
    """
    if not headers:
      headers = {}
    # If Content-Type header is present and set to None, remove it.
    # This is gsutil's way of asking boto to refrain from auto-generating
    # that header.
    content_type = 'Content-Type'
    if content_type in headers and headers[content_type] is None:
      del headers[content_type]
    if canned_acl:
      headers[key.provider.acl_header] = canned_acl
    headers['User-Agent'] = UserAgent
    file_length = size
    debug = key.bucket.connection.debug
    # Use num-retries from constructor if one was provided; else check
    # for a value specified in the boto config file; else default to 5.
    if self.num_retries is None:
      self.num_retries = GetNumRetries()
    self.progress_less_iterations = 0
    while True:  # Retry as long as we're making progress.
      service_had_bytes_before_attempt = self.service_has_bytes
      try:
        # Save generation and metageneration in class state so caller
        # can find these values, for use in preconditions of future
        # operations on the uploaded object.
        (_, self.generation, self.metageneration) = (
            self._AttemptResumableUpload(key, fp, file_length,
                                         headers, cb, num_cb))
        key.generation = self.generation
        if debug >= 1:
          self.logger.debug('Resumable upload complete.')
        return
      except self.RETRYABLE_EXCEPTIONS, e:
        if debug >= 1:
          self.logger.debug('Caught exception (%s)', e.__repr__())
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
          # Broken pipe error causes httplib to immediately
          # close the socket (http://bugs.python.org/issue5542),
          # so we need to close the connection before we resume
          # the upload (which will cause a new connection to be
          # opened the next time an HTTP request is sent).
          key.bucket.connection.connection.close()
      except ResumableUploadException, e:
        # May re-raise (non-retryable dispositions) or simply log and
        # fall through to the progress check below.
        self.HandleResumableUploadException(e, debug)
      self.TrackProgressLessIterations(service_had_bytes_before_attempt,
                                       debug=debug)
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/python/keras/utils/vis_utils.py | 26 | 5247 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-import-not-at-top
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.util.tf_export import tf_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def _check_pydot():
  """Verify that pydot and graphviz are installed and usable.

  Rendering an empty ``pydot.Dot`` graph forces pydot to shell out to the
  graphviz binaries, so any installation problem surfaces here as an
  ImportError instead of failing later with a confusing message.
  """
  message = ('Failed to import pydot. You must install pydot'
             ' and graphviz for `pydotprint` to work.')
  try:
    # Attempt to render a blank graph; exercises the full pydot/graphviz
    # stack without touching the filesystem.
    pydot.Dot.create(pydot.Dot())
  except Exception:
    # pydot raises a generic Exception here, so nothing narrower can be
    # caught reliably.
    raise ImportError(message)
def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB'):
  """Convert a Keras model to dot format.

  Arguments:
      model: A Keras model instance.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.

  Returns:
      A `pydot.Dot` instance representing the Keras model.
  """
  # Imported lazily to avoid a circular import at module load time.
  from tensorflow.python.keras.layers.wrappers import Wrapper
  from tensorflow.python.keras.models import Sequential
  _check_pydot()
  dot = pydot.Dot()
  dot.set('rankdir', rankdir)
  dot.set('concentrate', True)
  dot.set_node_defaults(shape='record')
  if isinstance(model, Sequential):
    # A Sequential model must be built before its layers can be walked.
    if not model.built:
      model.build()
  layers = model.layers
  # Create graph nodes.
  for layer in layers:
    # id() of the layer object serves as a unique, stable node key.
    layer_id = str(id(layer))
    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__
    if isinstance(layer, Wrapper):
      layer_name = '{}({})'.format(layer_name, layer.layer.name)
      child_class_name = layer.layer.__class__.__name__
      class_name = '{}({})'.format(class_name, child_class_name)
    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name
    # Rebuild the label as a table including input/output shapes.
    if show_shapes:
      try:
        outputlabels = str(layer.output_shape)
      except AttributeError:
        # Layers with multiple output nodes have no single output_shape.
        outputlabels = 'multiple'
      if hasattr(layer, 'input_shape'):
        inputlabels = str(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join([str(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = 'multiple'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
                                                     outputlabels)
    node = pydot.Node(layer_id, label=label)
    dot.add_node(node)
  # Connect nodes with edges.
  for layer in layers:
    layer_id = str(id(layer))
    for i, node in enumerate(layer._inbound_nodes):
      node_key = layer.name + '_ib-' + str(i)
      # Only draw edges for nodes that are part of the model's graph
      # (excludes nodes created by reusing the layer outside the model).
      if node_key in model._network_nodes:  # pylint: disable=protected-access
        for inbound_layer in node.inbound_layers:
          inbound_layer_id = str(id(inbound_layer))
          layer_id = str(id(layer))
          dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
  return dot
@tf_export('keras.utils.plot_model')
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB'):
  """Converts a Keras model to dot format and save to a file.

  Arguments:
      model: A Keras model instance
      to_file: File name of the plot image.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.
  """
  graph = model_to_dot(model, show_shapes, show_layer_names, rankdir)
  # Infer the output format from the file extension, defaulting to PNG
  # when to_file has no extension at all.
  suffix = os.path.splitext(to_file)[1]
  image_format = suffix[1:] if suffix else 'png'
  graph.write(to_file, format=image_format)
| apache-2.0 |
andreimacavei/coala | coalib/tests/collecting/DependenciesTest.py | 3 | 1647 | import sys
import unittest
sys.path.insert(0, ".")
from coalib.bears.Bear import Bear
from coalib.collecting import Dependencies
class BearWithoutDeps(Bear):
    # Fixture bear with no dependencies: a leaf of the resolution graph.
    @staticmethod
    def get_dependencies():
        return []
class ResolvableBear1(Bear):
    # Fixture bear whose only dependency is a leaf -> resolvable.
    @staticmethod
    def get_dependencies():
        return [BearWithoutDeps]
class ResolvableBear2(Bear):
    # Fixture bear with a two-level (still acyclic) dependency chain.
    @staticmethod
    def get_dependencies():
        return [ResolvableBear1, BearWithoutDeps]
class UnresolvableBear1(Bear):
    # Part of a 3-bear dependency cycle (1 -> 3 -> 2 -> 1): unresolvable.
    @staticmethod
    def get_dependencies():
        return [ResolvableBear1, BearWithoutDeps, UnresolvableBear3]
class UnresolvableBear2(Bear):
    # Part of a 3-bear dependency cycle (1 -> 3 -> 2 -> 1): unresolvable.
    @staticmethod
    def get_dependencies():
        return [ResolvableBear1, BearWithoutDeps, UnresolvableBear1]
class UnresolvableBear3(Bear):
    # Part of a 3-bear dependency cycle (1 -> 3 -> 2 -> 1): unresolvable.
    @staticmethod
    def get_dependencies():
        return [ResolvableBear1, BearWithoutDeps, UnresolvableBear2]
class DependenciesTest(unittest.TestCase):
    """Tests for Dependencies.resolve using the fixture bears above."""

    def test_no_deps(self):
        # Duplicate bears are de-duplicated: two copies resolve to one.
        self.assertEqual(
            len(Dependencies.resolve([BearWithoutDeps,
                                      BearWithoutDeps])),
            1)
    def test_resolvable_deps(self):
        # Resolution returns dependencies before their dependents.
        self.assertEqual(Dependencies.resolve([ResolvableBear1,
                                               ResolvableBear2]),
                         [BearWithoutDeps, ResolvableBear1, ResolvableBear2])
    def test_unresolvable_deps(self):
        # A dependency cycle must raise CircularDependencyError.
        self.assertRaises(
            Dependencies.CircularDependencyError,
            Dependencies.resolve,
            [UnresolvableBear1])
# Allow running this test module directly with verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| agpl-3.0 |
fginter/docs-fginterfork | gen_index/index_page.py | 2 | 12420 | #Note: much of the at-glance table generation happens in templates/atglance.html -> go see there
# this code produces the dictionary which holds the data for rendering the template
# and then simply feeds that dictionary to jinja2 which does its magic with the template
import sys
import glob
import os.path
import json
import re
import codecs
import StringIO
import jinja2
jinja2_env=jinja2.Environment(loader=jinja2.FileSystemLoader("./templates", encoding='utf-8', followlinks=False))
languages_template=jinja2_env.get_template("atglance.html")
# categories={(u"Documentation status",u"stub"):"""<span class="widespan" style="color:gray"><span class="hint--top hint--info" data-hint="No documentation">-</span></span>""",
# (u"Documentation status",u"partial"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Partial documentation"><i class="fa fa-file-o"></i></span></span>""",
# (u"Documentation status",u"complete"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Complete documentation"><i class="fa fa-file-text-o"></i></span></span>""",
# (u"Data source",u"unknown"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Data source not known">-</span></span>""",
# (u"Data source",u"automatic"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Automatic conversion"><i class="fa fa-cogs"></i></span></span>""",
# (u"Data source",u"semi-automatic"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Automatic conversion with manual corrections"><i class="fa fa-cogs"></i><!--<i class="fa fa-plus" style="font-size: 0.75em; line-height: 1.33em; vertical-align: +10%;">--><i class="fa fa-check"></i></span></span>""",
# (u"Data source",u"manual"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Full manual check of the data"><i class="fa fa-user"></i></span></span>""",
# (u"License",u"none"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="License not known">-</span></span>""",
# (u"Data available since",u"UD v1.0"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.0 (Jan 2015)"><i class="fa fa-check"></i></span></span>""",
# (u"Data available since",u"UD v1.1"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.1 (May 2015)"><i class="fa fa-check"></i></span></span>""",
# (u"Data available since",u"UD v1.2"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.2 (November 2015)"><i class="fa fa-check"></i></span></span>""",
# (u"Data available since",u"UD v1.3"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.3 (May 2016)"><i class="fa fa-check"></i></span></span>""",
# (u"Data available since",u"UD v2.0"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Scheduled for release in UD version 2.0 (November 2016)"><i class="fa fa-hourglass-end"></i></span></span>""",
# (u"Data available since",u"none"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="No firm schedule for data release">-</span></span>"""}
valueRe=re.compile(u"^([a-zA-Z ]+): (.+)$")
def analyze_readme(dir_name):
    """Extract "Field: value" metadata from the first README in *dir_name*.

    Returns a dict pre-populated with defaults for every known field; any
    README line of the form "<Field name>: <value>" overrides (or adds)
    the corresponding entry. The defaults are returned unchanged when the
    directory holds no README-like file.
    """
    field_re = re.compile(u"^([a-zA-Z ]+): (.+)$")
    info = {u"Documentation status": u"stub",
            u"Data source": u"unknown",
            u"License": u"unknown",
            u"Data available since": u"unknown",
            u"Genre": u"none",
            u"Contributors": u""}
    candidates = sorted(p for p in glob.glob(os.path.join(dir_name, "*"))
                        if "readme" in p.lower())
    if not candidates:
        # No README file at all -> keep the defaults.
        return info
    with codecs.open(candidates[0], "r", "utf-8") as handle:
        for raw_line in handle:
            hit = field_re.match(raw_line)
            if hit:
                info[hit.group(1).strip()] = hit.group(2).strip()
    return info
def gen_table(args):
    """Build the per-language treebank overview and render the template.

    Reads flags.json / genre_symbols.json / lcodes.json from the current
    directory plus one _corpus_data/<lang>.json per treebank, merges in
    README metadata via analyze_readme(), and prints the rendered
    atglance.html template to stdout.
    """
    flags=json.loads(open("flags.json").read())
    genres_map=json.loads(open("genre_symbols.json","r").read())
    lcodes=json.loads(open("lcodes.json").read())
    # Genre symbol names use hyphens in CSS class names, not underscores.
    for k,v in genres_map.iteritems():
        genres_map[k]=v.replace("_","-")
    template_data={} #{language -> { }}
    jekyll_data=[] #this will go to jekyll then as data
    a_data=StringIO.StringIO()
    print >> a_data, "<!-- content of _includes/at_glance.html -->"
    print >> a_data, "<!-- do NOT edit by hand, that file is autogenerated using gen_index/index_page.py -->"
    # Will create a line for every language which has a repository
    langs=sorted(os.path.basename(x).replace(".json","") for x in glob.glob("_corpus_data/*.json"))
    for l in langs:
        with open(os.path.join("_corpus_data",l+".json"),"r") as f:
            corpus_data=json.load(f)
        readme_data=analyze_readme(os.path.join(args.ud_data,"UD_"+l))
        # Treebank names look like "Language-Variant"; group by language.
        language_name=l.split("-")[0]
        ldict=template_data.setdefault(language_name,{"language_name":language_name,"language_code":lcodes[l].split("_")[0],"treebanks":[]})
        cdict={"treebank_name":l,"treebank_language_code":lcodes[l],"flag":flags[l]}
        ldict["treebanks"].append(cdict)
        if corpus_data.get("token_count",0):
            # Human-readable token count in thousands, e.g. "1,234K".
            cdict["token_count_k"]="{:,}K".format(corpus_data.get("token_count")//1000)
            cdict["count_hint"]="{token_count:,} tokens {word_count:,} words {tree_count:,} sentences".format(**corpus_data)
        else:
            cdict["token_count_k"]="-"
            cdict["count_hint"]="No corpus data"
        # Feature columns: L = lemmas (>90% of words), F = morphological
        # features, D = secondary dependencies; "N/A" otherwise.
        cdict["columns"]=[]
        if corpus_data.get("words_with_lemma_count",0)>int(corpus_data.get("word_count")*0.9):
            cdict["columns"].append("L")
        else:
            cdict["columns"].append("N/A")
        if corpus_data.get("catvals",0)>0:
            cdict["columns"].append("F")
        else:
            cdict["columns"].append("N/A")
        if corpus_data.get("words_with_deps_count",0)>0:
            cdict["columns"].append("D")
        else:
            cdict["columns"].append("N/A")
        cdict["docstatus"]=readme_data.get("Documentation status","stub")
        cdict["source"]=readme_data.get("Data source","unknown")
        cdict["avail"]=readme_data["Data available since"]
        # Map the free-text license onto one of the known badge images.
        lic=readme_data.get("License","Unknown license")
        if "CC BY-NC-SA" in lic:
            cdict["liclogo"]="by-nc-sa.svg"
        elif "CC BY-SA" in lic:
            cdict["liclogo"]="by-sa.svg"
        elif "CC BY" in lic:
            cdict["liclogo"]="by.svg"
        elif "GPL" in lic:
            cdict["liclogo"]="gpl.svg"
        else:
            cdict["liclogo"]="N/A"
        cdict["lictext"]=lic
        cdict["genres"]=[]
        for g in readme_data["Genre"].split():
            cdict["genres"].append(genres_map.get(g,genres_map["none"]))
        cdict["genre_text"]=readme_data["Genre"]
    print languages_template.render(languages=template_data)
# # # corpus_data[u"lang_code"]=lcodes[l]
# # # corpus_data[u"lang_name"]=l
# # # corpus_data[u"langfam_code"]=lcodes[l].split("_")[0]
# # # corpus_data[u"langfam_name"]=l.split("-")[0]
# # # print >> a_data, '<div data-lc="%s">' % lcodes[l]
# # # print >> a_data, get_flag_span(l)
# # # print >> a_data, get_language_span(l)
# # print >> a_data, get_token_count_span(corpus_data)
# # print >> a_data, get_column_icons(corpus_data)
# # readme_data=analyze_readme(os.path.join(args.ud_data,"UD_"+l))
# # print >> sys.stderr, l
# # for c in (u"Documentation status", u"Data source", u"Data available since"):
# # print >> a_data, categories.get((c,readme_data[c]),empty_wide_span.format(hint=readme_data[c]))
# # print >> a_data, get_license_span(readme_data[u"License"])
# # print >> a_data, get_genre_span(readme_data["Genre"])
# # print >> a_data, "</div>"
# # print >> a_data, "<div>"
# # print >> a_data, link_template.format(**corpus_data)
# # print >> a_data, "</div>"
# # ldict={}
# # ldict[u"lang_name"]=corpus_data[u"lang_name"]
# # ldict[u"lang_code"]=corpus_data[u"lang_code"]
# # ldict[u"contributors"]=[]
# # if readme_data["Contributors"].strip():
# # for c in readme_data["Contributors"].strip().split(u";"):
# # c=c.strip()
# # lf=c.split(u",",1)
# # if len(lf)==2:
# # ldict[u"contributors"].append({u"last":lf[0].strip(),u"first":lf[1].strip(), u"full":lf[1].strip()+u" "+lf[0].strip()})
# # else:
# # ldict[u"contributors"].append({u"last":c,u"first":u"?",u"full":c})
# # jekyll_data.append(ldict)
# # return a_data,jekyll_data
# def gen_table_old(args):
# jekyll_data=[] #this will go to jekyll then as data
# a_data=StringIO.StringIO()
# print >> a_data, "<!-- content of _includes/at_glance.html -->"
# print >> a_data, "<!-- do NOT edit by hand, that file is autogenerated using gen_index/index_page.py -->"
# # Will create a line for every language which has a repository
# langs=sorted(os.path.basename(x).replace(".json","") for x in glob.glob("_corpus_data/*.json"))
# for l in langs:
# with open(os.path.join("_corpus_data",l+".json"),"r") as f:
# corpus_data=json.load(f)
# corpus_data[u"lang_code"]=lcodes[l]
# corpus_data[u"lang_name"]=l
# corpus_data[u"langfam_code"]=lcodes[l].split("_")[0]
# corpus_data[u"langfam_name"]=l.split("-")[0]
# print >> a_data, '<div data-lc="%s">' % lcodes[l]
# print >> a_data, get_flag_span(l)
# print >> a_data, get_language_span(l)
# print >> a_data, get_token_count_span(corpus_data)
# print >> a_data, get_column_icons(corpus_data)
# readme_data=analyze_readme(os.path.join(args.ud_data,"UD_"+l))
# print >> sys.stderr, l
# for c in (u"Documentation status", u"Data source", u"Data available since"):
# print >> a_data, categories.get((c,readme_data[c]),empty_wide_span.format(hint=readme_data[c]))
# print >> a_data, get_license_span(readme_data[u"License"])
# print >> a_data, get_genre_span(readme_data["Genre"])
# print >> a_data, "</div>"
# print >> a_data, "<div>"
# print >> a_data, link_template.format(**corpus_data)
# print >> a_data, "</div>"
# ldict={}
# ldict[u"lang_name"]=corpus_data[u"lang_name"]
# ldict[u"lang_code"]=corpus_data[u"lang_code"]
# ldict[u"contributors"]=[]
# if readme_data["Contributors"].strip():
# for c in readme_data["Contributors"].strip().split(u";"):
# c=c.strip()
# lf=c.split(u",",1)
# if len(lf)==2:
# ldict[u"contributors"].append({u"last":lf[0].strip(),u"first":lf[1].strip(), u"full":lf[1].strip()+u" "+lf[0].strip()})
# else:
# ldict[u"contributors"].append({u"last":c,u"first":u"?",u"full":c})
# jekyll_data.append(ldict)
# return a_data,jekyll_data
# Command-line entry point: parse arguments and generate the index table.
if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser(description='generates the index page')
    parser.add_argument('--ud-data', required=True, help='Where is the UD data, so I can grab the readmes? (DIRECTORY)')
    parser.add_argument('--ldict', default="../_data/ldata.json", help='Where to write the language dict file? (Default %(default)s)')
    args = parser.parse_args()
    gen_table(args)
    # a_data,ldict=gen_table(args)
    # print a_data.getvalue()
    # if args.ldict:
    #     with open(args.ldict,"w") as out:
    #         json.dump(ldict,out,indent=2)
| apache-2.0 |
mttr/django | tests/model_fields/test_uuid.py | 81 | 6563 | import json
import uuid
from django.core import exceptions, serializers
from django.db import IntegrityError, models
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from .models import (
NullableUUIDModel, PrimaryKeyUUIDModel, RelatedToUUIDModel, UUIDGrandchild,
UUIDModel,
)
class TestSaveLoad(TestCase):
    """Round-trip tests: values stored in a UUIDField come back as uuid.UUID."""

    # Canonical UUID used by all the string-input round-trip tests.
    _EXPECTED = uuid.UUID('550e8400e29b41d4a716446655440000')

    def test_uuid_instance(self):
        created = UUIDModel.objects.create(field=uuid.uuid4())
        self.assertEqual(UUIDModel.objects.get().field, created.field)

    def test_str_instance_no_hyphens(self):
        UUIDModel.objects.create(field='550e8400e29b41d4a716446655440000')
        self.assertEqual(UUIDModel.objects.get().field, self._EXPECTED)

    def test_str_instance_hyphens(self):
        UUIDModel.objects.create(field='550e8400-e29b-41d4-a716-446655440000')
        self.assertEqual(UUIDModel.objects.get().field, self._EXPECTED)

    def test_str_instance_bad_hyphens(self):
        # uuid.UUID tolerates arbitrarily placed hyphens, so the field does too.
        UUIDModel.objects.create(field='550e84-00-e29b-41d4-a716-4-466-55440000')
        self.assertEqual(UUIDModel.objects.get().field, self._EXPECTED)

    def test_null_handling(self):
        NullableUUIDModel.objects.create(field=None)
        self.assertEqual(NullableUUIDModel.objects.get().field, None)

    def test_pk_validated(self):
        # Non-UUID lookup values must be rejected before hitting the database.
        for bad_pk in ({}, []):
            with self.assertRaisesMessage(TypeError, 'is not a valid UUID'):
                PrimaryKeyUUIDModel.objects.get(pk=bad_pk)

    def test_wrong_value(self):
        msg = 'badly formed hexadecimal UUID string'
        with self.assertRaisesMessage(ValueError, msg):
            UUIDModel.objects.get(field='not-a-uuid')
        with self.assertRaisesMessage(ValueError, msg):
            UUIDModel.objects.create(field='not-a-uuid')
class TestMigrations(SimpleTestCase):
    """Deconstruction of UUIDField for migration serialization."""

    def test_deconstruct(self):
        # A default UUIDField should deconstruct with no extra kwargs.
        *_, kwargs = models.UUIDField().deconstruct()
        self.assertEqual(kwargs, {})
class TestQuerying(TestCase):
    """Lookup tests (exact, isnull) against a nullable UUIDField."""

    def setUp(self):
        # Fixture order matters: tests reference objs by index below.
        self.objs = [
            NullableUUIDModel.objects.create(field=uuid.uuid4()),
            NullableUUIDModel.objects.create(field='550e8400e29b41d4a716446655440000'),
            NullableUUIDModel.objects.create(field=None),
        ]
    def test_exact(self):
        # A non-hyphenated string lookup matches the stored UUID.
        self.assertSequenceEqual(
            NullableUUIDModel.objects.filter(field__exact='550e8400e29b41d4a716446655440000'),
            [self.objs[1]]
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            NullableUUIDModel.objects.filter(field__isnull=True),
            [self.objs[2]]
        )
class TestSerialization(SimpleTestCase):
    """JSON serializer round-trips for UUIDField values."""

    # Expected serializer output for an unsaved UUIDModel instance.
    test_data = '[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, "model": "model_fields.uuidmodel", "pk": null}]'
    def test_dumping(self):
        instance = UUIDModel(field=uuid.UUID('550e8400e29b41d4a716446655440000'))
        data = serializers.serialize('json', [instance])
        # Compare parsed JSON so key ordering differences don't matter.
        self.assertEqual(json.loads(data), json.loads(self.test_data))
    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, uuid.UUID('550e8400-e29b-41d4-a716-446655440000'))
class TestValidation(SimpleTestCase):
    """Form/model validation behaviour of UUIDField.clean()."""

    def test_invalid_uuid(self):
        field = models.UUIDField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            # Too short to be a valid UUID.
            field.clean('550e8400', None)
        self.assertEqual(cm.exception.code, 'invalid')
        self.assertEqual(cm.exception.message % cm.exception.params, "'550e8400' is not a valid UUID.")
    def test_uuid_instance_ok(self):
        field = models.UUIDField()
        field.clean(uuid.uuid4(), None)  # no error
class TestAsPrimaryKey(TestCase):
    """UUIDField used as a primary key: creation, bulk_create, FK behavior."""

    def test_creation(self):
        PrimaryKeyUUIDModel.objects.create()
        loaded = PrimaryKeyUUIDModel.objects.get()
        self.assertIsInstance(loaded.pk, uuid.UUID)

    def test_uuid_pk_on_save(self):
        # Passing id=None must still auto-generate a pk on save.
        saved = PrimaryKeyUUIDModel.objects.create(id=None)
        loaded = PrimaryKeyUUIDModel.objects.get()
        # Fixed: assertIsNotNone's second argument is the failure *message*;
        # the previous `assertIsNotNone(loaded.id, None)` passed None as msg
        # rather than asserting anything additional.
        self.assertIsNotNone(loaded.id)
        self.assertEqual(loaded.id, saved.id)

    def test_uuid_pk_on_bulk_create(self):
        u1 = PrimaryKeyUUIDModel()
        u2 = PrimaryKeyUUIDModel(id=None)
        PrimaryKeyUUIDModel.objects.bulk_create([u1, u2])
        # Check that the two objects were correctly created.
        u1_found = PrimaryKeyUUIDModel.objects.filter(id=u1.id).exists()
        u2_found = PrimaryKeyUUIDModel.objects.exclude(id=u1.id).exists()
        self.assertTrue(u1_found)
        self.assertTrue(u2_found)
        self.assertEqual(PrimaryKeyUUIDModel.objects.count(), 2)

    def test_underlying_field(self):
        # The FK's raw attname (uuid_fk_id) mirrors the target's UUID pk.
        pk_model = PrimaryKeyUUIDModel.objects.create()
        RelatedToUUIDModel.objects.create(uuid_fk=pk_model)
        related = RelatedToUUIDModel.objects.get()
        self.assertEqual(related.uuid_fk.pk, related.uuid_fk_id)

    def test_update_with_related_model_instance(self):
        # regression for #24611
        u1 = PrimaryKeyUUIDModel.objects.create()
        u2 = PrimaryKeyUUIDModel.objects.create()
        r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
        RelatedToUUIDModel.objects.update(uuid_fk=u2)
        r.refresh_from_db()
        self.assertEqual(r.uuid_fk, u2)

    def test_update_with_related_model_id(self):
        # Same as above but updating with the pk value instead of an instance.
        u1 = PrimaryKeyUUIDModel.objects.create()
        u2 = PrimaryKeyUUIDModel.objects.create()
        r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
        RelatedToUUIDModel.objects.update(uuid_fk=u2.pk)
        r.refresh_from_db()
        self.assertEqual(r.uuid_fk, u2)

    def test_two_level_foreign_keys(self):
        # exercises ForeignKey.get_db_prep_value()
        UUIDGrandchild().save()
class TestAsPrimaryKeyTransactionTests(TransactionTestCase):
    # Need a TransactionTestCase to avoid deferring FK constraint checking.
    available_apps = ['model_fields']
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_unsaved_fk(self):
        # Referencing an unsaved object must violate the FK constraint.
        u1 = PrimaryKeyUUIDModel()
        with self.assertRaises(IntegrityError):
            RelatedToUUIDModel.objects.create(uuid_fk=u1)
| bsd-3-clause |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/plat-mac/lib-scriptpackages/StdSuites/QuickDraw_Graphics_Suite.py | 81 | 11738 | """Suite QuickDraw Graphics Suite: A set of basic classes for graphics
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'qdrw'
# Auto-generated placeholder: this suite defines no apple events, only classes.
class QuickDraw_Graphics_Suite_Events:
    pass
# Auto-generated OSA declarations for the 'carc' (arc) object class and its
# apple-event property bindings. Each _Prop_* class maps a property name to
# its four-char event code ('which') and expected value type ('want').
class arc(aetools.ComponentItem):
    """arc - An arc """
    want = 'carc'
class _Prop_arc_angle(aetools.NProperty):
    """arc angle - the angle of the arc in degrees """
    which = 'parc'
    want = 'fixd'
class _Prop_bounds(aetools.NProperty):
    """bounds - the smallest rectangle that contains the entire arc """
    which = 'pbnd'
    want = 'qdrt'
class _Prop_definition_rect(aetools.NProperty):
    """definition rect - the rectangle that contains the circle or oval used to define the arc """
    which = 'pdrt'
    want = 'qdrt'
class _Prop_fill_color(aetools.NProperty):
    """fill color - the fill color """
    which = 'flcl'
    want = 'cRGB'
class _Prop_fill_pattern(aetools.NProperty):
    """fill pattern - the fill pattern """
    which = 'flpt'
    want = 'cpix'
class _Prop_pen_color(aetools.NProperty):
    """pen color - the pen color """
    which = 'ppcl'
    want = 'cRGB'
class _Prop_pen_pattern(aetools.NProperty):
    """pen pattern - the pen pattern """
    which = 'pppa'
    want = 'cpix'
class _Prop_pen_width(aetools.NProperty):
    """pen width - the pen width """
    which = 'ppwd'
    want = 'shor'
class _Prop_start_angle(aetools.NProperty):
    """start angle - the angle that defines the start of the arc, in degrees """
    which = 'pang'
    want = 'fixd'
class _Prop_transfer_mode(aetools.NProperty):
    """transfer mode - the transfer mode """
    which = 'pptm'
    want = 'tran'
# Plural alias used by the aetools object-specifier machinery.
arcs = arc
# Auto-generated OSA declarations for the 'cdrw' (drawing area) object class
# and its apple-event property bindings ('which' = event code, 'want' = type).
class drawing_area(aetools.ComponentItem):
    """drawing area - Container for graphics and supporting information """
    want = 'cdrw'
class _Prop_background_color(aetools.NProperty):
    """background color - the color used to fill in unoccupied areas """
    which = 'pbcl'
    want = 'cRGB'
class _Prop_background_pattern(aetools.NProperty):
    """background pattern - the pattern used to fill in unoccupied areas """
    which = 'pbpt'
    want = 'cpix'
class _Prop_color_table(aetools.NProperty):
    """color table - the color table """
    which = 'cltb'
    want = 'clrt'
class _Prop_default_font(aetools.NProperty):
    """default font - the name of the default font for text objects """
    which = 'ptxf'
    want = 'itxt'
class _Prop_default_location(aetools.NProperty):
    """default location - the default location of each new graphic object """
    which = 'pnel'
    want = 'QDpt'
class _Prop_default_size(aetools.NProperty):
    """default size - the default size for text objects """
    which = 'ptps'
    want = 'fixd'
class _Prop_name(aetools.NProperty):
    """name - the name """
    which = 'pnam'
    want = 'itxt'
class _Prop_ordering(aetools.NProperty):
    """ordering - the ordered list of graphic objects in the drawing area """
    which = 'gobs'
    want = 'obj '
class _Prop_pixel_depth(aetools.NProperty):
    """pixel depth - the number of bits per pixel """
    which = 'pdpt'
    want = 'shor'
class _Prop_style(aetools.NProperty):
    """style - the default text style for text objects """
    which = 'txst'
    want = 'tsty'
class _Prop_text_color(aetools.NProperty):
    """text color - the default color for text objects """
    which = 'ptxc'
    want = 'cRGB'
class _Prop_update_on_change(aetools.NProperty):
    """update on change - Redraw after each change? """
    which = 'pupd'
    want = 'bool'
class _Prop_writing_code(aetools.NProperty):
    """writing code - the script system and language of text objects in the drawing area """
    which = 'psct'
    want = 'intl'
# Plural alias used by the aetools object-specifier machinery.
drawing_areas = drawing_area
class graphic_objects(aetools.ComponentItem):
"""graphic objects - """
want = 'cgob'
graphic_object = graphic_objects
class graphic_shapes(aetools.ComponentItem):
"""graphic shapes - """
want = 'cgsh'
graphic_shape = graphic_shapes
class graphic_text(aetools.ComponentItem):
"""graphic text - A series of characters within a drawing area """
want = 'cgtx'
class _Prop_color(aetools.NProperty):
"""color - the color of the first character """
which = 'colr'
want = 'cRGB'
class _Prop_font(aetools.NProperty):
"""font - the name of the font of the first character """
which = 'font'
want = 'ctxt'
class _Prop_size(aetools.NProperty):
"""size - the size in points of the first character """
which = 'ptsz'
want = 'fixd'
class _Prop_uniform_styles(aetools.NProperty):
"""uniform styles - the text styles that are uniform throughout the text """
which = 'ustl'
want = 'tsty'
class ovals(aetools.ComponentItem):
"""ovals - """
want = 'covl'
oval = ovals
class polygon(aetools.ComponentItem):
"""polygon - A polygon """
want = 'cpgn'
class _Prop_point_list(aetools.NProperty):
"""point list - the list of points that define the polygon """
which = 'ptlt'
want = 'QDpt'
polygons = polygon
class graphic_groups(aetools.ComponentItem):
"""graphic groups - """
want = 'cpic'
graphic_group = graphic_groups
class pixel_maps(aetools.ComponentItem):
"""pixel maps - """
want = 'cpix'
pixel_map = pixel_maps
class pixel(aetools.ComponentItem):
"""pixel - A pixel """
want = 'cpxl'
pixels = pixel
class rectangles(aetools.ComponentItem):
"""rectangles - """
want = 'crec'
rectangle = rectangles
class rounded_rectangle(aetools.ComponentItem):
"""rounded rectangle - A rounded rectangle """
want = 'crrc'
class _Prop_corner_curve_height(aetools.NProperty):
"""corner curve height - the height of the oval used to define the shape of the rounded corners """
which = 'pchd'
want = 'shor'
class _Prop_corner_curve_width(aetools.NProperty):
"""corner curve width - the width of the oval used to define the shape of the rounded corners """
which = 'pcwd'
want = 'shor'
rounded_rectangles = rounded_rectangle
class graphic_line(aetools.ComponentItem):
"""graphic line - A graphic line """
want = 'glin'
class _Prop_arrow_style(aetools.NProperty):
"""arrow style - the arrow style """
which = 'arro'
want = 'arro'
class _Prop_dash_style(aetools.NProperty):
"""dash style - the dash style """
which = 'pdst'
want = 'tdas'
class _Prop_end_point(aetools.NProperty):
"""end point - the ending point of the line """
which = 'pend'
want = 'QDpt'
class _Prop_start_point(aetools.NProperty):
"""start point - the starting point of the line """
which = 'pstp'
want = 'QDpt'
graphic_lines = graphic_line
arc._superclassnames = []
arc._privpropdict = {
'arc_angle' : _Prop_arc_angle,
'bounds' : _Prop_bounds,
'definition_rect' : _Prop_definition_rect,
'fill_color' : _Prop_fill_color,
'fill_pattern' : _Prop_fill_pattern,
'pen_color' : _Prop_pen_color,
'pen_pattern' : _Prop_pen_pattern,
'pen_width' : _Prop_pen_width,
'start_angle' : _Prop_start_angle,
'transfer_mode' : _Prop_transfer_mode,
}
arc._privelemdict = {
}
drawing_area._superclassnames = []
drawing_area._privpropdict = {
'background_color' : _Prop_background_color,
'background_pattern' : _Prop_background_pattern,
'color_table' : _Prop_color_table,
'default_font' : _Prop_default_font,
'default_location' : _Prop_default_location,
'default_size' : _Prop_default_size,
'name' : _Prop_name,
'ordering' : _Prop_ordering,
'pixel_depth' : _Prop_pixel_depth,
'style' : _Prop_style,
'text_color' : _Prop_text_color,
'update_on_change' : _Prop_update_on_change,
'writing_code' : _Prop_writing_code,
}
drawing_area._privelemdict = {
}
graphic_objects._superclassnames = []
graphic_objects._privpropdict = {
}
graphic_objects._privelemdict = {
}
graphic_shapes._superclassnames = []
graphic_shapes._privpropdict = {
}
graphic_shapes._privelemdict = {
}
graphic_text._superclassnames = []
graphic_text._privpropdict = {
'color' : _Prop_color,
'font' : _Prop_font,
'size' : _Prop_size,
'uniform_styles' : _Prop_uniform_styles,
}
graphic_text._privelemdict = {
}
ovals._superclassnames = []
ovals._privpropdict = {
}
ovals._privelemdict = {
}
polygon._superclassnames = []
polygon._privpropdict = {
'point_list' : _Prop_point_list,
}
polygon._privelemdict = {
}
graphic_groups._superclassnames = []
graphic_groups._privpropdict = {
}
graphic_groups._privelemdict = {
}
pixel_maps._superclassnames = []
pixel_maps._privpropdict = {
}
pixel_maps._privelemdict = {
}
pixel._superclassnames = []
pixel._privpropdict = {
'color' : _Prop_color,
}
pixel._privelemdict = {
}
rectangles._superclassnames = []
rectangles._privpropdict = {
}
rectangles._privelemdict = {
}
rounded_rectangle._superclassnames = []
rounded_rectangle._privpropdict = {
'corner_curve_height' : _Prop_corner_curve_height,
'corner_curve_width' : _Prop_corner_curve_width,
}
rounded_rectangle._privelemdict = {
}
graphic_line._superclassnames = []
graphic_line._privpropdict = {
'arrow_style' : _Prop_arrow_style,
'dash_style' : _Prop_dash_style,
'end_point' : _Prop_end_point,
'start_point' : _Prop_start_point,
}
graphic_line._privelemdict = {
}
_Enum_arro = {
'no_arrow' : 'arno', # No arrow on line
'arrow_at_start' : 'arst', # Arrow at start of line
'arrow_at_end' : 'aren', # Arrow at end of line
'arrow_at_both_ends' : 'arbo', # Arrow at both the start and the end of the line
}
_Enum_tran = {
'copy_pixels' : 'cpy ', #
'not_copy_pixels' : 'ncpy', #
'or_pixels' : 'or ', #
'not_or_pixels' : 'ntor', #
'bic_pixels' : 'bic ', #
'not_bic_pixels' : 'nbic', #
'xor_pixels' : 'xor ', #
'not_xor_pixels' : 'nxor', #
'add_over_pixels' : 'addo', #
'add_pin_pixels' : 'addp', #
'sub_over_pixels' : 'subo', #
'sub_pin_pixels' : 'subp', #
'ad_max_pixels' : 'admx', #
'ad_min_pixels' : 'admn', #
'blend_pixels' : 'blnd', #
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'carc' : arc,
'cdrw' : drawing_area,
'cgob' : graphic_objects,
'cgsh' : graphic_shapes,
'cgtx' : graphic_text,
'covl' : ovals,
'cpgn' : polygon,
'cpic' : graphic_groups,
'cpix' : pixel_maps,
'cpxl' : pixel,
'crec' : rectangles,
'crrc' : rounded_rectangle,
'glin' : graphic_line,
}
_propdeclarations = {
'arro' : _Prop_arrow_style,
'cltb' : _Prop_color_table,
'colr' : _Prop_color,
'flcl' : _Prop_fill_color,
'flpt' : _Prop_fill_pattern,
'font' : _Prop_font,
'gobs' : _Prop_ordering,
'pang' : _Prop_start_angle,
'parc' : _Prop_arc_angle,
'pbcl' : _Prop_background_color,
'pbnd' : _Prop_bounds,
'pbpt' : _Prop_background_pattern,
'pchd' : _Prop_corner_curve_height,
'pcwd' : _Prop_corner_curve_width,
'pdpt' : _Prop_pixel_depth,
'pdrt' : _Prop_definition_rect,
'pdst' : _Prop_dash_style,
'pend' : _Prop_end_point,
'pnam' : _Prop_name,
'pnel' : _Prop_default_location,
'ppcl' : _Prop_pen_color,
'pppa' : _Prop_pen_pattern,
'pptm' : _Prop_transfer_mode,
'ppwd' : _Prop_pen_width,
'psct' : _Prop_writing_code,
'pstp' : _Prop_start_point,
'ptlt' : _Prop_point_list,
'ptps' : _Prop_default_size,
'ptsz' : _Prop_size,
'ptxc' : _Prop_text_color,
'ptxf' : _Prop_default_font,
'pupd' : _Prop_update_on_change,
'txst' : _Prop_style,
'ustl' : _Prop_uniform_styles,
}
_compdeclarations = {
}
_enumdeclarations = {
'arro' : _Enum_arro,
'tran' : _Enum_tran,
}
| gpl-2.0 |
davidbradway/py-package-example | setup.py | 1 | 1232 | import os
from setuptools import setup
# Read the long description from the README shipped next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

# NOTE(review): the metadata below (name='django-polls', packages=['polls'],
# example URL/license text) appears to be copied from the Django packaging
# tutorial -- confirm it matches this package before publishing.
setup(
    name='django-polls',
    version='0.1',
    packages=['polls'],
    include_package_data=True,
    license='BSD License',  # example license
    description='A simple Django app to conduct Web-based polls.',
    long_description=README,
    url='http://www.example.com/',
    author='David Bradway',
    author_email='david.bradway@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
sbalde/edx-platform | cms/djangoapps/contentstore/features/pages.py | 36 | 5109 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from lettuce import world, step
from nose.tools import assert_equal, assert_in # pylint: disable=no-name-in-module
CSS_FOR_TAB_ELEMENT = "li[data-tab-id='{0}'] input.toggle-checkbox"
@step(u'I go to the pages page$')
def go_to_static(step):
    """Open Studio's Pages section via the course nav menu."""
    menu_css = 'li.nav-course-courseware'
    static_css = 'li.nav-course-courseware-pages a'
    world.css_click(menu_css)
    world.css_click(static_css)
@step(u'I add a new static page$')
def add_page(step):
    """Click the 'new page' button to create a static page."""
    button_css = 'a.new-button'
    world.css_click(button_css)
@step(u'I should see a static page named "([^"]*)"$')
def see_a_static_page_named_foo(step, name):
    """Assert the single rendered static page is titled ``name``."""
    pages_css = 'div.xmodule_StaticTabModule'
    page_name_html = world.css_html(pages_css)
    assert_equal(page_name_html.strip(), name)
@step(u'I should not see any static pages$')
def not_see_any_static_pages(step):
    """Assert that no static-page XBlock is rendered on the page."""
    # Poll for up to 30s: the page may still be refreshing after a delete.
    assert world.is_css_not_present('div.xmodule_StaticTabModule', wait_time=30)
@step(u'I "(edit|delete)" the static page$')
def click_edit_or_delete(step, edit_or_delete):
button_css = 'ul.component-actions a.%s-button' % edit_or_delete
world.css_click(button_css)
@step(u'I change the name to "([^"]*)"$')
def change_name(step, new_name):
    """Rename the static page through the component settings editor."""
    settings_css = '.settings-button'
    world.css_click(settings_css)
    input_css = 'input.setting-input'
    world.css_fill(input_css, new_name)
    if world.is_firefox():
        # Firefox needs an explicit change event for the fill to register.
        world.trigger_event(input_css)
    world.save_component()
@step(u'I drag the first static page to the last$')
def drag_first_static_page_to_last(step):
    """Reorder static pages by dragging the first component to the end."""
    drag_first_to_last_with_css('.component')
@step(u'I have created a static page$')
def create_static_page(step):
    """Composite step: fresh course, Pages section open, one page added."""
    step.given('I have opened the pages page in a new course')
    step.given('I add a new static page')
@step(u'I have opened the pages page in a new course$')
def open_pages_page_in_new_course(step):
    """Composite step: create a new Studio course and open its Pages page."""
    step.given('I have opened a new course in Studio')
    step.given('I go to the pages page')
@step(u'I have created two different static pages$')
def create_two_pages(step):
    """Create two static pages, renaming the first so they are distinct."""
    step.given('I have created a static page')
    step.given('I "edit" the static page')
    step.given('I change the name to "First"')
    step.given('I add a new static page')
    # Verify order of pages (a newly added page keeps its default title, "Empty").
    _verify_page_names('First', 'Empty')
@step(u'the static pages are switched$')
def static_pages_are_switched(step):
    """Assert the two static pages now appear in reversed order."""
    _verify_page_names('Empty', 'First')
def _verify_page_names(first, second):
    """Wait until exactly two static pages exist, then assert their titles in order."""
    world.wait_for(
        func=lambda _: len(world.css_find('.xmodule_StaticTabModule')) == 2,
        timeout=200,
        timeout_msg="Timed out waiting for two pages to be present"
    )
    pages = world.css_find('.xmodule_StaticTabModule')
    assert_equal(pages[0].text, first)
    assert_equal(pages[1].text, second)
@step(u'the built-in pages are in the default order$')
def built_in_pages_in_default_order(step):
    """Assert the built-in course tabs appear in their default order."""
    expected_pages = ['Courseware', 'Course Info', 'Wiki', 'Progress']
    see_pages_in_expected_order(expected_pages)
@step(u'the built-in pages are switched$')
def built_in_pages_switched(step):
    """Assert the Wiki and Progress built-in tabs have swapped positions."""
    expected_pages = ['Courseware', 'Course Info', 'Progress', 'Wiki']
    see_pages_in_expected_order(expected_pages)
@step(u'the pages are in the default order$')
def pages_in_default_order(step):
    """Assert built-in plus custom static pages are in their default order."""
    expected_pages = ['Courseware', 'Course Info', 'Wiki', 'Progress', 'First', 'Empty']
    see_pages_in_expected_order(expected_pages)
@step(u'the pages are switched$')
def pages_are_switched(step):
    """Assert the tab order after dragging: Wiki moved behind the custom pages.

    Fixed a stray doubled ``$$`` end-of-pattern anchor in the step regex; it
    was harmless to ``re`` but inconsistent with every other step definition
    in this module.
    """
    expected_pages = ['Courseware', 'Course Info', 'Progress', 'First', 'Empty', 'Wiki']
    see_pages_in_expected_order(expected_pages)
@step(u'I drag the first page to the last$')
def drag_first_page_to_last(step):
    """Reorder course tabs by dragging the first movable tab to the end."""
    drag_first_to_last_with_css('.is-movable')
@step(u'I should see the "([^"]*)" page as "(visible|hidden)"$')
def page_is_visible_or_hidden(step, page_id, visible_or_hidden):
hidden = visible_or_hidden == "hidden"
assert_equal(world.css_find(CSS_FOR_TAB_ELEMENT.format(page_id)).checked, hidden)
@step(u'I toggle the visibility of the "([^"]*)" page$')
def page_toggle_visibility(step, page_id):
    """Click the visibility checkbox for the given built-in page."""
    world.css_find(CSS_FOR_TAB_ELEMENT.format(page_id))[0].click()
def drag_first_to_last_with_css(css_class):
    """Drag the first element matching ``css_class`` below the last one.

    Uses raw Selenium action chains on the elements' drag handles.
    """
    # For some reason, the drag_and_drop method did not work in this case.
    draggables = world.css_find(css_class + ' .drag-handle')
    source = draggables.first
    target = draggables.last
    source.action_chains.click_and_hold(source._element).perform()  # pylint: disable=protected-access
    # Offset 50px past the target so the drop lands *after* it.
    source.action_chains.move_to_element_with_offset(target._element, 0, 50).perform()  # pylint: disable=protected-access
    source.action_chains.release().perform()
def see_pages_in_expected_order(page_names_in_expected_order):
    """Assert the visible course tabs match the expected names, in order."""
    tabs = world.css_find("li.course-tab")
    assert_equal(len(page_names_in_expected_order), len(tabs))
    for expected_name, tab in zip(page_names_in_expected_order, tabs):
        assert_in(expected_name, tab.text)
| agpl-3.0 |
Khan/git-bigfile | vendor/boto/rds/dbsecuritygroup.py | 185 | 6651 | # Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an DBSecurityGroup
"""
from boto.ec2.securitygroup import SecurityGroup
class DBSecurityGroup(object):
    """
    Represents an RDS database security group

    Properties reference available from the AWS documentation at
    http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html

    :ivar Status: The current status of the security group. Possible values are
        [ active, ? ]. Reference documentation lacks specifics of possibilities
    :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object
    :ivar description: The description of the security group
    :ivar ec2_groups: List of :py:class:`EC2 Security Group
        <boto.ec2.securitygroup.SecurityGroup>` objects that this security
        group PERMITS
    :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
        objects (containing CIDR addresses) that this security group PERMITS
    :ivar name: Name of the security group
    :ivar owner_id: ID of the owner of the security group. Can be 'None'
    """

    def __init__(self, connection=None, owner_id=None,
                 name=None, description=None):
        self.connection = connection
        self.owner_id = owner_id
        self.name = name
        self.description = description
        self.ec2_groups = []  # populated by startElement during XML parsing
        self.ip_ranges = []   # populated by startElement during XML parsing

    def __repr__(self):
        return 'DBSecurityGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        # SAX hook: returning a child object makes boto's parser delegate the
        # nested element's events to it; returning None keeps parsing here.
        if name == 'IPRange':
            cidr = IPRange(self)
            self.ip_ranges.append(cidr)
            return cidr
        elif name == 'EC2SecurityGroup':
            ec2_grp = EC2SecurityGroup(self)
            self.ec2_groups.append(ec2_grp)
            return ec2_grp
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'OwnerId':
            self.owner_id = value
        elif name == 'DBSecurityGroupName':
            self.name = value
        elif name == 'DBSecurityGroupDescription':
            self.description = value
        elif name == 'IPRanges':
            # Container element; its children were already handled above.
            pass
        else:
            # Fallback: keep any unrecognized field as a raw attribute.
            setattr(self, name, value)

    def delete(self):
        # Delete this security group via the associated RDS connection.
        return self.connection.delete_dbsecurity_group(self.name)

    def authorize(self, cidr_ip=None, ec2_group=None):
        """
        Add a new rule to this DBSecurity group.
        You need to pass in either a CIDR block to authorize or
        and EC2 SecurityGroup.

        :type cidr_ip: string
        :param cidr_ip: A valid CIDR IP range to authorize

        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
        :param ec2_group: An EC2 security group to authorize

        :rtype: bool
        :return: True if successful.
        """
        if isinstance(ec2_group, SecurityGroup):
            group_name = ec2_group.name
            group_owner_id = ec2_group.owner_id
        else:
            group_name = None
            group_owner_id = None
        return self.connection.authorize_dbsecurity_group(self.name,
                                                          cidr_ip,
                                                          group_name,
                                                          group_owner_id)

    def revoke(self, cidr_ip=None, ec2_group=None):
        """
        Revoke access to a CIDR range or EC2 SecurityGroup.
        You need to pass in either a CIDR block or
        an EC2 SecurityGroup from which to revoke access.

        :type cidr_ip: string
        :param cidr_ip: A valid CIDR IP range to revoke

        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
        :param ec2_group: An EC2 security group to revoke

        :rtype: bool
        :return: True if successful.
        """
        # EC2 group takes precedence; the CIDR path is only reached when no
        # SecurityGroup instance was supplied.
        if isinstance(ec2_group, SecurityGroup):
            group_name = ec2_group.name
            group_owner_id = ec2_group.owner_id
            return self.connection.revoke_dbsecurity_group(
                self.name,
                ec2_security_group_name=group_name,
                ec2_security_group_owner_id=group_owner_id)

        # Revoking by CIDR IP range
        return self.connection.revoke_dbsecurity_group(
            self.name, cidr_ip=cidr_ip)
class IPRange(object):
    """
    Describes a CIDR address range for use in a DBSecurityGroup

    :ivar cidr_ip: IP Address range
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.cidr_ip = None
        self.status = None

    def __repr__(self):
        return 'IPRange:%s' % self.cidr_ip

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Map the known XML element names onto our attribute names; any
        # unrecognized element is stored under its raw name.
        attr = {'CIDRIP': 'cidr_ip', 'Status': 'status'}.get(name, name)
        setattr(self, attr, value)
class EC2SecurityGroup(object):
    """
    Describes an EC2 security group for use in a DBSecurityGroup
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.name = None
        self.owner_id = None

    def __repr__(self):
        return 'EC2SecurityGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Translate the known XML element names; unknown elements are kept
        # verbatim as attributes.
        attr = {'EC2SecurityGroupName': 'name',
                'EC2SecurityGroupOwnerId': 'owner_id'}.get(name, name)
        setattr(self, attr, value)
| mit |
mrcrgl/gge-storage | lib/socket/management/commands/runsockettest.py | 1 | 2692 | from __future__ import unicode_literals
import socket
import multiprocessing
from django.core.management.base import BaseCommand, CommandError
import logging
logger = logging.getLogger(__name__)
class Socket:
    '''Per-connection echo handler for a NUL-delimited ("\\x00") text protocol.

    Coded for clarity, not efficiency: reads UTF-8 text in chunks, echoes
    each complete NUL-terminated message back to the peer.
    '''

    close_bit = "\x00"   # message terminator on the wire
    processor = None
    connection = None
    context = None
    address = None
    chunk_size = 2048    # bytes requested per recv()

    def __init__(self, connection, address):
        # Assign before logging: the previous code logged the class-level
        # defaults (None) because the attributes were not yet set, and it
        # passed %r placeholders to print() without interpolating them.
        self.connection = connection
        self.address = address
        logger.debug("Connected %r at %r", self.connection, self.address)

    def run(self):
        """Receive loop: buffer incoming text, echo each complete message."""
        try:
            message = ""
            while True:
                data = self.connection.recv(self.chunk_size).decode('utf-8')
                if data == "":
                    # Empty read means the peer closed the connection.
                    logger.warning("Socket closed remotely")
                    break
                message += data
                # A recv may deliver a partial message; keep the remainder
                # buffered until its terminator arrives.
                # NOTE(review): only the first complete message per recv is
                # processed; a second one waits for the next read -- confirm
                # whether that is acceptable for this protocol.
                if self.close_bit in message:
                    pos = message.index(self.close_bit)
                    partial_message = message[0:pos]
                    message = message[pos + len(self.close_bit):]
                    logger.debug("Received data %r", partial_message)
                    self.send(partial_message)
                    #context.add(data)
        except Exception as e:
            logger.critical("Problem handling request: %s" % e)
        finally:
            logger.critical("Closing socket")
            self.connection.close()

    def send(self, response):
        """Send ``response`` (text), appending the NUL terminator if missing."""
        message = response
        if not message.endswith(self.close_bit):
            message += self.close_bit
        logger.debug("Sending: %s", message)
        # Encode once up front.  The previous code encoded the remaining
        # *text* slice on every iteration while advancing a *byte* offset
        # into it, which corrupts output for any message containing
        # multi-byte UTF-8 characters and re-encodes the tail repeatedly.
        data = message.encode('utf-8')
        total_sent = 0
        while total_sent < len(data):
            sent = self.connection.send(data[total_sent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            total_sent = total_sent + sent
def handle_client(connection, address):
    """Worker-process entry point: serve one client connection to completion."""
    # Renamed the local variable -- it was called `socket`, shadowing the
    # imported `socket` module inside this function.
    handler = Socket(connection, address)
    handler.run()
class Command(BaseCommand):
    """Management command: run a toy forking socket server on 127.0.0.1:8019.

    Each accepted connection is served by the module-level ``handle_client``
    in a daemonized child process.
    """

    args = '<object object ...>'
    #help = 'Help text goes here'

    def handle(self, *args, **options):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('127.0.0.1', 8019))
        sock.listen(1)
        while True:
            conn, address = sock.accept()
            logger.info("Got connection")
            process = multiprocessing.Process(target=handle_client, args=(conn, address))
            process.daemon = True
            process.start()
            logger.info("Started process %r", process)
            # NOTE(review): the parent keeps its copy of `conn` open, so the
            # client never sees EOF until the child exits -- confirm whether
            # `conn.close()` should be called here after starting the child.
akiyoko/oscar_project | src/oscar/apps/dashboard/communications/views.py | 20 | 3090 | from django.contrib import messages
from django.contrib.sites.shortcuts import get_current_site
from django.template import TemplateSyntaxError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from oscar.core.loading import get_class, get_model
CommunicationEventType = get_model('customer', 'CommunicationEventType')
CommunicationEventTypeForm = get_class('dashboard.communications.forms',
'CommunicationEventTypeForm')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class ListView(generic.ListView):
    """Dashboard listing of all CommunicationEventType records."""
    model = CommunicationEventType
    template_name = 'dashboard/comms/list.html'
    context_object_name = 'commtypes'
class UpdateView(generic.UpdateView):
    """Edit a CommunicationEventType and optionally preview the emails it
    renders, either on screen ("show preview") or by sending a real test
    message ("send preview").  Previews do not save the object.
    """
    model = CommunicationEventType
    form_class = CommunicationEventTypeForm
    template_name = 'dashboard/comms/detail.html'
    context_object_name = 'commtype'
    success_url = '.'
    # Look the object up by its 'code' field rather than a numeric pk.
    slug_field = 'code'

    def form_invalid(self, form):
        messages.error(self.request,
                       _("The submitted form was not valid, please correct "
                         "the errors and resubmit"))
        return super(UpdateView, self).form_invalid(form)

    def form_valid(self, form):
        # The form carries several submit buttons; dispatch on which one was
        # pressed.  Only the plain save path persists the object.
        if 'send_preview' in self.request.POST:
            return self.send_preview(form)
        if 'show_preview' in self.request.POST:
            return self.show_preview(form)
        messages.success(self.request, _("Email saved"))
        return super(UpdateView, self).form_valid(form)

    def get_messages_context(self, form):
        """Build the template context used to render preview messages."""
        ctx = {'user': self.request.user,
               'site': get_current_site(self.request)}
        ctx.update(form.get_preview_context())
        return ctx

    def show_preview(self, form):
        """Render the messages inline on the detail page, without saving."""
        # NOTE(review): this method and send_preview share their first half;
        # consider extracting a common "render messages" helper.
        ctx = super(UpdateView, self).get_context_data()
        ctx['form'] = form
        commtype = form.save(commit=False)  # build the instance, don't persist
        commtype_ctx = self.get_messages_context(form)
        try:
            msgs = commtype.get_messages(commtype_ctx)
        except TemplateSyntaxError as e:
            # Surface template errors on the form rather than a 500 page.
            form.errors['__all__'] = form.error_class([six.text_type(e)])
            return self.render_to_response(ctx)
        ctx['show_preview'] = True
        ctx['preview'] = msgs
        return self.render_to_response(ctx)

    def send_preview(self, form):
        """Email the rendered messages to the address given on the form."""
        ctx = super(UpdateView, self).get_context_data()
        ctx['form'] = form
        commtype = form.save(commit=False)  # build the instance, don't persist
        commtype_ctx = self.get_messages_context(form)
        try:
            msgs = commtype.get_messages(commtype_ctx)
        except TemplateSyntaxError as e:
            form.errors['__all__'] = form.error_class([six.text_type(e)])
            return self.render_to_response(ctx)
        email = form.cleaned_data['preview_email']
        dispatch = Dispatcher()
        dispatch.send_email_messages(email, msgs)
        messages.success(self.request,
                         _("A preview email has been sent to %s") % email)
        return self.render_to_response(ctx)
| bsd-3-clause |
elzurdo/happynoamchomskyday | happynoamchomskyday/chomsky.py | 1 | 6340 | import os
import requests
import httplib2
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
SCOPES = ['https://www.googleapis.com/auth/calendar']
path = '{}/aux/'.format(os.getcwd())
CLIENT_SECRET_FILE = '{}client_secret_chomsky.json'.format(path)
APPLICATION_NAME = 'Noam Chomsky day'
def set_credentials():
    """Return Google API credentials, passing oauth2client CLI flags when
    argparse is available (it is absent on very old Pythons)."""
    try:
        import argparse
        flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
    except ImportError:
        flags = None
    return get_credentials(flags=flags)
def get_credentials(flags=None):
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Args:
        flags: optional argparse namespace forwarded to tools.run_flow.

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'chomsky.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # print() as a function call: identical output for a single argument
        # under Python 2, and keeps this module importable under Python 3
        # (the previous bare print statements were Py2-only syntax).
        print(CLIENT_SECRET_FILE)
        print(SCOPES)
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
class Calendar():
    """Thin wrapper around the Google Calendar v3 API service."""

    def __init__(self, credentials):
        # Build an authorized HTTP client and the Calendar service handle.
        http = credentials.authorize(httplib2.Http())
        self.service = discovery.build('calendar', 'v3', http=http)

    def print_cal_list(self):
        """Print summary, id and timezone of every calendar, following pagination."""
        page_token = None
        while True:
            calendar_list = self.service.calendarList().list(pageToken=page_token).execute()
            for calendar_list_entry in calendar_list['items']:
                print '\t', calendar_list_entry['summary'], calendar_list_entry['id']
                print 'tz\t', calendar_list_entry['timeZone']
                print '-' * 20
            page_token = calendar_list.get('nextPageToken')
            if not page_token:
                break

    def create_cal(self, summary=None, timeZone_ = 'Europe/London'):
        """Create a secondary calendar and return its id."""
        if not summary:
            raise ValueError('Must enter `summary` as str of Calendar name')
        calendar = {
            'summary': summary,
            'timeZone': timeZone_
        }
        created_calendar = self.service.calendars().insert(body=calendar).execute()
        return created_calendar['id']

    def delete_cal(self, cal_id):
        # Deleting a secondary calendar
        self.service.calendars().delete(calendarId=cal_id).execute()

    def _event(self, summary=None, description=None, dateStart=None, dateEnd=None, freq=None, interval=None):
        """Build the all-day event body dict, with optional RRULE recurrence."""
        if not dateStart:
            raise Exception("dateStart required to be of format 'YYYY-MM-DD'")
        EVENT = {'summary': summary, 'description': description}
        EVENT['start'] = {'date': dateStart}
        if not dateEnd:
            # Single-day event when no explicit end date is given.
            dateEnd = dateStart
        EVENT['end'] = {'date': dateEnd}
        if freq:
            recurrence = "RRULE:FREQ={}".format(freq) #"RRULE:FREQ={};INTERVAL={}".format(freq, interval)
            if interval:
                recurrence += ";INTERVAL={}".format(interval)
            EVENT['recurrence'] = [recurrence] #['RRULE:FREQ={};INTERVAL={}'.format(freq, interval)]
        return EVENT

    def add_event(self, cal_id, notify=True, *args, **kwargs):
        """Insert one event on calendar ``cal_id``; kwargs go to _event()."""
        EVENT = self._event(*args, **kwargs)
        e = self.service.events().insert(calendarId=cal_id, sendNotifications=notify, body=EVENT).execute()
        return e

    def add_events(self, cal_id, events, notify=True, verbose=True):
        """Insert each dict in ``events`` as an event on calendar ``cal_id``."""
        for event in events:
            self.add_event(cal_id, notify=notify, **event)
            if verbose:
                print "Added {}".format(event['summary'])
def name2wikiurl(first, last):
    """Return the raw-wikitext URL for the Wikipedia article "First_Last".

    Expects (first name, last name).  Mononymous people are not handled,
    e.g. stage names: https://en.wikipedia.org/wiki/Madonna_(entertainer)
    """
    title = '{}_{}'.format(str.title(first), str.title(last))
    return 'https://en.wikipedia.org/w/index.php?action=raw&title=' + title
def wiki_parse_bdate(str_, verbose=False):
    """Extract 'YYYY-MM-DD' from the birth_date infobox template of raw wikitext.

    Returns None when no birth_date field is present.
    """
    if 'birth_date' not in str_:
        return None
    # Pinpoint the birth-date template: the text between 'birth_date' and the
    # closing '}}', within the first 5000 characters (the infobox is at the top).
    str_ = str_[:5000].split('birth_date')[1].split('}}')[0]
    # 'YYYY-MM-DD'
    zero_element = str_.split('|')[1]
    # Some infoboxes carry 'df=yes' or 'mf=yes' before the date fields, which
    # shifts the year/month/day positions by one.  See differences between:
    # before: requests.get('https://en.wikipedia.org/w/index.php?action=raw&title=Albert_Einstein')
    # after: r = requests.get('https://en.wikipedia.org/w/index.php?action=raw&title=Michael_Jordan')
    # For people who lived under the Julian calendar Wikipedia also includes
    # the Gregorian ('New Style') date, e.g.:
    # https://en.wikipedia.org/w/index.php?action=raw&title=Isaac_Newton
    if 'New Style' in zero_element:
        str_ = str_.split('New Style')[1]
        zero_element = str_
    if 'yes' in zero_element:
        idxs = [2, 3, 4]
    else:
        idxs = [1, 2, 3]
    str_ = str_.split('{{')[1]
    l_split = str_.split('|')
    l_ = [l_split[idx] for idx in idxs]
    if verbose:
        # print() form: identical output for one argument in Python 2, and it
        # fixes the Py3 SyntaxError the old bare print statement caused.
        print(l_)
    # Zero-pad month and day to two digits.
    l_[1] = "{:02.0f}".format(int(l_[1]))
    l_[2] = "{:02.0f}".format(int(l_[2]))
    # might have to verify year has four digits too ...
    #TBD
    str_bday = "-".join(l_)
    return str_bday
def name2wikibdate(first, last, verbose=False):
    """Fetch the Wikipedia article for (first, last) and return the birth
    date as 'YYYY-MM-DD', or None if the article has no birth_date field.

    Performs a network request via ``requests``.
    """
    url = name2wikiurl(first, last)
    if verbose:
        print url
    r = requests.get(url)
    return wiki_parse_bdate(r.content, verbose=verbose)
def names2calendar(credentials, names, cal_name=None, cal_id=None,
                   interval=5, freq='YEARLY', summary="Happy %s %s Day!", verbose=False):
    """Create (or reuse) a Google calendar and fill it with birthday events.

    Args:
        credentials: Google API credentials passed to ``Calendar``.
        names: Iterable of (first, last) name pairs.
        cal_name: Name for a newly-created calendar (required when
            ``cal_id`` is not given).
        cal_id: Existing calendar id; when given, no calendar is created.
        interval: Recurrence interval stored on every event.
            NOTE(review): a default of 5 with freq='YEARLY' means an event
            every 5 years -- confirm that is intended for birthdays.
        freq: Recurrence frequency stored on every event.
        summary: %-format template taking (first, last).
        verbose: If True, print progress messages.

    Returns:
        (cal, cal_id) tuple: the ``Calendar`` wrapper and the calendar id.

    Raises:
        ValueError: When neither ``cal_id`` nor ``cal_name`` is given.
    """
    cal = Calendar(credentials)
    if not cal_id:
        # TODO(review): should check whether a calendar with this name
        # already exists before creating a new one.
        if not cal_name:
            raise ValueError("Since `cal_id` is not given `cal_name` needs to be set")
        if verbose:
            # print() form works on both Python 2 and 3.
            print("Creating {} calendar".format(cal_name))
        cal_id = cal.create_cal(summary=cal_name)
    # Pull birth dates from Wikipedia and build one recurring event each.
    l_events = []
    for name in names:
        bday = name2wikibdate(name[0], name[1])
        summary_temp = summary % (name[0], name[1])
        event = {'summary': summary_temp, 'dateStart': bday,
                 'freq': freq, 'interval': interval}
        l_events.append(event)
    # Add all events to the calendar in one pass.
    cal.add_events(cal_id, l_events)
    return cal, cal_id
| mit |
scottpurdy/nupic | src/nupic/data/generators/__init__.py | 50 | 1027 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing data generation tools."""
| agpl-3.0 |
nibanks/openthread | tests/scripts/thread-cert/__init__.py | 5 | 1600 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
| bsd-3-clause |
eclipse/ice | examples/org.eclipse.ice.examples.reflectivity/listFromScratchPython.py | 1 | 4003 |
# ****************************************************************************
# Copyright (c) 2015 UT-Battelle, LLC.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Initial API and implementation and/or initial documentation - Kasper
# Gammeltoft.
#
# This is an example script designed to show how to use ease with ICE. It
# creates a new Reflectivity Model and shows how to customize and build up
# the layers in the model from scratch.
# ****************************************************************************
# Needed imports from ICE
from org.eclipse.ice.datastructures.form import Material

# Load the Platform module for accessing OSGi services
loadModule('/System/Platform')

# Get the core service from ICE for creating and accessing objects.
coreService = getService(org.eclipse.ice.core.iCore.ICore)

# Create the reflectivity model to be used and get its reference. createItem()
# returns a string holding the new item's number, so convert it with int().
reflectModel = coreService.getItem(int(coreService.createItem("Reflectivity Model")))

# The list component used as the data for the table (it is on tab 2).
listComp = reflectModel.getComponent(2)

# Rebuild the layer list from scratch: clear it first. Clearing performs
# several operations, so the list's read/write lock must be held to protect
# it from other threads accessing it at the same time.
listComp.getReadWriteLock().writeLock().lock()
listComp.clear()
listComp.getReadWriteLock().writeLock().unlock()


def makeMaterial(name, materialId, thickness, roughness,
                 scatLengthDensity, massAbsCoherent, massAbsIncoherent):
    """Build one layer Material with the standard reflectivity properties.

    Thickness and roughness are in Angstroms; the remaining arguments are
    the scattering length density and the coherent/incoherent mass
    absorption values. Factored out because every layer is configured with
    exactly the same sequence of setProperty calls.
    """
    material = Material()
    material.setName(name)
    material.setProperty("Material ID", materialId)
    material.setProperty("Thickness (A)", thickness)
    material.setProperty("Roughness (A)", roughness)
    material.setProperty(Material.SCAT_LENGTH_DENSITY, scatLengthDensity)
    material.setProperty(Material.MASS_ABS_COHERENT, massAbsCoherent)
    material.setProperty(Material.MASS_ABS_INCOHERENT, massAbsIncoherent)
    return material


# Create the layers: air, aluminum oxide, aluminum, aluminum silicate,
# and silicon.
air = makeMaterial("Air", 1, 200, 0, 0, 0, 0)
AlOx = makeMaterial("AlOx", 2, 25, 10.2, 1.436e-6, 6.125e-11, 4.47e-12)
Al = makeMaterial("Al", 3, 500, 11.4, 2.078e-6, 2.87e-13, 1.83e-12)
AlSiOx = makeMaterial("AlSiOx", 4, 10, 17.2, 1.489e-6, 8.609e-9, 6.307e-10)
Si = makeMaterial("Si", 5, 100, 17.5, 2.07e-6, 4.7498e-11, 1.9977e-12)

# Add all of the materials back to the list (in top to bottom order),
# again under the write lock.
listComp.getReadWriteLock().writeLock().lock()
listComp.add(air)
listComp.add(AlOx)
listComp.add(Al)
listComp.add(AlSiOx)
listComp.add(Si)
listComp.getReadWriteLock().writeLock().unlock()

# Finally process the model to get the results.
coreService.processItem(reflectModel.getId(), "Calculate Reflectivity", 1)
| epl-1.0 |
terhorstd/nest-simulator | doc/topology/grid_iaf_oc.py | 17 | 1696 | # -*- coding: utf-8 -*-
#
# grid_iaf_oc.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example

Create three layers of 4x3 iaf_psc_alpha neurons, each with a different
center, and plot each layer in a fresh figure.

BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import pylab
import time
import nest
import nest.topology as topo

# Interactive mode so each draw() updates the window immediately.
pylab.ion()

# One plot per layer center: origin, upper-left quadrant, and a small offset.
for ctr in [(0.0, 0.0), (-2.0, 2.0), (0.5, 1.0)]:
    # Fresh kernel and figure for every layer.
    nest.ResetKernel()
    pylab.clf()
    # 4x3 grid layer of iaf_psc_alpha neurons centered at ctr.
    l1 = topo.CreateLayer({'columns': 4, 'rows': 3,
                           'extent': [2.0, 1.5],
                           'center': ctr,
                           'elements': 'iaf_psc_alpha'})
    topo.PlotLayer(l1, nodesize=50, fig=pylab.gcf())
    # beautify: fixed square axes with unit ticks so layer positions are
    # directly comparable across the three plots.
    pylab.axis([-3, 3, -3, 3])
    pylab.axes().set_aspect('equal', 'box')
    pylab.axes().set_xticks(pylab.arange(-3.0, 3.1, 1.0))
    pylab.axes().set_yticks(pylab.arange(-3.0, 3.1, 1.0))
    pylab.grid(True)
    # NOTE(review): the labels claim extent 1.5/1.0 and 2 rows, but the layer
    # is built with extent [2.0, 1.5] and 3 rows -- confirm which is intended.
    pylab.xlabel('4 Columns, Extent: 1.5, Center: %.1f' % ctr[0])
    pylab.ylabel('2 Rows, Extent: 1.0, Center: %.1f' % ctr[1])
    pylab.draw()
| gpl-2.0 |
JeanFred/graphite-web | docs/conf.py | 27 | 7754 | # -*- coding: utf-8 -*-
#
# Graphite documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 21 12:31:35 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../webapp'))
sys.path.append(os.path.abspath('../whisper'))
sys.path.append(os.path.abspath('../carbon'))
os.environ['DJANGO_SETTINGS_MODULE'] = "graphite.settings"
# Prevent graphite logger from complaining about missing log dir.
from graphite import settings
settings.LOG_DIR = os.path.abspath('.')
# Bring in the new ReadTheDocs sphinx theme
import sphinx_rtd_theme
# Define a custom autodoc documenter for the render.functions module
# This will remove the requestContext parameter which doesnt make sense in the context of the docs
import re
from sphinx.ext import autodoc
class RenderFunctionDocumenter(autodoc.FunctionDocumenter):
    """Autodoc documenter for graphite.render.functions that hides the
    internal ``requestContext`` parameter from the rendered signatures."""

    # Must outrank the stock FunctionDocumenter so Sphinx picks us first.
    priority = 10

    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # Only take over for functions that live in the render module.
        if parent.name != 'graphite.render.functions':
            return False
        return autodoc.FunctionDocumenter.can_document_member(
            member, membername, isattr, parent)

    def format_args(self):
        args = autodoc.FunctionDocumenter.format_args(self)
        if args is None:
            return None
        # Really, a regex sub here is by far the easiest way.
        return re.sub('requestContext, ', '', args)
def setup(app):
    # Sphinx extension hook: register the custom documenter so it replaces
    # the stock FunctionDocumenter for graphite.render.functions.
    app.add_autodocumenter(RenderFunctionDocumenter)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Mapping for external links such as Python standard lib
intersphinx_mapping = {
'python': ('http://docs.python.org/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Graphite'
copyright = u'2008-2012, Chris Davis; 2011-2015 The Graphite Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10.0'
# The full version, including alpha/beta/rc tags.
release = '0.10.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Graphitedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Graphite.tex', u'Graphite Documentation',
u'Chris Davis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 |
import datetime
import sys

import mistune
import yaml
def tilde(line):
    """Handle a ``~command: arg arg ...`` directive line.

    Currently only the ``import`` command is supported: it reads
    ``templates/<arg0>.html``, runs every template line through
    ``parseLine``, and substitutes any ``key=value`` arguments into
    ``{{ key }}`` placeholders.

    Args:
        line: The full directive line, containing '~' and ':'.

    Returns:
        The expanded template text for ``~import``, otherwise None.
    """
    try:
        command = line[line.index("~") + 1:line.index(":")]  # text between '~' and ':'
    except ValueError:
        # '~' or ':' missing -- bail out like the CLI tool it is.
        # (sys must be imported at module level; the original never did.)
        print("Malformed command: \"" + line + "\"")
        sys.exit()
    args = line[line.index(":") + 1:].split()  # whitespace-separated arguments
    if command == "import":
        # Context manager guarantees the template file is closed (the
        # original leaked the file handle).
        with open("templates/" + args[0] + ".html", "r") as tempFile:
            data = tempFile.read()
        # Run every template line through the normal substitution pass.
        # (The original loop variable shadowed the 'line' parameter.)
        parsedTemp = [parseLine(tempLine) for tempLine in data.splitlines()]
        data = "\n".join(parsedTemp)
        if len(args) > 1:
            for arg in args[1:]:  # skip the template name
                key = arg[:arg.index("=")]    # before the '='
                value = arg[arg.index("=") + 1:]  # after the '='
                data = data.replace("{{ " + key + " }}", value)
        return data
def dot(line):
    """Translate a '.'-directive into an HTML open/close tag.

    '..'        -> closing </span>
    '..NAME'    -> opening <span class="NAME">
    '.'         -> closing </div>
    '.NAME'     -> opening <div class="NAME">
    """
    if line.startswith(".."):
        return "</span>" if line == ".." else '<span class="{}">'.format(line[2:])
    return "</div>" if line == "." else '<div class="{}">'.format(line[1:])
def parseLine(line):  # Function for parsing our HTML templates
    """Apply all configured template substitutions to one line.

    Reads ``config/painless.yml``, substitutes every entry of its
    ``customValues`` mapping, then the built-in ``time`` and ``branding``
    placeholders.

    NOTE(review): the config file is re-read on every call; consider
    caching it if performance ever matters.
    """
    # safe_load instead of the deprecated bare yaml.load: the config is
    # plain data and must not be able to construct arbitrary objects.
    # The 'with' block also closes the file (the original leaked it).
    with open("config/painless.yml", "r") as configFile:
        config = yaml.safe_load(configFile.read())
    for name, value in config["customValues"].items():
        line = replaceSpecials(line, name, value)
    # pre-made placeholders
    line = replaceSpecials(line, "time", datetime.datetime.now().strftime(config["options"]["timeFormat"]))
    line = replaceSpecials(line, "branding", "<p><i>Built <a href='http://github.com/gusg21/painless'>painlessly</a>.</i></p>")
    return line
def replaceSpecials(line, name, value, BRACKETS="["):
    """Replace doubled-bracket placeholders for *name* in *line* with *value*.

    Both the spaced form (``[[ name ]]``) and the tight form (``[[name]]``)
    are replaced. BRACKETS selects the opening bracket character
    ('[', '{' or '(').
    """
    closers = {"[": "]", "{": "}", "(": ")"}
    opener = BRACKETS * 2
    closer = closers[BRACKETS] * 2
    line = line.replace(opener + " " + name + " " + closer, value)
    return line.replace(opener + name + closer, value)
def default(line):  # do to all lines in markdown
    """Default pipeline for a markdown line: substitutions, then markdown->HTML."""
    return mistune.markdown(parseLine(line))
| apache-2.0 |
cproctor/scratch_nxt | nxtbrick.py | 1 | 2682 | import lightblue
from nxt import Motor, find_one_brick, PORT_A, PORT_B, PORT_C, PORT_1, PORT_2, PORT_3, PORT_4
from nxt.sensor import Touch, Sound, Ultrasonic, Light
from time import sleep
class PortMap(dict):
    """Dict that lazily builds its values on first access.

    Valid keys are restricted to *ports*; the first lookup of a valid port
    calls *factory(port)*, caches the result, and returns it. Looking up
    any other key raises ValueError.
    """

    def __init__(self, ports, factory):
        self.factory = factory
        self.ports = ports

    def __missing__(self, port):
        # Reject unknown ports before constructing anything.
        if port not in self.ports:
            raise ValueError("{} is an invalid port; must be one of {}".format(
                port, ", ".join(self.ports)))
        device = self.factory(port)
        self[port] = device
        return device
class NXTBrick():
    """High-level wrapper around a connected LEGO NXT brick.

    Motors are created lazily on first use via PortMap; sensors must be
    registered explicitly with add_sensor(). Constructing an instance
    blocks until a brick is found (see __init__).
    """

    # User-facing motor port letters mapped to nxt port constants.
    motorPorts = {
        "a": PORT_A,
        "b": PORT_B,
        "c": PORT_C
    }

    # User-facing sensor port numbers mapped to nxt port constants.
    sensorPorts = {
        1: PORT_1,
        2: PORT_2,
        3: PORT_3,
        4: PORT_4
    }

    # Supported sensor type names; 'none' unregisters a sensor.
    sensorTypes = {
        "none": None,
        "touch": Touch,
        "sound": Sound,
        "ultrasonic": Ultrasonic,
        "light": Light
    }

    def __init__(self):
        # find_one_brick() scans for a brick -- presumably over
        # Bluetooth/USB -- and this call blocks until one is found.
        self.brick = find_one_brick()
        # Motors are built on demand; sensors start empty.
        self.motors = PortMap(self.motorPorts.keys(), self._motor_factory())
        self.sensors = {}

    def roll(self, port, power):
        """Run the motor on `port` at `power`, clamped to [-100, 100]."""
        self.motors[port].run(max(-100, min(100, power)))

    def halt(self, port):
        """Brake the motor on `port`, then release (idle) it after 0.2s."""
        self.motors[port].brake()
        sleep(0.2)
        self.motors[port].idle()

    def read_sensor(self, port):
        """Return the current sample of the sensor on `port` as an int.

        Returns the *string* "None" (not the None object) when no sensor is
        registered on the port -- presumably for display/serialization;
        confirm before relying on the return type.
        """
        if port in self.sensors.keys():
            return int(self.sensors[port].get_sample())
        else:
            return "None"

    def read_sensors(self):
        """Return {port: sample} for every sensor port (1-4)."""
        return {i: self.read_sensor(i) for i in self.sensorPorts.keys()}

    def _motor_factory(self):
        # Factory handed to PortMap: builds a Motor bound to this brick.
        def create_motor(port):
            return Motor(self.brick, self.motorPorts[port])
        return create_motor

    def add_sensor(self, port, sensorType):
        """Register a sensor of `sensorType` on sensor port `port` (1-4).

        Passing sensorType 'none' removes the sensor from the port.

        Raises:
            ValueError: for an unknown port or sensor type.
        """
        if port in self.sensorPorts.keys() and sensorType in self.sensorTypes:
            if sensorType == 'none':
                del self.sensors[port]
            else:
                self.sensors[port] = self.sensorTypes[sensorType](
                    self.brick, self.sensorPorts[port])
                if sensorType == 'light':
                    # Light sensors are switched to illuminated (active) mode.
                    self.sensors[port].set_illuminated(True)
        else:
            if port not in self.sensorPorts.keys():
                raise ValueError("{} is an invalid sensor port; must be in {}".format(
                    port, ", ".join(map(str, self.sensorPorts.keys()))))
            else:
                raise ValueError("{} is an invalid sensor type; must be in {}".format(
                    sensorType, ", ".join(self.sensorTypes)))

    def remove_sensor(self, port):
        """Remove any sensor registered on `port`."""
        self.add_sensor(port, 'none')
| mit |
shahar-stratoscale/nova | nova/openstack/common/systemd.py | 6 | 3056 | # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
    """Send *msg* to the systemd notification socket, if one is configured.

    Does nothing when NOTIFY_SOCKET is not set. When *unset_env* is true,
    NOTIFY_SOCKET is removed from the environment after a successful send
    so the notification cannot be repeated. Failures are logged, not raised.
    """
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if not notify_socket:
        return
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.connect(_abstractify(notify_socket))
        sock.sendall(msg)
        if unset_env:
            del os.environ['NOTIFY_SOCKET']
    except EnvironmentError:
        # Best-effort: notification failure must not take down the service.
        LOG.debug("Systemd notification failed", exc_info=True)
    finally:
        sock.close()
def notify():
    """Send notification to Systemd that service is ready.

    For details see
    http://www.freedesktop.org/software/systemd/man/sd_notify.html
    """
    # unset_env=False: NOTIFY_SOCKET stays set, so further notifications
    # from this process remain possible.
    _sd_notify(False, 'READY=1')
def notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    # unset_env=True: the variable is deleted after a successful send.
    _sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
    """Wait for systemd style notification on the socket.

    :param notify_socket: local socket address
    :type notify_socket:  string
    :param timeout:       socket timeout
    :type timeout:        float
    :returns: 0 service ready
              1 service not ready
              2 timeout occured
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    # Bind to the (possibly abstract-namespace) notification address and
    # wait for a single datagram from the service.
    sock.bind(_abstractify(notify_socket))
    try:
        # 512 bytes is ample for the short KEY=VALUE notification lines.
        msg = sock.recv(512)
    except socket.timeout:
        return 2
    finally:
        # Runs on both the timeout return and the success path.
        sock.close()
    if 'READY=1' in msg:
        return 0
    else:
        return 1
if __name__ == '__main__':
    # simple CLI for testing
    if len(sys.argv) == 1:
        # No arguments: act as the notifying service side.
        notify()
    elif len(sys.argv) >= 2:
        # One argument (timeout in seconds): act as the waiting side.
        timeout = float(sys.argv[1])
        notify_socket = os.getenv('NOTIFY_SOCKET')
        # If NOTIFY_SOCKET is unset nothing is awaited and the process
        # exits 0 -- presumably intentional for this test helper.
        if notify_socket:
            retval = onready(notify_socket, timeout)
            sys.exit(retval)
| apache-2.0 |
ramanajee/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/test_util.py | 295 | 27103 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Python proto2 tests.
This is intentionally modeled on C++ code in
//google/protobuf/test_util.*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import os.path
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
def SetAllFields(message):
  """Sets every field in the message to a unique value.

  Mirrors the C++ helper in //google/protobuf/test_util.*: optional fields
  get values in the 100s, repeated fields two entries each (200s and 300s),
  and fields with defaults values in the 400s, so tests can recognize
  which group a value came from.

  Args:
    message: A unittest_pb2.TestAllTypes instance.
  """

  #
  # Optional fields.
  #

  message.optional_int32 = 101
  message.optional_int64 = 102
  message.optional_uint32 = 103
  message.optional_uint64 = 104
  message.optional_sint32 = 105
  message.optional_sint64 = 106
  message.optional_fixed32 = 107
  message.optional_fixed64 = 108
  message.optional_sfixed32 = 109
  message.optional_sfixed64 = 110
  message.optional_float = 111
  message.optional_double = 112
  message.optional_bool = True
  # TODO(robinson): Firmly spec out and test how
  # protos interact with unicode.  One specific example:
  # what happens if we change the literal below to
  # u'115'?  What *should* happen?  Still some discussion
  # to finish with Kenton about bytes vs. strings
  # and forcing everything to be utf8. :-/
  message.optional_string = '115'
  message.optional_bytes = '116'
  message.optionalgroup.a = 117
  message.optional_nested_message.bb = 118
  message.optional_foreign_message.c = 119
  message.optional_import_message.d = 120
  message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ
  message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ
  message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ
  message.optional_string_piece = '124'
  message.optional_cord = '125'

  #
  # Repeated fields.
  #

  message.repeated_int32.append(201)
  message.repeated_int64.append(202)
  message.repeated_uint32.append(203)
  message.repeated_uint64.append(204)
  message.repeated_sint32.append(205)
  message.repeated_sint64.append(206)
  message.repeated_fixed32.append(207)
  message.repeated_fixed64.append(208)
  message.repeated_sfixed32.append(209)
  message.repeated_sfixed64.append(210)
  message.repeated_float.append(211)
  message.repeated_double.append(212)
  message.repeated_bool.append(True)
  message.repeated_string.append('215')
  message.repeated_bytes.append('216')
  message.repeatedgroup.add().a = 217
  message.repeated_nested_message.add().bb = 218
  message.repeated_foreign_message.add().c = 219
  message.repeated_import_message.add().d = 220
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR)
  message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR)
  message.repeated_string_piece.append('224')
  message.repeated_cord.append('225')

  # Add a second one of each field (values in the 300s; enums and bool
  # deliberately differ from the first entry).
  message.repeated_int32.append(301)
  message.repeated_int64.append(302)
  message.repeated_uint32.append(303)
  message.repeated_uint64.append(304)
  message.repeated_sint32.append(305)
  message.repeated_sint64.append(306)
  message.repeated_fixed32.append(307)
  message.repeated_fixed64.append(308)
  message.repeated_sfixed32.append(309)
  message.repeated_sfixed64.append(310)
  message.repeated_float.append(311)
  message.repeated_double.append(312)
  message.repeated_bool.append(False)
  message.repeated_string.append('315')
  message.repeated_bytes.append('316')
  message.repeatedgroup.add().a = 317
  message.repeated_nested_message.add().bb = 318
  message.repeated_foreign_message.add().c = 319
  message.repeated_import_message.add().d = 320
  message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAZ)
  message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ)
  message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ)
  message.repeated_string_piece.append('324')
  message.repeated_cord.append('325')

  #
  # Fields that have defaults.
  #

  message.default_int32 = 401
  message.default_int64 = 402
  message.default_uint32 = 403
  message.default_uint64 = 404
  message.default_sint32 = 405
  message.default_sint64 = 406
  message.default_fixed32 = 407
  message.default_fixed64 = 408
  message.default_sfixed32 = 409
  message.default_sfixed64 = 410
  message.default_float = 411
  message.default_double = 412
  message.default_bool = False
  message.default_string = '415'
  message.default_bytes = '416'
  message.default_nested_enum = unittest_pb2.TestAllTypes.FOO
  message.default_foreign_enum = unittest_pb2.FOREIGN_FOO
  message.default_import_enum = unittest_import_pb2.IMPORT_FOO
  message.default_string_piece = '424'
  message.default_cord = '425'
def SetAllExtensions(message):
  """Sets every extension in the message to a unique value.

  Mirrors SetAllFields(), but writes through the Extensions map instead of
  direct attribute access.  Values follow the same numbering convention:
  1xx for optional fields, 2xx/3xx for the two entries of each repeated
  field, 4xx for fields that declare defaults.

  Args:
    message: A unittest_pb2.TestAllExtensions instance.
  """
  extensions = message.Extensions
  pb2 = unittest_pb2
  import_pb2 = unittest_import_pb2

  #
  # Optional fields.
  #

  extensions[pb2.optional_int32_extension] = 101
  extensions[pb2.optional_int64_extension] = 102
  extensions[pb2.optional_uint32_extension] = 103
  extensions[pb2.optional_uint64_extension] = 104
  extensions[pb2.optional_sint32_extension] = 105
  extensions[pb2.optional_sint64_extension] = 106
  extensions[pb2.optional_fixed32_extension] = 107
  extensions[pb2.optional_fixed64_extension] = 108
  extensions[pb2.optional_sfixed32_extension] = 109
  extensions[pb2.optional_sfixed64_extension] = 110
  extensions[pb2.optional_float_extension] = 111
  extensions[pb2.optional_double_extension] = 112
  extensions[pb2.optional_bool_extension] = True
  extensions[pb2.optional_string_extension] = '115'
  extensions[pb2.optional_bytes_extension] = '116'
  # Group and message extensions: assigning to a sub-field implicitly
  # marks the extension as present.
  extensions[pb2.optionalgroup_extension].a = 117
  extensions[pb2.optional_nested_message_extension].bb = 118
  extensions[pb2.optional_foreign_message_extension].c = 119
  extensions[pb2.optional_import_message_extension].d = 120
  # (The original code assigned optional_nested_enum_extension twice in a
  # row; the redundant duplicate assignment has been removed.)
  extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
  extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ
  extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ
  extensions[pb2.optional_string_piece_extension] = '124'
  extensions[pb2.optional_cord_extension] = '125'

  #
  # Repeated fields.
  #

  extensions[pb2.repeated_int32_extension].append(201)
  extensions[pb2.repeated_int64_extension].append(202)
  extensions[pb2.repeated_uint32_extension].append(203)
  extensions[pb2.repeated_uint64_extension].append(204)
  extensions[pb2.repeated_sint32_extension].append(205)
  extensions[pb2.repeated_sint64_extension].append(206)
  extensions[pb2.repeated_fixed32_extension].append(207)
  extensions[pb2.repeated_fixed64_extension].append(208)
  extensions[pb2.repeated_sfixed32_extension].append(209)
  extensions[pb2.repeated_sfixed64_extension].append(210)
  extensions[pb2.repeated_float_extension].append(211)
  extensions[pb2.repeated_double_extension].append(212)
  extensions[pb2.repeated_bool_extension].append(True)
  extensions[pb2.repeated_string_extension].append('215')
  extensions[pb2.repeated_bytes_extension].append('216')
  extensions[pb2.repeatedgroup_extension].add().a = 217
  extensions[pb2.repeated_nested_message_extension].add().bb = 218
  extensions[pb2.repeated_foreign_message_extension].add().c = 219
  extensions[pb2.repeated_import_message_extension].add().d = 220
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR)
  extensions[pb2.repeated_string_piece_extension].append('224')
  extensions[pb2.repeated_cord_extension].append('225')

  # Append a second one of each field.
  extensions[pb2.repeated_int32_extension].append(301)
  extensions[pb2.repeated_int64_extension].append(302)
  extensions[pb2.repeated_uint32_extension].append(303)
  extensions[pb2.repeated_uint64_extension].append(304)
  extensions[pb2.repeated_sint32_extension].append(305)
  extensions[pb2.repeated_sint64_extension].append(306)
  extensions[pb2.repeated_fixed32_extension].append(307)
  extensions[pb2.repeated_fixed64_extension].append(308)
  extensions[pb2.repeated_sfixed32_extension].append(309)
  extensions[pb2.repeated_sfixed64_extension].append(310)
  extensions[pb2.repeated_float_extension].append(311)
  extensions[pb2.repeated_double_extension].append(312)
  extensions[pb2.repeated_bool_extension].append(False)
  extensions[pb2.repeated_string_extension].append('315')
  extensions[pb2.repeated_bytes_extension].append('316')
  extensions[pb2.repeatedgroup_extension].add().a = 317
  extensions[pb2.repeated_nested_message_extension].add().bb = 318
  extensions[pb2.repeated_foreign_message_extension].add().c = 319
  extensions[pb2.repeated_import_message_extension].add().d = 320
  extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ)
  extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ)
  extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ)
  extensions[pb2.repeated_string_piece_extension].append('324')
  extensions[pb2.repeated_cord_extension].append('325')

  #
  # Fields with defaults.
  #

  extensions[pb2.default_int32_extension] = 401
  extensions[pb2.default_int64_extension] = 402
  extensions[pb2.default_uint32_extension] = 403
  extensions[pb2.default_uint64_extension] = 404
  extensions[pb2.default_sint32_extension] = 405
  extensions[pb2.default_sint64_extension] = 406
  extensions[pb2.default_fixed32_extension] = 407
  extensions[pb2.default_fixed64_extension] = 408
  extensions[pb2.default_sfixed32_extension] = 409
  extensions[pb2.default_sfixed64_extension] = 410
  extensions[pb2.default_float_extension] = 411
  extensions[pb2.default_double_extension] = 412
  extensions[pb2.default_bool_extension] = False
  extensions[pb2.default_string_extension] = '415'
  extensions[pb2.default_bytes_extension] = '416'
  extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO
  extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO
  extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO
  extensions[pb2.default_string_piece_extension] = '424'
  extensions[pb2.default_cord_extension] = '425'
def SetAllFieldsAndExtensions(message):
  """Sets every field and extension in the message to a unique value.

  Args:
    message: A unittest_pb2.TestFieldOrderings instance (it carries the
      my_int/my_string/my_float fields and the my_extension_* extensions).
  """
  ext = message.Extensions
  message.my_int = 1
  message.my_string = 'foo'
  message.my_float = 1.0
  ext[unittest_pb2.my_extension_int] = 23
  ext[unittest_pb2.my_extension_string] = 'bar'
def ExpectAllFieldsAndExtensionsInOrder(serialized):
  """Ensures that serialized is the serialization we expect for a message
  filled with SetAllFieldsAndExtensions().  (Specifically, ensures that the
  serialization is in canonical, tag-number order).
  """
  message = unittest_pb2.TestFieldOrderings()
  pieces = []

  def _snapshot():
    # Serialize the single field set so far, then reset for the next one.
    pieces.append(message.SerializeToString())
    message.Clear()

  message.my_int = 1  # Field 1.
  _snapshot()
  message.Extensions[unittest_pb2.my_extension_int] = 23  # Field 5.
  _snapshot()
  message.my_string = 'foo'  # Field 11.
  _snapshot()
  message.Extensions[unittest_pb2.my_extension_string] = 'bar'  # Field 50.
  _snapshot()
  message.my_float = 1.0
  _snapshot()

  # Concatenating per-field serializations in tag order yields the
  # canonical serialization of the fully populated message.
  expected = ''.join(pieces)
  if expected != serialized:
    raise ValueError('Expected %r, found %r' % (expected, serialized))
def ExpectAllFieldsSet(test_case, message):
  """Check all fields for correct values have after Set*Fields() is called."""
  # The scalar numeric fields share one naming pattern and consecutive
  # expected values, so presence and value checks are data-driven.
  numeric_suffixes = ['int32', 'int64', 'uint32', 'uint64',
                      'sint32', 'sint64', 'fixed32', 'fixed64',
                      'sfixed32', 'sfixed64', 'float', 'double']

  # -- Optional fields: presence -------------------------------------------
  for suffix in numeric_suffixes + ['bool', 'string', 'bytes']:
    test_case.assertTrue(message.HasField('optional_' + suffix))
  test_case.assertTrue(message.HasField('optionalgroup'))
  test_case.assertTrue(message.HasField('optional_nested_message'))
  test_case.assertTrue(message.HasField('optional_foreign_message'))
  test_case.assertTrue(message.HasField('optional_import_message'))
  test_case.assertTrue(message.optionalgroup.HasField('a'))
  test_case.assertTrue(message.optional_nested_message.HasField('bb'))
  test_case.assertTrue(message.optional_foreign_message.HasField('c'))
  test_case.assertTrue(message.optional_import_message.HasField('d'))
  for suffix in ['nested_enum', 'foreign_enum', 'import_enum',
                 'string_piece', 'cord']:
    test_case.assertTrue(message.HasField('optional_' + suffix))

  # -- Optional fields: values (101, 102, ... in declaration order) --------
  for offset, suffix in enumerate(numeric_suffixes):
    test_case.assertEqual(101 + offset,
                          getattr(message, 'optional_' + suffix))
  test_case.assertEqual(True, message.optional_bool)
  test_case.assertEqual('115', message.optional_string)
  test_case.assertEqual('116', message.optional_bytes)
  test_case.assertEqual(117, message.optionalgroup.a)
  test_case.assertEqual(118, message.optional_nested_message.bb)
  test_case.assertEqual(119, message.optional_foreign_message.c)
  test_case.assertEqual(120, message.optional_import_message.d)
  test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                        message.optional_nested_enum)
  test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
                        message.optional_foreign_enum)
  test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
                        message.optional_import_enum)

  # -----------------------------------------------------------------
  # Repeated fields: every one of them holds exactly two entries.
  repeated_names = (['repeated_' + s for s in numeric_suffixes] +
                    ['repeated_bool', 'repeated_string', 'repeated_bytes',
                     'repeatedgroup', 'repeated_nested_message',
                     'repeated_foreign_message', 'repeated_import_message',
                     'repeated_nested_enum', 'repeated_foreign_enum',
                     'repeated_import_enum', 'repeated_string_piece',
                     'repeated_cord'])
  for name in repeated_names:
    test_case.assertEqual(2, len(getattr(message, name)))

  # First entry of every repeated field uses 2xx values, second 3xx.
  for index, base in ((0, 200), (1, 300)):
    for offset, suffix in enumerate(numeric_suffixes):
      test_case.assertEqual(base + 1 + offset,
                            getattr(message, 'repeated_' + suffix)[index])
    test_case.assertEqual(index == 0, message.repeated_bool[index])
    test_case.assertEqual(str(base + 15), message.repeated_string[index])
    test_case.assertEqual(str(base + 16), message.repeated_bytes[index])
    test_case.assertEqual(base + 17, message.repeatedgroup[index].a)
    test_case.assertEqual(base + 18,
                          message.repeated_nested_message[index].bb)
    test_case.assertEqual(base + 19,
                          message.repeated_foreign_message[index].c)
    test_case.assertEqual(base + 20,
                          message.repeated_import_message[index].d)
  # Enum entries: BAR in slot 0, BAZ in slot 1.
  for index, tail in ((0, 'BAR'), (1, 'BAZ')):
    test_case.assertEqual(getattr(unittest_pb2.TestAllTypes, tail),
                          message.repeated_nested_enum[index])
    test_case.assertEqual(getattr(unittest_pb2, 'FOREIGN_' + tail),
                          message.repeated_foreign_enum[index])
    test_case.assertEqual(getattr(unittest_import_pb2, 'IMPORT_' + tail),
                          message.repeated_import_enum[index])

  # -----------------------------------------------------------------
  # Fields with defaults: presence, then values (4xx).
  for suffix in numeric_suffixes + ['bool', 'string', 'bytes',
                                    'nested_enum', 'foreign_enum',
                                    'import_enum']:
    test_case.assertTrue(message.HasField('default_' + suffix))
  for offset, suffix in enumerate(numeric_suffixes):
    test_case.assertEqual(401 + offset,
                          getattr(message, 'default_' + suffix))
  test_case.assertEqual(False, message.default_bool)
  test_case.assertEqual('415', message.default_string)
  test_case.assertEqual('416', message.default_bytes)
  test_case.assertEqual(unittest_pb2.TestAllTypes.FOO,
                        message.default_nested_enum)
  test_case.assertEqual(unittest_pb2.FOREIGN_FOO,
                        message.default_foreign_enum)
  test_case.assertEqual(unittest_import_pb2.IMPORT_FOO,
                        message.default_import_enum)
def GoldenFile(filename):
  """Finds the given golden file and returns a file object representing it.

  Searches upward from the current directory for the C++ protobuf source
  tree and opens src/google/protobuf/testdata/<filename> within it.

  Args:
    filename: Name of the golden file inside the testdata directory.

  Returns:
    A file object opened in binary mode.

  Raises:
    RuntimeError: If the protobuf source tree cannot be located.
  """
  # Search up the directory tree looking for the C++ protobuf source code.
  path = '.'
  while os.path.exists(path):
    if os.path.exists(os.path.join(path, 'src/google/protobuf')):
      # Found it.  Load the golden file from the testdata directory.
      full_path = os.path.join(path, 'src/google/protobuf/testdata', filename)
      return open(full_path, 'rb')
    parent = os.path.join(path, '..')
    # Stop once the filesystem root is reached.  The original loop kept
    # appending '..' forever, because os.path.exists('/..', '/../..', ...)
    # is always true, so the RuntimeError below was unreachable and the
    # function hung when the source tree was absent.
    if os.path.realpath(parent) == os.path.realpath(path):
      break
    path = parent

  raise RuntimeError(
      'Could not find golden files. This test must be run from within the '
      'protobuf source package so that it can read test data files from the '
      'C++ source tree.')
def SetAllPackedFields(message):
  """Sets every field in the message to a unique value.

  Each packed field receives two entries: 6xx and 7xx, numbered in
  declaration order.

  Args:
    message: A unittest_pb2.TestPackedTypes instance.
  """
  int_suffixes = ('int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                  'fixed32', 'fixed64', 'sfixed32', 'sfixed64')
  for offset, suffix in enumerate(int_suffixes):
    getattr(message, 'packed_' + suffix).extend([601 + offset, 701 + offset])
  message.packed_float.extend([611.0, 711.0])
  message.packed_double.extend([612.0, 712.0])
  message.packed_bool.extend([True, False])
  message.packed_enum.extend([unittest_pb2.FOREIGN_BAR,
                              unittest_pb2.FOREIGN_BAZ])
def SetAllPackedExtensions(message):
  """Sets every extension in the message to a unique value.

  Mirrors SetAllPackedFields(), writing through the Extensions map.

  Args:
    message: A unittest_pb2.TestPackedExtensions instance.
  """
  extensions = message.Extensions
  pb2 = unittest_pb2
  int_suffixes = ('int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                  'fixed32', 'fixed64', 'sfixed32', 'sfixed64')
  for offset, suffix in enumerate(int_suffixes):
    descriptor = getattr(pb2, 'packed_%s_extension' % suffix)
    extensions[descriptor].extend([601 + offset, 701 + offset])
  extensions[pb2.packed_float_extension].extend([611.0, 711.0])
  extensions[pb2.packed_double_extension].extend([612.0, 712.0])
  extensions[pb2.packed_bool_extension].extend([True, False])
  extensions[pb2.packed_enum_extension].extend([pb2.FOREIGN_BAR,
                                                pb2.FOREIGN_BAZ])
def SetAllUnpackedFields(message):
  """Sets every field in the message to a unique value.

  Uses the same 6xx/7xx value pairs as SetAllPackedFields(), but on the
  unpacked variants of the fields.

  Args:
    message: A unittest_pb2.TestUnpackedTypes instance.
  """
  int_suffixes = ('int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                  'fixed32', 'fixed64', 'sfixed32', 'sfixed64')
  for offset, suffix in enumerate(int_suffixes):
    getattr(message, 'unpacked_' + suffix).extend([601 + offset,
                                                   701 + offset])
  message.unpacked_float.extend([611.0, 711.0])
  message.unpacked_double.extend([612.0, 712.0])
  message.unpacked_bool.extend([True, False])
  message.unpacked_enum.extend([unittest_pb2.FOREIGN_BAR,
                                unittest_pb2.FOREIGN_BAZ])
| bsd-3-clause |
wbsoft/frescobaldi | frescobaldi_app/backup.py | 3 | 1938 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Backup files before overwriting
"""
import os
import shutil
from PyQt5.QtCore import QSettings
def backup(filename):
    """Makes a backup of 'filename'.

    Returns True if the backup succeeded.
    """
    if not filename:
        return False
    try:
        shutil.copy(filename, backupName(filename))
    except (IOError, OSError):
        # Copy failed (missing source, permissions, full disk, ...);
        # report failure rather than raising.
        return False
    return True
def removeBackup(filename):
    """Removes filename's backup unless the user has configured to keep it."""
    if not filename:
        return
    if QSettings().value("backup_keep", False, bool):
        # User preference: keep backup files around.
        return
    try:
        os.remove(backupName(filename))
    except (IOError, OSError):
        # Backup may never have been created; nothing to do.
        pass
def scheme():
    """Returns a string that must contain "FILE".

    Replacing that part yields the backup name.
    """
    pattern = QSettings().value("backup_scheme", "FILE~")
    # NOTE(review): assert is stripped under `python -O`, so an invalid
    # user-configured scheme would then pass through silently — confirm
    # whether an explicit check is wanted here.
    assert 'FILE' in pattern and pattern != 'FILE'
    return pattern
def backupName(filename):
    """Returns the backup file name for the given filename."""
    template = scheme()
    return template.replace("FILE", filename)
| gpl-2.0 |
affan2/django-envelope | setup.py | 1 | 1120 | import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original returned from an open() call and leaked the handle until
    garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata.  The version is imported from the envelope package
# itself so it is defined in exactly one place; the long description is
# read from the README shipped alongside this file.
setup(
    name='django-envelope',
    version=__import__('envelope').__version__,
    description='A contact form app for Django',
    long_description=read('README.rst'),
    author='Zbigniew Siciarz',
    author_email='antyqjon@gmail.com',
    url='http://github.com/zsiciarz/django-envelope',
    download_url='http://pypi.python.org/pypi/django-envelope',
    license='MIT',
    install_requires=['Django>=1.4'],
    # Ship every package except the demo project.
    packages=find_packages(exclude=['example_project']),
    include_package_data=True,
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Environment :: Web Environment',
                 'Framework :: Django',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: MIT License',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 3',
                 'Topic :: Utilities'],
)
| mit |
davidjb/sqlalchemy | test/orm/test_manytomany.py | 32 | 14049 | from sqlalchemy.testing import assert_raises, \
assert_raises_message, eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, relationship, Session, \
exc as orm_exc, sessionmaker, backref
from sqlalchemy.testing import fixtures
class M2MTest(fixtures.MappedTest):
    """Tests for many-to-many relationship() configurations using secondary
    association tables, including self-referential setups and two distinct
    associations between the same pair of tables."""

    @classmethod
    def define_tables(cls, metadata):
        # Principal tables: 'place' and 'transition'.
        Table('place', metadata,
              Column('place_id', Integer, test_needs_autoincrement=True,
                     primary_key=True),
              Column('name', String(30), nullable=False),
              test_needs_acid=True,
              )
        Table('transition', metadata,
              Column('transition_id', Integer,
                     test_needs_autoincrement=True, primary_key=True),
              Column('name', String(30), nullable=False),
              test_needs_acid=True,
              )
        # One-to-many child of 'place'; used by the joinedload test below.
        Table('place_thingy', metadata,
              Column('thingy_id', Integer, test_needs_autoincrement=True,
                     primary_key=True),
              Column('place_id', Integer, ForeignKey('place.place_id'),
                     nullable=False),
              Column('name', String(30), nullable=False),
              test_needs_acid=True,
              )
        # association table #1
        Table('place_input', metadata,
              Column('place_id', Integer, ForeignKey('place.place_id')),
              Column('transition_id', Integer,
                     ForeignKey('transition.transition_id')),
              test_needs_acid=True,
              )
        # association table #2
        Table('place_output', metadata,
              Column('place_id', Integer, ForeignKey('place.place_id')),
              Column('transition_id', Integer,
                     ForeignKey('transition.transition_id')),
              test_needs_acid=True,
              )
        # Self-referential association table: place <-> place.
        Table('place_place', metadata,
              Column('pl1_id', Integer, ForeignKey('place.place_id')),
              Column('pl2_id', Integer, ForeignKey('place.place_id')),
              test_needs_acid=True,
              )

    @classmethod
    def setup_classes(cls):
        # Plain classes to be mapped; each takes its 'name' at construction.
        class Place(cls.Basic):
            def __init__(self, name):
                self.name = name

        class PlaceThingy(cls.Basic):
            def __init__(self, name):
                self.name = name

        class Transition(cls.Basic):
            def __init__(self, name):
                self.name = name

    def test_overlapping_attribute_error(self):
        """Backrefs that collide with explicitly configured properties on
        the opposite mapper should raise at mapper-configuration time."""
        place, Transition, place_input, Place, transition = (self.tables.place,
                                                self.classes.Transition,
                                                self.tables.place_input,
                                                self.classes.Place,
                                                self.tables.transition)

        # Each relationship declares a backref whose name duplicates the
        # property explicitly configured on the other mapper.
        mapper(Place, place, properties={
            'transitions': relationship(Transition,
                                        secondary=place_input,
                                        backref='places')
        })
        mapper(Transition, transition, properties={
            'places': relationship(Place,
                                   secondary=place_input,
                                   backref='transitions')
        })
        assert_raises_message(sa.exc.ArgumentError,
                              "property of that name exists",
                              sa.orm.configure_mappers)

    def test_self_referential_roundtrip(self):
        """A self-referential M2M persists and loads related collections."""
        place, Place, place_place = (self.tables.place,
                                     self.classes.Place,
                                     self.tables.place_place)

        # Self-referential M2M requires explicit primaryjoin/secondaryjoin
        # because both sides point at the same table.
        mapper(Place, place, properties={
            'places': relationship(
                Place,
                secondary=place_place,
                primaryjoin=place.c.place_id == place_place.c.pl1_id,
                secondaryjoin=place.c.place_id == place_place.c.pl2_id,
                order_by=place_place.c.pl2_id
            )
        })

        sess = Session()
        p1 = Place('place1')
        p2 = Place('place2')
        p3 = Place('place3')
        p4 = Place('place4')
        p5 = Place('place5')
        p6 = Place('place6')
        p7 = Place('place7')
        sess.add_all((p1, p2, p3, p4, p5, p6, p7))
        p1.places.append(p2)
        p1.places.append(p3)
        p5.places.append(p6)
        p6.places.append(p1)
        p7.places.append(p1)
        p1.places.append(p5)
        p4.places.append(p3)
        p3.places.append(p4)
        sess.commit()

        # Collections come back ordered by pl2_id per the order_by above.
        eq_(p1.places, [p2, p3, p5])
        eq_(p5.places, [p6])
        eq_(p7.places, [p1])
        eq_(p6.places, [p1])
        eq_(p4.places, [p3])
        eq_(p3.places, [p4])
        eq_(p2.places, [])

    def test_self_referential_bidirectional_mutation(self):
        """Mutations made on either side of a self-referential backref are
        visible from both sides after commit."""
        place, Place, place_place = (self.tables.place,
                                     self.classes.Place,
                                     self.tables.place_place)

        mapper(Place, place, properties={
            'child_places': relationship(
                Place,
                secondary=place_place,
                primaryjoin=place.c.place_id == place_place.c.pl1_id,
                secondaryjoin=place.c.place_id == place_place.c.pl2_id,
                order_by=place_place.c.pl2_id,
                backref='parent_places'
            )
        })

        sess = Session()
        p1 = Place('place1')
        p2 = Place('place2')
        # Mutate via assignment on one side and append on the other.
        p2.parent_places = [p1]
        sess.add_all([p1, p2])
        p1.parent_places.append(p2)
        sess.commit()

        assert p1 in p2.parent_places
        assert p2 in p1.parent_places

    def test_joinedload_on_double(self):
        """test that a mapper can have two eager relationships to the same table, via
        two different association tables. aliases are required."""

        place_input, transition, Transition, PlaceThingy, \
            place, place_thingy, Place, \
            place_output = (self.tables.place_input,
                            self.tables.transition,
                            self.classes.Transition,
                            self.classes.PlaceThingy,
                            self.tables.place,
                            self.tables.place_thingy,
                            self.classes.Place,
                            self.tables.place_output)

        mapper(PlaceThingy, place_thingy)
        mapper(Place, place, properties={
            'thingies': relationship(PlaceThingy, lazy='joined')
        })

        # Two eagerly-loaded relationships to Place via the two distinct
        # association tables.
        mapper(Transition, transition, properties=dict(
            inputs=relationship(Place, place_output, lazy='joined'),
            outputs=relationship(Place, place_input, lazy='joined'),
        )
        )

        tran = Transition('transition1')
        tran.inputs.append(Place('place1'))
        tran.outputs.append(Place('place2'))
        tran.outputs.append(Place('place3'))
        sess = Session()
        sess.add(tran)
        sess.commit()

        r = sess.query(Transition).all()
        self.assert_unordered_result(r, Transition,
                                     {'name': 'transition1',
                                      'inputs': (Place, [{'name': 'place1'}]),
                                      'outputs': (Place, [{'name': 'place2'}, {'name': 'place3'}])
                                      })

    def test_bidirectional(self):
        """Two M2M relationships between the same tables, each with a
        backref, round-trip correctly with deterministic ordering."""
        place_input, transition, Transition, Place, place, place_output = (
            self.tables.place_input,
            self.tables.transition,
            self.classes.Transition,
            self.classes.Place,
            self.tables.place,
            self.tables.place_output)

        mapper(Place, place)
        mapper(Transition, transition, properties=dict(
            inputs=relationship(Place, place_output,
                                backref=backref('inputs',
                                                order_by=transition.c.transition_id),
                                order_by=Place.place_id),
            outputs=relationship(Place, place_input,
                                 backref=backref('outputs',
                                                 order_by=transition.c.transition_id),
                                 order_by=Place.place_id),
        )
        )

        t1 = Transition('transition1')
        t2 = Transition('transition2')
        t3 = Transition('transition3')
        p1 = Place('place1')
        p2 = Place('place2')
        p3 = Place('place3')

        sess = Session()
        sess.add_all([p3, p1, t1, t2, p2, t3])

        # Populate through both the forward properties and the backrefs.
        t1.inputs.append(p1)
        t1.inputs.append(p2)
        t1.outputs.append(p3)
        t2.inputs.append(p1)
        p2.inputs.append(t2)
        p3.inputs.append(t2)
        p1.outputs.append(t1)
        sess.commit()

        self.assert_result([t1],
                           Transition, {'outputs':
                                        (Place, [{'name': 'place3'}, {'name': 'place1'}])})
        self.assert_result([p2],
                           Place, {'inputs':
                                   (Transition, [{'name': 'transition1'},
                                                 {'name': 'transition2'}])})

    @testing.requires.sane_multi_rowcount
    def test_stale_conditions(self):
        """Updates/deletes against association rows that were removed
        out-of-band raise StaleDataError instead of silently passing."""
        Place, Transition, place_input, place, transition = (
            self.classes.Place,
            self.classes.Transition,
            self.tables.place_input,
            self.tables.place,
            self.tables.transition)

        # passive_updates=False forces the ORM to emit UPDATEs against the
        # association rows itself, which is what surfaces the stale state.
        mapper(Place, place, properties={
            'transitions': relationship(Transition, secondary=place_input,
                                        passive_updates=False)
        })
        mapper(Transition, transition)

        p1 = Place('place1')
        t1 = Transition('t1')
        p1.transitions.append(t1)
        sess = sessionmaker()()
        sess.add_all([p1, t1])
        sess.commit()

        p1.place_id
        p1.transitions

        # Remove the association row behind the ORM's back, then change
        # the primary key: the flush-time UPDATE matches zero rows.
        sess.execute("delete from place_input", mapper=Place)
        p1.place_id = 7

        assert_raises_message(
            orm_exc.StaleDataError,
            r"UPDATE statement on table 'place_input' expected to "
            r"update 1 row\(s\); Only 0 were matched.",
            sess.commit
        )
        sess.rollback()

        p1.place_id
        p1.transitions

        # Same scenario with a collection removal: the DELETE matches
        # zero rows.
        sess.execute("delete from place_input", mapper=Place)
        p1.transitions.remove(t1)
        assert_raises_message(
            orm_exc.StaleDataError,
            r"DELETE statement on table 'place_input' expected to "
            r"delete 1 row\(s\); Only 0 were matched.",
            sess.commit
        )
class AssortedPersistenceTests(fixtures.MappedTest):
    """Persistence behaviors for a simple A<->B many-to-many: cleanup of
    secondary rows on delete, and de-association through a uselist=False
    backref."""

    @classmethod
    def define_tables(cls, metadata):
        Table("left", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(30))
              )
        Table("right", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(30)),
              )
        # Association table; composite PK over both foreign keys.
        Table('secondary', metadata,
              Column('left_id', Integer, ForeignKey('left.id'),
                     primary_key=True),
              Column('right_id', Integer, ForeignKey('right.id'),
                     primary_key=True),
              )

    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

    def _standard_bidirectional_fixture(self):
        # A.bs <-> B.as, plain list collections on both sides.
        left, secondary, right = self.tables.left, \
            self.tables.secondary, self.tables.right
        A, B = self.classes.A, self.classes.B
        mapper(A, left, properties={
            'bs': relationship(B, secondary=secondary,
                               backref='as', order_by=right.c.id)
        })
        mapper(B, right)

    def _bidirectional_onescalar_fixture(self):
        # A.bs (list) <-> B.a (scalar, via uselist=False backref).
        left, secondary, right = self.tables.left, \
            self.tables.secondary, self.tables.right
        A, B = self.classes.A, self.classes.B
        mapper(A, left, properties={
            'bs': relationship(B, secondary=secondary,
                               backref=backref('a', uselist=False),
                               order_by=right.c.id)
        })
        mapper(B, right)

    def test_session_delete(self):
        """Deleting an A removes its rows from the secondary table."""
        self._standard_bidirectional_fixture()
        A, B = self.classes.A, self.classes.B
        secondary = self.tables.secondary

        sess = Session()
        sess.add_all([
            A(data='a1', bs=[B(data='b1')]),
            A(data='a2', bs=[B(data='b2')])
        ])
        sess.commit()

        a1 = sess.query(A).filter_by(data='a1').one()
        sess.delete(a1)
        sess.flush()

        # Only a2's association row should remain.
        eq_(sess.query(secondary).count(), 1)

        a2 = sess.query(A).filter_by(data='a2').one()
        sess.delete(a2)
        sess.flush()

        eq_(sess.query(secondary).count(), 0)

    def test_remove_scalar(self):
        # test setting a uselist=False to None
        self._bidirectional_onescalar_fixture()
        A, B = self.classes.A, self.classes.B
        secondary = self.tables.secondary

        sess = Session()
        sess.add_all([
            A(data='a1', bs=[B(data='b1'), B(data='b2')]),
        ])
        sess.commit()

        a1 = sess.query(A).filter_by(data='a1').one()
        b2 = sess.query(B).filter_by(data='b2').one()
        assert b2.a is a1

        # Setting the scalar side to None removes the association row,
        # leaving only b1 linked to a1.
        b2.a = None
        sess.commit()

        eq_(a1.bs, [B(data='b1')])
        eq_(b2.a, None)
        eq_(sess.query(secondary).count(), 1)
| mit |
ubiar/odoo | addons/sale/edi/__init__.py | 454 | 1065 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
3nids/QGIS | tests/src/python/test_qgsproviderregistry.py | 6 | 5049 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProviderRegistry.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '16/03/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (
QgsProviderRegistry,
QgsMapLayerType
)
from qgis.testing import start_app, unittest
# Convenience instances in case you may need them
# to find the srs.db
start_app()
class TestQgsProviderRegistry(unittest.TestCase):
    """Exercises the QgsProviderRegistry singleton: provider listing and
    metadata lookup, provider creation, URI deferral/blocklisting, and the
    point-cloud file filter string."""

    def testProviderList(self):
        """
        Test provider list
        """
        names = QgsProviderRegistry.instance().providerList()
        for expected in ('ogr', 'gdal'):
            self.assertIn(expected, names)

    def testProviderMetadata(self):
        """
        Test retrieving provider metadata
        """
        registry = QgsProviderRegistry.instance()
        for key in registry.providerList():
            # lookup should be case-insensitive
            for variant in (key, key.lower(), key.upper()):
                self.assertTrue(registry.providerMetadata(variant))
        self.assertIsNone(registry.providerMetadata('asdasdasdasdasd'))

    def testCreateProvider(self):
        """
        Test creating provider instance
        """
        registry = QgsProviderRegistry.instance()
        for key in registry.providerList():
            if key in ('geonode', 'vectortile'):
                continue
            # creation should be case-insensitive
            for variant in (key, key.lower(), key.upper()):
                self.assertTrue(registry.createProvider(variant, ''))
        self.assertIsNone(registry.createProvider('asdasdasdasdasd', ''))

    @unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
    def testShouldDeferUriForOtherProvidersEpt(self):
        registry = QgsProviderRegistry.instance()
        # ogr must yield ept.json files to the ept provider, and only those
        self.assertTrue(registry.shouldDeferUriForOtherProviders('/home/nyall/ept.json', 'ogr'))
        self.assertFalse(registry.shouldDeferUriForOtherProviders('/home/nyall/ept.json', 'ept'))
        self.assertFalse(registry.shouldDeferUriForOtherProviders('/home/nyall/my.json', 'ogr'))

    def testUriIsBlocklisted(self):
        registry = QgsProviderRegistry.instance()
        # ordinary data files must not be blocklisted
        for uri in ('/home/nyall/me.tif', '/home/nyall/me.shp'):
            self.assertFalse(registry.uriIsBlocklisted(uri))
        # internal details only -- we should be hiding these uris!
        for uri in ('/home/nyall/me.shp.xml',
                    '/home/nyall/me.aux.xml',
                    '/home/nyall/me.AUX.XML',
                    '/home/nyall/me.tif.aux.xml',
                    '/home/nyall/me.tif.AUX.XML',
                    '/home/nyall/me.png.aux.xml',
                    '/home/nyall/me.tif.xml'):
            self.assertTrue(registry.uriIsBlocklisted(uri))

    @unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
    def testFilePointCloudFilters(self):
        parts = QgsProviderRegistry.instance().filePointCloudFilters().split(';;')
        self.assertTrue(parts[0].startswith('All Supported Files ('))
        # strip the 'All Supported Files (' prefix and the trailing ')'
        all_filter = parts[0][21:-1]
        for pattern in ('ept.json', 'EPT.JSON'):
            self.assertIn(pattern, all_filter.split(' '))
        self.assertEqual(parts[1], 'All Files (*.*)')
        self.assertIn('Entwine Point Clouds (ept.json EPT.JSON)', parts)

    def testUnusableUriDetails(self):
        """
        Test retrieving user-friendly details about an unusable URI
        """
        registry = QgsProviderRegistry.instance()
        res, details = registry.handleUnusableUri('')
        self.assertFalse(res)
        res, details = registry.handleUnusableUri('/home/me/test.png')
        self.assertFalse(res)
        # point cloud formats are recognised and get a warning naming them
        for uri, token in (('/home/me/test.las', 'LAS'),
                           ('/home/me/test.laz', 'LAZ')):
            res, details = registry.handleUnusableUri(uri)
            self.assertTrue(res)
            self.assertIn(token, details.warning)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
vmturbo/nova | nova/scheduler/filters/exact_disk_filter.py | 18 | 1846 | # Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class ExactDiskFilter(filters.BaseHostFilter):
    """Exact Disk Filter.

    Passes a host only when its free disk matches the request exactly;
    used for e.g. bare-metal style scheduling where partial fits are
    undesirable.
    """

    def host_passes(self, host_state, spec_obj):
        """Return True if host has the exact amount of disk available."""
        # Requested size in MB: root and ephemeral are expressed in GB,
        # swap is already in MB.
        requested_disk = (1024 * (spec_obj.root_gb + spec_obj.ephemeral_gb)
                          + spec_obj.swap)
        if requested_disk == host_state.free_disk_mb:
            # NOTE(mgoddard): Setting the limit ensures that it is enforced in
            # compute. This ensures that if multiple instances are scheduled to a
            # single host, then all after the first will fail in the claim.
            host_state.limits['disk_gb'] = host_state.total_usable_disk_gb
            return True
        LOG.debug("%(host_state)s does not have exactly "
                  "%(requested_disk)s MB usable disk, it "
                  "has %(usable_disk_mb)s.",
                  {'host_state': host_state,
                   'requested_disk': requested_disk,
                   'usable_disk_mb': host_state.free_disk_mb})
        return False
| apache-2.0 |
jupyter-widgets/ipywidgets | ipywidgets/widgets/tests/test_datetime_serializers.py | 1 | 2132 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Vidar Tonaas Fauske.
# Distributed under the terms of the Modified BSD License.
import pytest
import datetime
import pytz
from traitlets import TraitError
from ..trait_types import (
time_to_json,
time_from_json,
datetime_to_json,
datetime_from_json,
)
def test_time_serialize_none():
    """Serializing a null time yields None."""
    # PEP 8 (E711): compare to None with identity, not equality.
    assert time_to_json(None, None) is None
def test_time_serialize_value():
    """A time with sub-second precision serializes to an
    hours/minutes/seconds/milliseconds dict."""
    value = datetime.time(13, 37, 42, 7000)
    expected = dict(hours=13, minutes=37, seconds=42, milliseconds=7)
    assert time_to_json(value, None) == expected
def test_time_deserialize_none():
    """Deserializing a null time yields None."""
    # PEP 8 (E711): compare to None with identity, not equality.
    assert time_from_json(None, None) is None
def test_time_deserialize_value():
    """An hours/minutes/seconds/milliseconds dict deserializes to a
    datetime.time with microsecond precision."""
    serialized = dict(hours=13, minutes=37, seconds=42, milliseconds=7)
    expected = datetime.time(13, 37, 42, 7000)
    assert time_from_json(serialized, None) == expected
def test_datetime_serialize_none():
    """Serializing a null datetime yields None."""
    # PEP 8 (E711): compare to None with identity, not equality.
    assert datetime_to_json(None, None) is None
def test_datetime_serialize_value():
    """A UTC datetime serializes field by field, with a 0-based month."""
    value = datetime.datetime(2002, 2, 20, 13, 37, 42, 7000, pytz.utc)
    expected = dict(
        year=2002,
        month=1,  # Months are 0-based indices in JS
        date=20,
        hours=13,
        minutes=37,
        seconds=42,
        milliseconds=7,
    )
    assert datetime_to_json(value, None) == expected
def test_datetime_serialize_non_utz():
    """Serialization converts non-UTC datetimes to UTC before splitting."""
    # Non-existent timezone offset, so it will never be the local one:
    tz = pytz.FixedOffset(42)
    value = datetime.datetime(2002, 2, 20, 13, 37, 42, 7000, tz)
    # 13:37 at UTC+00:42 is 12:55 UTC
    expected = dict(
        year=2002,
        month=1,  # Months are 0-based indices in JS
        date=20,
        hours=12,
        minutes=55,
        seconds=42,
        milliseconds=7,
    )
    assert datetime_to_json(value, None) == expected
def test_datetime_deserialize_none():
    """Deserializing a null datetime yields None."""
    # PEP 8 (E711): compare to None with identity, not equality.
    assert datetime_from_json(None, None) is None
def test_datetime_deserialize_value():
    """Deserialization interprets the dict as UTC and localizes to the
    target timezone (here UTC+42 minutes: 13:37 UTC -> 14:19 local)."""
    tz = pytz.FixedOffset(42)
    serialized = dict(
        year=2002,
        month=1,  # Months are 0-based indices in JS
        date=20,
        hours=13,
        minutes=37,
        seconds=42,
        milliseconds=7,
    )
    expected = datetime.datetime(2002, 2, 20, 14, 19, 42, 7000, tz)
    assert datetime_from_json(serialized, None) == expected
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    """Description of one delegating meta-estimator under test.

    ``construct`` maps a sub-estimator to the wrapping meta-estimator;
    ``skip_methods`` lists delegated methods the wrapper is known not to
    expose; ``fit_args`` are the positional arguments passed to ``fit``.
    """

    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        self.name = name
        self.construct = construct
        self.skip_methods = skip_methods
        self.fit_args = fit_args
# Each entry describes a delegating meta-estimator together with the
# delegated methods it is known not to expose (skip_methods).
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Decorator turning *method* into a property that raises
        # AttributeError when the instance's hidden_method names it --
        # making hasattr() return False for exactly that one method.
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper
    class SubEstimator(BaseEstimator):
        # Minimal estimator exposing the standard sklearn API; each API
        # method can be selectively hidden via hidden_method.
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method
        def fit(self, X, y=None, *args, **kwargs):
            self.coef_ = np.arange(X.shape[1])
            return True
        def _check_fit(self):
            # guard: delegated methods must fail before fit()
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')
        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0
    # all public non-fit API methods defined directly on SubEstimator
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()
    for delegator_data in DELEGATING_METAESTIMATORS:
        # Phase 1: with nothing hidden, the delegator exposes every method
        # the delegate has (except those in skip_methods).
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])
        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])
        # Phase 2: hiding a method on the delegate must hide it on the
        # delegator as well.
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
| bsd-3-clause |
Arundhatii/erpnext | erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py | 32 | 6129 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import formatdate, getdate, flt, add_days
def execute(filters=None):
    """Report entry point.

    Stashes the day before ``from_date`` on the filters (used for the
    opening-balance column labels) and returns (columns, rows).
    """
    filters.day_before_from_date = add_days(filters.from_date, -1)
    columns = get_columns(filters)
    data = get_data(filters)
    return columns, data
def get_data(filters):
    """Build one report row per asset category.

    Rolls purchase cost and accumulated depreciation forward from the
    opening balance (as on the day before ``from_date``) to ``to_date``
    and derives the net asset values from the two.
    """
    data = []
    asset_categories = get_asset_categories(filters)
    assets = get_assets(filters)
    asset_costs = get_asset_costs(assets, filters)
    asset_depreciations = get_accumulated_depreciations(assets, filters)
    for asset_category in asset_categories:
        row = frappe._dict()
        row.asset_category = asset_category
        row.update(asset_costs.get(asset_category))
        # closing cost = opening + purchases - disposals (sold or scrapped)
        row.cost_as_on_to_date = (flt(row.cost_as_on_from_date) + flt(row.cost_of_new_purchase)
            - flt(row.cost_of_sold_asset) - flt(row.cost_of_scrapped_asset))
        row.update(asset_depreciations.get(asset_category))
        # BUGFIX: the field produced by get_accumulated_depreciations is
        # "depreciation_eliminated_during_the_period"; the old attribute
        # "row.depreciation_eliminated" resolved to None via frappe._dict,
        # so depreciation eliminated by disposals was never deducted.
        row.accumulated_depreciation_as_on_to_date = (flt(row.accumulated_depreciation_as_on_from_date) +
            flt(row.depreciation_amount_during_the_period) -
            flt(row.depreciation_eliminated_during_the_period))
        row.net_asset_value_as_on_from_date = (flt(row.cost_as_on_from_date) -
            flt(row.accumulated_depreciation_as_on_from_date))
        row.net_asset_value_as_on_to_date = (flt(row.cost_as_on_to_date) -
            flt(row.accumulated_depreciation_as_on_to_date))
        data.append(row)
    return data
def get_asset_categories(filters):
    # Distinct categories of submitted (docstatus=1) assets purchased on or
    # before the report end date for the selected company.
    return frappe.db.sql_list("""
        select distinct asset_category from `tabAsset`
        where docstatus=1 and company=%s and purchase_date <= %s
    """, (filters.company, filters.to_date))
def get_assets(filters):
    # All submitted assets purchased on or before to_date, with the fields
    # needed for cost and depreciation aggregation.
    return frappe.db.sql("""
        select name, asset_category, purchase_date, gross_purchase_amount, disposal_date, status
        from `tabAsset`
        where docstatus=1 and company=%s and purchase_date <= %s""",
        (filters.company, filters.to_date), as_dict=1)
def get_asset_costs(assets, filters):
    # Aggregate gross purchase amounts per asset category into four buckets:
    # opening cost, new purchases in the period, and cost removed by sale or
    # scrapping during the period.
    asset_costs = frappe._dict()
    for d in assets:
        asset_costs.setdefault(d.asset_category, frappe._dict({
            "cost_as_on_from_date": 0,
            "cost_of_new_purchase": 0,
            "cost_of_sold_asset": 0,
            "cost_of_scrapped_asset": 0
        }))
        costs = asset_costs[d.asset_category]
        if getdate(d.purchase_date) < getdate(filters.from_date):
            # purchased before the period: counts toward the opening balance
            # unless already disposed of before from_date
            if not d.disposal_date or getdate(d.disposal_date) >= getdate(filters.from_date):
                costs.cost_as_on_from_date += flt(d.gross_purchase_amount)
        else:
            costs.cost_of_new_purchase += flt(d.gross_purchase_amount)
        # disposals falling inside the reporting period
        if d.disposal_date and getdate(d.disposal_date) >= getdate(filters.from_date) \
            and getdate(d.disposal_date) <= getdate(filters.to_date):
            if d.status == "Sold":
                costs.cost_of_sold_asset += flt(d.gross_purchase_amount)
            elif d.status == "Scrapped":
                costs.cost_of_scrapped_asset += flt(d.gross_purchase_amount)
    return asset_costs
def get_accumulated_depreciations(assets, filters):
    """Aggregate per-category accumulated depreciation.

    Returns, for each asset category, the opening accumulated depreciation
    (schedules dated before ``from_date``), the depreciation booked during
    the period, and the depreciation eliminated by disposals.
    """
    asset_depreciations = frappe._dict()
    for d in assets:
        asset = frappe.get_doc("Asset", d.name)
        asset_depreciations.setdefault(d.asset_category, frappe._dict({
            "accumulated_depreciation_as_on_from_date": asset.opening_accumulated_depreciation,
            "depreciation_amount_during_the_period": 0,
            "depreciation_eliminated_during_the_period": 0
        }))
        depr = asset_depreciations[d.asset_category]
        for schedule in asset.get("schedules"):
            if getdate(schedule.schedule_date) < getdate(filters.from_date):
                # BUGFIX: use "or", matching get_asset_costs. The old "and"
                # made this branch unreachable for undisposed assets (and
                # evaluated getdate(None) when disposal_date was unset), so
                # opening accumulated depreciation was wrong.
                if not asset.disposal_date or getdate(asset.disposal_date) >= getdate(filters.from_date):
                    depr.accumulated_depreciation_as_on_from_date += flt(schedule.depreciation_amount)
            elif getdate(schedule.schedule_date) <= getdate(filters.to_date):
                depr.depreciation_amount_during_the_period += flt(schedule.depreciation_amount)
            # schedules after the disposal date are eliminated by the disposal
            if asset.disposal_date and getdate(schedule.schedule_date) > getdate(asset.disposal_date):
                depr.depreciation_eliminated_during_the_period += flt(schedule.depreciation_amount)
    return asset_depreciations
def get_columns(filters):
    """Column definitions for the report.

    All columns except the category link are currency columns, so they are
    built through a small local helper.
    """
    def currency_col(label, fieldname, width):
        # common shape of every numeric column
        return {
            "label": label,
            "fieldname": fieldname,
            "fieldtype": "Currency",
            "width": width
        }
    return [
        {
            "label": _("Asset Category"),
            "fieldname": "asset_category",
            "fieldtype": "Link",
            "options": "Asset Category",
            "width": 120
        },
        currency_col(_("Cost as on") + " " + formatdate(filters.day_before_from_date),
            "cost_as_on_from_date", 140),
        currency_col(_("Cost of New Purchase"),
            "cost_of_new_purchase", 140),
        currency_col(_("Cost of Sold Asset"),
            "cost_of_sold_asset", 140),
        currency_col(_("Cost of Scrapped Asset"),
            "cost_of_scrapped_asset", 140),
        currency_col(_("Cost as on") + " " + formatdate(filters.to_date),
            "cost_as_on_to_date", 140),
        currency_col(_("Accumulated Depreciation as on") + " " + formatdate(filters.day_before_from_date),
            "accumulated_depreciation_as_on_from_date", 270),
        currency_col(_("Depreciation Amount during the period"),
            "depreciation_amount_during_the_period", 240),
        currency_col(_("Depreciation Eliminated due to disposal of assets"),
            "depreciation_eliminated_during_the_period", 300),
        currency_col(_("Accumulated Depreciation as on") + " " + formatdate(filters.to_date),
            "accumulated_depreciation_as_on_to_date", 270),
        currency_col(_("Net Asset value as on") + " " + formatdate(filters.day_before_from_date),
            "net_asset_value_as_on_from_date", 200),
        currency_col(_("Net Asset value as on") + " " + formatdate(filters.to_date),
            "net_asset_value_as_on_to_date", 200)
    ]
| gpl-3.0 |
ceph/autotest | client/tests/kvm/tests/nic_promisc.py | 1 | 4334 | import logging, threading
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
import kvm_utils, kvm_test_utils
def run_nic_promisc(test, params, env):
    """
    Test nic driver in promisc mode:
    1) Boot up a VM.
    2) Repeatedly enable/disable promiscuous mode in guest.
    3) TCP data transmission from host to guest, and from guest to host,
       with 1/1460/65000/100000000 bytes payloads.
    4) Clean temporary files.
    5) Stop enable/disable promiscuous mode change.
    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    # serial console session: keeps working while the NIC mode is flapping
    session_serial = vm.wait_for_serial_login(timeout=timeout)
    def compare(filename):
        # Verify the transferred file is intact by comparing MD5 digests
        # computed on the host and inside the guest.
        md5_host = utils.hash_file(filename, method="md5")
        md5_guest = session.cmd("md5sum %s" % filename)
        md5_guest = md5_guest.split()[0]
        if md5_host != md5_guest:
            logging.error("MD5 hash mismatch between file %s "
                          "present on guest and on host", filename)
            logging.error("MD5 hash for file on guest: %s,"
                          "MD5 hash for file on host: %s", md5_host, md5_guest)
            return False
        return True
    ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
    class ThreadPromiscCmd(threading.Thread):
        # Background thread toggling promiscuous mode on the guest NIC
        # until termination_event is set.
        def __init__(self, session, termination_event):
            self.session = session
            self.termination_event = termination_event
            super(ThreadPromiscCmd, self).__init__()
        def run(self):
            set_promisc_cmd = ("ip link set %s promisc on; sleep 0.01;"
                               "ip link set %s promisc off; sleep 0.01" %
                               (ethname, ethname))
            while True:
                self.session.cmd_output(set_promisc_cmd)
                if self.termination_event.isSet():
                    break
    logging.info("Started thread to change promisc mode in guest")
    termination_event = threading.Event()
    promisc_thread = ThreadPromiscCmd(session_serial, termination_event)
    promisc_thread.start()
    dd_cmd = "dd if=/dev/urandom of=%s bs=%d count=1"
    filename = "/tmp/nic_promisc_file"
    file_size = params.get("file_size", "1, 1460, 65000, 100000000").split(",")
    success_counter = 0
    try:
        for size in file_size:
            # host -> guest direction
            logging.info("Create %s bytes file on host", size)
            utils.run(dd_cmd % (filename, int(size)))
            logging.info("Transfer file from host to guest")
            try:
                vm.copy_files_to(filename, filename)
            except kvm_utils.SCPError, e:
                logging.error("File transfer failed (%s)", e)
                continue
            if not compare(filename):
                logging.error("Compare file failed")
                continue
            else:
                success_counter += 1
            # guest -> host direction
            logging.info("Create %s bytes file on guest", size)
            session.cmd(dd_cmd % (filename, int(size)), timeout=100)
            logging.info("Transfer file from guest to host")
            try:
                vm.copy_files_from(filename, filename)
            except kvm_utils.SCPError, e:
                logging.error("File transfer failed (%s)", e)
                continue
            if not compare(filename):
                logging.error("Compare file failed")
                continue
            else:
                success_counter += 1
            logging.info("Clean temporary files")
            cmd = "rm -f %s" % filename
            utils.run(cmd)
            session.cmd_output(cmd)
    finally:
        logging.info("Stopping the promisc thread")
        termination_event.set()
        promisc_thread.join(10)
        logging.info("Restore the %s to the nonpromisc mode", ethname)
        session.cmd_output("ip link set %s promisc off" % ethname)
        session.close()
    # each size is attempted in both directions, hence the factor of 2
    if success_counter != 2 * len(file_size):
        raise error.TestFail("Some tests failed, succss_ratio : %s/%s" %
                             (success_counter, len(file_size)))
| gpl-2.0 |
AgataGibas/python101 | bazy/orm/sqlalchemy/ormsa03.py | 2 | 1954 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
# remove a stale database file left over from a previous run
if os.path.exists('test.db'):
    os.remove('test.db')
# create an Engine instance that manages the database
baza = create_engine('sqlite:///test.db')  # ':memory:'
# declarative base class
BazaModel = declarative_base()
# The Klasa and Uczen classes describe records of the "klasa" and "uczen"
# tables and the relations between them
class Klasa(BazaModel):
    """ORM model of a school class; one Klasa has many Uczen pupils."""
    __tablename__ = 'klasa'
    id = Column(Integer, primary_key=True)
    # nazwa = class name (e.g. '1A'), profil = class profile
    nazwa = Column(String(100), nullable=False)
    profil = Column(String(100), default='')
    uczniowie = relationship('Uczen', backref='klasa')
class Uczen(BazaModel):
    """ORM model of a pupil; klasa_id links each pupil to a Klasa row."""
    __tablename__ = 'uczen'
    id = Column(Integer, primary_key=True)
    # imie = first name, nazwisko = surname
    imie = Column(String(100), nullable=False)
    nazwisko = Column(String(100), nullable=False)
    klasa_id = Column(Integer, ForeignKey('klasa.id'))
# tworzymy tabele
BazaModel.metadata.create_all(baza)
# tworzymy sesję, która przechowuje obiekty i umożliwia "rozmowę" z bazą
BDSesja = sessionmaker(bind=baza)
sesja = BDSesja()
# dodajemy dwie klasy, jeżeli tabela jest pusta
if not sesja.query(Klasa).count():
sesja.add(Klasa(nazwa='1A', profil='matematyczny'))
sesja.add(Klasa(nazwa='1B', profil='humanistyczny'))
# tworzymy instancję klasy Klasa reprezentującą klasę "1A"
klasa = sesja.query(Klasa).filter(Klasa.nazwa == '1A').one()
# dodajemy dane wielu uczniów
sesja.add_all([
Uczen(imie='Tomasz',nazwisko='Nowak',klasa_id=klasa.id),
Uczen(imie='Jan',nazwisko='Kos',klasa_id=klasa.id),
Uczen(imie='Piotr',nazwisko='Kowalski',klasa_id=klasa.id),
])
def czytajdane():
    """Print id, first name, surname and class name of every pupil."""
    for uczen in sesja.query(Uczen).join(Klasa).all():
        print uczen.id, uczen.imie, uczen.nazwisko, uczen.klasa.nazwa
    print ""
czytajdane()
| mit |
esc/pip | pip/_vendor/packaging/markers.py | 76 | 7341 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pip._vendor.pyparsing import (
ParseException, ParseResults, stringStart, stringEnd,
)
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
    """
    Raised when a marker string cannot be parsed; users should refer to
    PEP 508 for the marker grammar.
    """
class UndefinedComparison(ValueError):
    """
    Raised when a marker applies an operation to a value that does not
    support it.
    """
class UndefinedEnvironmentName(ValueError):
    """
    Raised when a marker references a name that does not exist inside of
    the evaluation environment.
    """
class Node(object):
    # Base class for parsed marker tokens; subclasses tag the token kind.
    def __init__(self, value):
        # value: the raw token text (a variable name or a quoted literal)
        self.value = value
    def __str__(self):
        return str(self.value)
    def __repr__(self):
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
class Variable(Node):
    """A marker environment variable such as "python_version"."""
    pass
class Value(Node):
    """A quoted literal value appearing in a marker."""
    pass
# Grammar for PEP 508 environment markers, built with pyparsing.
# VARIABLE matches any of the recognised environment variable names.
VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("extra")
)
# wrap matched variable names in Variable nodes
VARIABLE.setParseAction(lambda s, l, t: Variable(t[0]))
# version comparison operators; longer alternatives listed first so e.g.
# "===" is not consumed as "==" followed by a stray "="
VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
# literal values are single- or double-quoted strings, wrapped in Value nodes
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
# one comparison: <var-or-value> <op> <var-or-value>, coerced to a tuple
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
# full boolean expression grammar, with parenthesised grouping
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
# entry point: the entire input string must be one marker expression
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
    # Recursively convert pyparsing ParseResults into plain nested lists so
    # the parsed marker can be traversed without pyparsing involvement.
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results
def _format_marker(marker, first=True):
    # Recursively render a coerced marker parse tree back into its
    # canonical string form; *first* is True only at the outermost level.
    assert isinstance(marker, (list, tuple, string_types))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself it's own list. In that case we want skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (isinstance(marker, list) and len(marker) == 1 and
            isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])
    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            # the outermost expression needs no enclosing parentheses
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        # a single comparison: variable, operator, quoted value
        return '{0} {1} "{2}"'.format(*marker)
    else:
        # a bare "and"/"or" connective
        return marker
# Plain comparison operators, used by _eval_op when "op rhs" does not parse
# as a PEP 440 version specifier.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _eval_op(lhs, op, rhs):
    # Prefer PEP 440 version-specifier semantics when "op rhs" parses as a
    # specifier; otherwise fall back to the plain comparison operators.
    try:
        spec = Specifier("".join([op, rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)
    oper = _operators.get(op)
    if oper is None:
        # e.g. "~=" against a non-version value
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )
    return oper(lhs, rhs)
# Private sentinel: distinguishes "key absent" from a stored None value.
_undefined = object()


def _get_env(environment, name):
    """Return ``environment[name]``, raising UndefinedEnvironmentName when
    the name is not present."""
    result = environment.get(name, _undefined)
    if result is _undefined:
        raise UndefinedEnvironmentName(
            "{0!r} does not exist in evaluation environment.".format(name)
        )
    return result
def _evaluate_markers(markers, environment):
    # Evaluate a coerced marker parse tree against *environment*.
    # groups holds runs of and-ed booleans; a new group starts at each "or",
    # so the overall result is any(all(group)) -- i.e. "and" binds tighter.
    groups = [[]]
    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))
        if isinstance(marker, list):
            # parenthesised sub-expression: recurse
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker
            # exactly one side is a Variable; resolve it from the environment
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])
    return any(all(item) for item in groups)
def format_full_version(info):
    """Format a ``sys.implementation.version``-style struct as a string.

    Produces "major.minor.micro" plus, for non-final releases, the first
    letter of the release level and the serial (e.g. "3.11.0b4").
    """
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    """Return the default PEP 508 marker environment for this interpreter.

    Keys mirror the marker variable names defined by PEP 508; values are
    derived from ``os``, ``sys`` and ``platform``.
    """
    if hasattr(sys, 'implementation'):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        # Python 2 has no sys.implementation
        iver = '0'
        implementation_name = ''
    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # BUGFIX: slicing the version string to three characters yields
        # "3.1" for Python 3.10+; join major and minor components instead.
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker(object):
    """Parsed representation of a PEP 508 environment marker string."""
    def __init__(self, marker):
        # Parse eagerly so invalid markers fail at construction time.
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # include a short slice of the input around the failure point
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc:e.loc + 8])
            raise InvalidMarker(err_str)
    def __str__(self):
        return _format_marker(self._markers)
    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))
    def evaluate(self, environment=None):
        """Evaluate a marker.
        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.
        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)
        return _evaluate_markers(self._markers, current_environment)
| mit |
ephes/trivago2015 | config/settings/production.py | 1 | 5032 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")

# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
# NOTE(review): a second SECURE_PROXY_SSL_HEADER assignment near the bottom of
# this module overrides this value with 'HTTP_X_FORWARDED_PROTOCOL'; Heroku
# sends X-Forwarded-Proto, so that later assignment looks like a bug.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )

MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES

# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): SESSION_COOKIE_SECURE = False contradicts the HTTPS-only setup
# (SECURE_SSL_REDIRECT defaults to True below); session cookies will be sent
# over plain HTTP -- confirm this is intentional.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# NOTE(review): "*" accepts any Host header; consider restricting to the real
# domain(s) in production.
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION

INSTALLED_APPS += ("gunicorn", )

# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
# NOTE(review): the storages app and DEFAULT_FILE_STORAGE are commented out,
# so the AWS_* settings below are read from the environment but not used by
# any active storage backend.
#INSTALLED_APPS += (
#    'storages',
#)
#DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()

# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds

# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}

# URL that handles the media served from MEDIA_ROOT, used for managing stored files.
MEDIA_URL = '/media/'

# Static Assets
# ------------------------
# Served by WhiteNoise with gzip compression and manifest-based cache busting.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
# Outbound mail goes through SendGrid over SMTP with TLS.
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='trivago2015 <noreply@waaaasssuuuuppp.com>')
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default='smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = env("SENDGRID_PASSWORD")
EMAIL_HOST_USER = env('SENDGRID_USERNAME')
EMAIL_PORT = env.int("EMAIL_PORT", default=587)
EMAIL_SUBJECT_PREFIX = env("EMAIL_SUBJECT_PREFIX", default='[trivago2015] ')
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Cache compiled templates in production (templates are loaded once).
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
try:
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    # See: https://github.com/rdegges/django-heroku-memcacheify
    from memcacheify import memcacheify
    CACHES = memcacheify()
except ImportError:
    # Fall back to a plain memcached cache (local default) when the Heroku
    # helper is not installed.
    CACHES = {
        'default': env.cache_url("DJANGO_CACHE_URL", default="memcache://127.0.0.1:11211"),
    }
# Your production stuff: Below this line define 3rd party library settings
# BUG FIX: this assignment previously used 'HTTP_X_FORWARDED_PROTOCOL',
# silently overriding the correct 'HTTP_X_FORWARDED_PROTO' value set earlier
# in this module; Heroku's router sends the X-Forwarded-Proto header.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| bsd-3-clause |
Lab41/try41 | api.py | 1 | 10454 | from flask import Flask
from flask import g
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_from_directory
from flask import url_for
from flask.ext.babel import Babel
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.user import current_user
from flask.ext.user import login_required
from flask.ext.user import SQLAlchemyAdapter
from flask.ext.user import UserManager
from flask.ext.user import UserMixin
from docker import Client
import json
import os
import re
import redis
import sys
import time
import uuid
# set defaults
IMAGE_NAME1 = "lab41/gestalt"   # Docker image launched once per session
DOMAIN = "127.0.0.1"            # host/IP advertised to clients for container URLs
REDIS_HOST = "localhost"        # redis instance backing the session store
RSYSLOG_HOST = "rsyslog"        # passed into containers as REMOTE_HOST
PARENT_HOST = "parent"          # passed into containers as PARENT_HOST
COOKIE="try41-uid"              # name of the uuid4 session cookie
REDIS_PORT=6379
# use user accounts
USERS=False
# use ssl
SSL=False
# gestalt
EXPOSED_PORT1=8000              # port published by the gestalt container

# Shared clients: redis session store and the local Docker daemon socket.
r = redis.StrictRedis(host=REDIS_HOST, port=int(REDIS_PORT))
c = Client(version='auto', base_url='unix://var/run/docker.sock')
# Global request-validation flag toggled by the cookie checks in create_app().
BAD = False
# Strict lowercase uuid4 shape used to validate session cookie values.
UUID4 = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
# Use a Class-based config to avoid needing a 2nd file
class ConfigClass(object):
    """Flask configuration consumed via app.config.from_object().

    Sections are toggled at import time by the module-level USERS flag
    (account support via Flask-User/SQLAlchemy/Mail).
    """
    # Configure Flask
    SECRET_KEY = 'secret'                # change for production
    CSRF_ENABLED = True
    if USERS:
        SQLALCHEMY_DATABASE_URI = 'postgresql'  # change for production

    # Configure session cookie
    if not USERS:
        SESSION_COOKIE_SECURE = True
        SESSION_REFRESH_EACH_REQUEST = False
        SESSION_COOKIE_HTTPONLY = True

    # Configure Flask-Mail
    if USERS:
        MAIL_SERVER = 'smtp'             # change for production
        MAIL_PORT = 25
        MAIL_USE_SSL = False
        MAIL_DEFAULT_SENDER = 'sender'   # change for production

    # Configure Flask-User
    if USERS:
        USER_ENABLE_USERNAME = True
        USER_ENABLE_CONFIRM_EMAIL = True
        USER_ENABLE_CHANGE_USERNAME = True
        USER_ENABLE_CHANGE_PASSWORD = True
        USER_ENABLE_FORGOT_PASSWORD = True
        USER_ENABLE_RETYPE_PASSWORD = True
        USER_LOGIN_TEMPLATE = 'flask_user/login_or_register.html'
        USER_REGISTER_TEMPLATE = 'flask_user/login_or_register.html'
def create_app():
    """Application factory for the try41 demo site.

    Builds the Flask app, optionally wires up Flask-User accounts (module
    level USERS flag), tracks visitor sessions via a uuid4 cookie backed
    by redis, and exposes routes that spin up one Docker container per
    session.
    """
    # Setup Flask and read config from ConfigClass defined above
    app = Flask(__name__)
    app.config.from_object(__name__+'.ConfigClass')

    # Initialize Flask extensions
    if USERS:
        babel = Babel(app)
        db = SQLAlchemy(app)
        mail = Mail(app)

        @babel.localeselector
        def get_locale():
            # Serve the best available translation for the client.
            translations = [str(translation) for translation in babel.list_translations()]
            return request.accept_languages.best_match(translations)

        # Define User model. Make sure to add flask.ext.user UserMixin!!
        class User(db.Model, UserMixin):
            id = db.Column(db.Integer, primary_key=True)
            active = db.Column(db.Boolean(), nullable=False, default=False)
            username = db.Column(db.String(50), nullable=False, unique=True)
            password = db.Column(db.String(255), nullable=False, default='')
            email = db.Column(db.String(255), nullable=False, unique=True)
            confirmed_at = db.Column(db.DateTime())
            reset_password_token = db.Column(db.String(100), nullable=False, default='')

        # Create all database tables
        db.create_all()

        # Setup Flask-User
        db_adapter = SQLAlchemyAdapter(db, User)
        user_manager = UserManager(db_adapter, app)

        # The '/profile' page requires a logged-in user
        @app.route('/profile')
        @login_required
        def profile():
            return render_template_string("""
            {% extends "base.html" %}
            {% block content %}
                <h2>{%trans%}Profile Page{%endtrans%}</h2>
                <p> {%trans%}Hello{%endtrans%}
                    {{ current_user.username or current_user.email }},</p>
                <p> <a href="{{ url_for('user.change_username') }}">
                    {%trans%}Change username{%endtrans%}</a></p>
                <p> <a href="{{ url_for('user.change_password') }}">
                    {%trans%}Change password{%endtrans%}</a></p>
                <p> <a href="{{ url_for('user.logout') }}?next={{ url_for('user.login') }}">
                    {%trans%}Sign out{%endtrans%}</a></p>
            {% endblock %}
            """)

    def store_metadata(exposed_ports, container_id, container, image_name):
        """Persist the new container's metadata in redis under the caller's
        session cookie.

        Returns the global BAD flag so callers can tell whether the
        session/cookie validation failed.
        """
        global BAD
        urlport = ""
        for exposed_port in exposed_ports:
            container_port = c.port(container_id, exposed_port)
            url = "%s:%s" % (DOMAIN, container_port[0]['HostPort'])
            urlport += url+","
        hmap = {}
        hmap['container_id'] = container_id
        hmap['container'] = container
        hmap['url'] = urlport[:-1]          # drop trailing comma
        hmap['timestamp'] = int(time.time())
        hmap['expired'] = 0
        hmap['image'] = image_name
        data = json.dumps(hmap)
        check_cookie()
        # check cookie formatting, ensure that it exists in sessions
        # also check that it doesn't already exist
        if not BAD:
            cookie = request.cookies.get(COOKIE)
            if re.match(UUID4, cookie):
                if r.sismember('sessions', cookie):
                    r.lpush(cookie, data)
                else:
                    app.logger.info('invalid session')
                    BAD = True
            else:
                app.logger.info('invalid uuid')
                BAD = True
        # BUG FIX: this function previously returned None, so the
        # "bad = store_metadata(...)" check in new() could never fire.
        return BAD

    def get_url(request):
        """Return the most recently stored container URL for this session,
        or '' when the session failed validation."""
        global BAD
        # this is validated with check_cookie before_request
        if not BAD:
            uid = request.cookies.get(COOKIE)
            container = r.lindex(uid, 0)
            container = json.loads(container)
            url = container['url']
            if "," in url:
                url_list = url.split(',')
                url = url_list[-1]
            return url
        else:
            return ""

    def after_this_request(f):
        # Register f to be invoked once the current request's response exists.
        if not hasattr(g, 'after_request_callbacks'):
            g.after_request_callbacks = []
        g.after_request_callbacks.append(f)
        return f

    @app.after_request
    def call_after_request_callbacks(response):
        for callback in getattr(g, 'after_request_callbacks', ()):
            callback(response)
        return response

    @app.before_request
    def check_cookie():
        """Ensure every request carries a valid uuid4 session cookie,
        minting and registering one on first contact."""
        global BAD
        uid = request.cookies.get(COOKIE)
        if uid is None:
            uid = str(uuid.uuid4())

            @after_this_request
            def save_cookie(response):
                # BUG FIX: save_cookie assigns BAD, so without this
                # declaration Python bound BAD as a function-local and the
                # flag updates below were silently dropped.
                global BAD
                # validate uid formatting, and that it doesn't conflict
                if re.match(UUID4, uid):
                    if r.sismember('sessions', uid):
                        app.logger.info('uuid already exists')
                        BAD = True
                    else:
                        r.sadd('sessions', uid)
                        g.uid = uid
                        BAD = False
                        response.set_cookie(COOKIE, uid, httponly=True)
                else:
                    app.logger.info('invalid uuid')
                    BAD = True
        BAD = False

    @app.route('/')
    def index():
        return render_template("index.html")

    @app.route('/github-buttons')
    def github_buttons():
        return render_template("github-btn.html")

    @app.route('/details/wait')
    def wait():
        if SSL:
            return redirect("/gestalt", code=302)
        else:
            return render_template("wait.html")

    @app.route('/new', methods=["POST"])
    def new():
        """Spin up a gestalt container for this session (at most one live
        container per session)."""
        if not USERS or current_user.is_authenticated():
            exposed_ports = [EXPOSED_PORT1]
            cookie = request.cookies.get(COOKIE)
            # ROBUSTNESS FIX: the cookie is None on the very first request
            # (it is only set on the response); re.match raised TypeError
            # on None before this guard.
            if cookie is not None and re.match(UUID4, cookie):
                spinup = 1
                # check if this image has already been spun up for this session
                if r.exists(cookie):
                    # !! TODO error check
                    data = r.lrange(cookie, 0, -1)
                    for record in data:
                        jrec = json.loads(record)
                        if jrec['image'] == "lab41/gestalt":
                            if jrec['expired'] == 0:
                                app.logger.info('a gestalt container is already running for this session')
                                spinup = 0
                                return jsonify(url="wait")
                if spinup == 1:
                    host_config = c.create_host_config(publish_all_ports=True)
                    if SSL:
                        container = c.create_container(image=IMAGE_NAME1, host_config=host_config, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST, 'SSL': "True"})
                    else:
                        container = c.create_container(image=IMAGE_NAME1, host_config=host_config, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST})
                    c.start(container=container.get('Id'))
                    b = c.inspect_container(container)  # NOTE(review): result unused
                    bad = store_metadata(exposed_ports, container.get('Id'), container, IMAGE_NAME1)
                    if bad:
                        return render_template("index.html")
                    else:
                        return jsonify(url="launch")
                else:
                    return jsonify(url="wait")
        else:
            return jsonify(url="login")

    @app.route('/details/login')
    def details_login():
        return redirect(url_for('user.login'))

    @app.route('/details/launch')
    def details():
        if not USERS or current_user.is_authenticated():
            url = get_url(request)
            return render_template("details.html",url=url, USERS=USERS, SSL=SSL, DOMAIN=DOMAIN)
        else:
            return jsonify(url="login")

    @app.route('/robot.txt')
    def robot():
        # NOTE(review): crawlers request '/robots.txt' (plural); this route
        # path looks like a typo -- confirm before renaming, as renaming
        # changes the public URL.
        return render_template("robot.html")

    @app.route('/favicon.ico')
    def favicon():
        return send_from_directory(os.path.join(app.root_path, 'static'),
                                   'favicon.ico', mimetype='image/vnd.microsoft.icon')

    return app
if __name__ == '__main__':
    # Development entry point: build the app and listen on all interfaces
    # with Flask's built-in server (use a WSGI server in production).
    app = create_app()
    app.run(host="0.0.0.0")
| apache-2.0 |
Yellowen/Owrang | patches/march_2013/p05_payment_reconciliation.py | 2 | 1304 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
    """Mark GL Entries duplicated by the payment-reconciliation bug as
    cancelled, then regenerate GL entries for the affected Journal
    Vouchers."""
    # delete wrong gle entries created due to a bug in make_gl_entries of
    # Account Controller when using payment reconciliation: find vouchers
    # with equal-and-opposite duplicate rows saved in the same instant.
    affected_vouchers = webnotes.conn.sql_list("""select distinct gl1.voucher_no
		from `tabGL Entry` gl1, `tabGL Entry` gl2
		where
			date(gl1.modified) >= "2013-03-11"
			and date(gl1.modified) = date(gl2.modified)
			and gl1.voucher_no = gl2.voucher_no
			and gl1.voucher_type = "Journal Voucher"
			and gl1.voucher_type = gl2.voucher_type
			and gl1.posting_date = gl2.posting_date
			and gl1.account = gl2.account
			and ifnull(gl1.is_cancelled, 'No') = 'No' and ifnull(gl2.is_cancelled, 'No') = 'No'
			and ifnull(gl1.against_voucher, '') = ifnull(gl2.against_voucher, '')
			and ifnull(gl1.against_voucher_type, '') = ifnull(gl2.against_voucher_type, '')
			and gl1.remarks = gl2.remarks
			and ifnull(gl1.debit, 0) = ifnull(gl2.credit, 0)
			and ifnull(gl1.credit, 0) = ifnull(gl2.debit, 0)
			and gl1.name > gl2.name""")

    for voucher_no in affected_vouchers:
        # Cancel every GL entry of the voucher, then let the voucher
        # rebuild a clean set via its own make_gl_entries.
        webnotes.conn.sql("""update `tabGL Entry` set `is_cancelled`='Yes'
			where voucher_type='Journal Voucher' and voucher_no=%s""", voucher_no)
        jv = webnotes.bean("Journal Voucher", voucher_no)
        jv.run_method("make_gl_entries")
| agpl-3.0 |
philipz/PyCV-time | opencv-official-samples/2.4.9/gabor_threads.py | 7 | 1704 | #!/usr/bin/env python
'''
gabor_threads.py
=========
Sample demonstrates:
- use of multiple Gabor filter convolutions to get Fractalius-like image effect (http://www.redfieldplugins.com/filterFractalius.htm)
- use of python threading to accelerate the computation
Usage
-----
gabor_threads.py [image filename]
'''
import numpy as np
import cv2
from multiprocessing.pool import ThreadPool
def build_filters():
    """Return a bank of Gabor kernels covering 16 orientations in [0, pi)."""
    ksize = 31

    def make_kernel(theta):
        # One oriented Gabor kernel, normalised so responses stay bounded.
        kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        return kern / (1.5 * kern.sum())

    return [make_kernel(theta) for theta in np.arange(0, np.pi, np.pi / 16)]
def process(img, filters):
    """Convolve img with every kernel and keep the per-pixel maximum."""
    result = np.zeros_like(img)
    for kernel in filters:
        filtered = cv2.filter2D(img, cv2.CV_8UC3, kernel)
        np.maximum(result, filtered, result)  # in-place elementwise max
    return result
def process_threaded(img, filters, threadn = 8):
    """Same result as process(), distributing the convolutions over a
    thread pool (order of completion does not matter for the max)."""
    result = np.zeros_like(img)

    def convolve(kernel):
        return cv2.filter2D(img, cv2.CV_8UC3, kernel)

    pool = ThreadPool(processes=threadn)
    for filtered in pool.imap_unordered(convolve, filters):
        np.maximum(result, filtered, result)
    return result
if __name__ == '__main__':
    import sys
    from common import Timer

    # NOTE: Python 2 syntax (print statements) -- this sample predates py3.
    print __doc__
    # Optional CLI argument: image path; falls back to the bundled baboon.
    try: img_fn = sys.argv[1]
    except: img_fn = '../cpp/baboon.jpg'

    img = cv2.imread(img_fn)
    filters = build_filters()

    # Time both implementations and confirm they agree pixel-for-pixel.
    with Timer('running single-threaded'):
        res1 = process(img, filters)
    with Timer('running multi-threaded'):
        res2 = process_threaded(img, filters)

    print 'res1 == res2: ', (res1 == res2).all()
    cv2.imshow('img', img)
    cv2.imshow('result', res2)
    cv2.waitKey()
    cv2.destroyAllWindows()
| mit |
tal-nino/ansible | lib/ansible/plugins/lookup/sequence.py | 82 | 7066 | # (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from re import compile as re_compile, IGNORECASE
from ansible.errors import *
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
# NUM matches a decimal, octal (leading 0) or hex (0x..) literal; SHORTCUT
# matches the "[start-]end[/stride][:format]" shortcut consumed by
# LookupModule.parse_simple_args below.
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
    "^(" +        # Group 0
    NUM +         # Group 1: Start
    "-)?" +
    NUM +         # Group 2: End
    "(/" +        # Group 3
    NUM +         # Group 4: Stride
    ")?" +
    "(:(.+))?$",  # Group 5, Group 6: Format String
    IGNORECASE
)
class LookupModule(LookupBase):
    """
    sequence lookup module

    Used to generate some sequence of items. Takes arguments in two forms.

    The simple / shortcut form is:

        [start-]end[/stride][:format]

    As indicated by the brackets: start, stride, and format string are all
    optional. The format string is in the style of printf. This can be used
    to pad with zeros, format in hexadecimal, etc. All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported.

    Some examples:

        5 -> ["1","2","3","4","5"]
        5-8 -> ["5", "6", "7", "8"]
        2-10/2 -> ["2", "4", "6", "8", "10"]
        4:host%02d -> ["host01","host02","host03","host04"]

    The standard Ansible key-value form is accepted as well. For example:

        start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]

    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value. For example:

        count=5 -> ["1", "2", "3", "4", "5"]
        start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
        start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
        start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]

    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """

    def reset(self):
        """set sensible defaults"""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"

    def parse_kv_args(self, args):
        """parse key-value style arguments"""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # int(x, 0) honours 0x.. hex and leading-zero octal prefixes
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse arg %s=%r as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        if args:
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r"
                % args.keys()
            )

    def parse_simple_args(self, term):
        """parse the shortcut forms, return True/False"""
        match = SHORTCUT.match(term)
        if not match:
            return False

        _, start, end, _, stride, _, format = match.groups()

        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)
        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)
        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)

        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format

        # BUG FIX: the success case previously fell off the end returning
        # None (falsy), so run() also fed shortcut terms through
        # parse_kv_args despite the docstring's "return True/False" contract.
        return True

    def sanity_check(self):
        """Validate the option combination and normalise count into end."""
        if self.count is None and self.end is None:
            raise AnsibleError("must specify count or end in with_sequence")
        elif self.count is not None and self.end is not None:
            raise AnsibleError("can't specify both count and end in with_sequence")
        elif self.count is not None:
            # convert count to end
            if self.count != 0:
                self.end = self.start + self.count * self.stride - 1
            else:
                self.start = 0
                self.end = 0
                self.stride = 0
            del self.count
        if self.stride > 0 and self.end < self.start:
            raise AnsibleError("to count backwards make stride negative")
        if self.stride < 0 and self.end > self.start:
            raise AnsibleError("to count forward don't make stride negative")
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)

    def generate_sequence(self):
        """Yield each formatted value of the configured sequence."""
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        # xrange: this module targets Python 2 (as the rest of the file does)
        numbers = xrange(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                # BUG FIX: the original interpolated a two-placeholder string
                # with a single argument ("... %r with %r" % self.format),
                # raising TypeError instead of the intended AnsibleError.
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )

    def run(self, terms, variables, **kwargs):
        """Entry point: expand every term into its generated sequence."""
        results = []

        for term in terms:
            try:
                self.reset()  # clear out things for this iteration
                try:
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(parse_kv(term))
                except Exception as e:
                    raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))

                self.sanity_check()
                if self.stride != 0:
                    results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            except Exception as e:
                raise AnsibleError(
                    "unknown error generating sequence: %s" % e
                )

        return results
elijah513/django | django/contrib/sites/managers.py | 472 | 2132 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
class CurrentSiteManager(models.Manager):
    "Use this to limit objects to those associated with the current site."
    use_in_migrations = True

    def __init__(self, field_name=None):
        """Optionally pin the relation field name; otherwise it is detected
        lazily ('site' first, falling back to 'sites')."""
        super(CurrentSiteManager, self).__init__()
        self.__field_name = field_name

    def check(self, **kwargs):
        errors = super(CurrentSiteManager, self).check(**kwargs)
        errors.extend(self._check_field_name())
        return errors

    def _check_field_name(self):
        """Return check-framework errors when the configured field is missing
        or is not a relation to Site."""
        field_name = self._get_field_name()
        try:
            field = self.model._meta.get_field(field_name)
        except FieldDoesNotExist:
            missing = checks.Error(
                "CurrentSiteManager could not find a field named '%s'." % field_name,
                hint=None,
                obj=self,
                id='sites.E001',
            )
            return [missing]
        if isinstance(field, (models.ForeignKey, models.ManyToManyField)):
            return []
        wrong_type = checks.Error(
            "CurrentSiteManager cannot use '%s.%s' as it is not a ForeignKey or ManyToManyField." % (
                self.model._meta.object_name, field_name
            ),
            hint=None,
            obj=self,
            id='sites.E002',
        )
        return [wrong_type]

    def _get_field_name(self):
        """ Return self.__field_name or 'site' or 'sites'. """
        if self.__field_name:
            return self.__field_name
        try:
            self.model._meta.get_field('site')
        except FieldDoesNotExist:
            self.__field_name = 'sites'
        else:
            self.__field_name = 'site'
        return self.__field_name

    def get_queryset(self):
        # Filter the base queryset down to rows tied to settings.SITE_ID.
        lookup = self._get_field_name() + '__id'
        base = super(CurrentSiteManager, self).get_queryset()
        return base.filter(**{lookup: settings.SITE_ID})
bj7/pwndbg | pwndbg/constants/s390.py | 1 | 26907 | from .constant import Constant
__NR_exit = Constant('__NR_exit',1)
__NR_fork = Constant('__NR_fork',2)
__NR_read = Constant('__NR_read',3)
__NR_write = Constant('__NR_write',4)
__NR_open = Constant('__NR_open',5)
__NR_close = Constant('__NR_close',6)
__NR_creat = Constant('__NR_creat',8)
__NR_link = Constant('__NR_link',9)
__NR_unlink = Constant('__NR_unlink',10)
__NR_execve = Constant('__NR_execve',11)
__NR_chdir = Constant('__NR_chdir',12)
__NR_time = Constant('__NR_time',13)
__NR_mknod = Constant('__NR_mknod',14)
__NR_chmod = Constant('__NR_chmod',15)
__NR_lchown = Constant('__NR_lchown',16)
__NR_lseek = Constant('__NR_lseek',19)
__NR_getpid = Constant('__NR_getpid',20)
__NR_mount = Constant('__NR_mount',21)
__NR_umount = Constant('__NR_umount',22)
__NR_setuid = Constant('__NR_setuid',23)
__NR_getuid = Constant('__NR_getuid',24)
__NR_stime = Constant('__NR_stime',25)
__NR_ptrace = Constant('__NR_ptrace',26)
__NR_alarm = Constant('__NR_alarm',27)
__NR_pause = Constant('__NR_pause',29)
__NR_utime = Constant('__NR_utime',30)
__NR_access = Constant('__NR_access',33)
__NR_nice = Constant('__NR_nice',34)
__NR_sync = Constant('__NR_sync',36)
__NR_kill = Constant('__NR_kill',37)
__NR_rename = Constant('__NR_rename',38)
__NR_mkdir = Constant('__NR_mkdir',39)
__NR_rmdir = Constant('__NR_rmdir',40)
__NR_dup = Constant('__NR_dup',41)
__NR_pipe = Constant('__NR_pipe',42)
__NR_times = Constant('__NR_times',43)
__NR_brk = Constant('__NR_brk',45)
__NR_setgid = Constant('__NR_setgid',46)
__NR_getgid = Constant('__NR_getgid',47)
__NR_signal = Constant('__NR_signal',48)
__NR_geteuid = Constant('__NR_geteuid',49)
__NR_getegid = Constant('__NR_getegid',50)
__NR_acct = Constant('__NR_acct',51)
__NR_umount2 = Constant('__NR_umount2',52)
__NR_ioctl = Constant('__NR_ioctl',54)
__NR_fcntl = Constant('__NR_fcntl',55)
__NR_setpgid = Constant('__NR_setpgid',57)
__NR_umask = Constant('__NR_umask',60)
__NR_chroot = Constant('__NR_chroot',61)
__NR_ustat = Constant('__NR_ustat',62)
__NR_dup2 = Constant('__NR_dup2',63)
__NR_getppid = Constant('__NR_getppid',64)
__NR_getpgrp = Constant('__NR_getpgrp',65)
__NR_setsid = Constant('__NR_setsid',66)
__NR_sigaction = Constant('__NR_sigaction',67)
__NR_setreuid = Constant('__NR_setreuid',70)
__NR_setregid = Constant('__NR_setregid',71)
__NR_sigsuspend = Constant('__NR_sigsuspend',72)
__NR_sigpending = Constant('__NR_sigpending',73)
__NR_sethostname = Constant('__NR_sethostname',74)
__NR_setrlimit = Constant('__NR_setrlimit',75)
__NR_getrlimit = Constant('__NR_getrlimit',76)
__NR_getrusage = Constant('__NR_getrusage',77)
__NR_gettimeofday = Constant('__NR_gettimeofday',78)
__NR_settimeofday = Constant('__NR_settimeofday',79)
__NR_getgroups = Constant('__NR_getgroups',80)
__NR_setgroups = Constant('__NR_setgroups',81)
__NR_symlink = Constant('__NR_symlink',83)
__NR_readlink = Constant('__NR_readlink',85)
__NR_uselib = Constant('__NR_uselib',86)
__NR_swapon = Constant('__NR_swapon',87)
__NR_reboot = Constant('__NR_reboot',88)
__NR_readdir = Constant('__NR_readdir',89)
__NR_mmap = Constant('__NR_mmap',90)
__NR_munmap = Constant('__NR_munmap',91)
__NR_truncate = Constant('__NR_truncate',92)
__NR_ftruncate = Constant('__NR_ftruncate',93)
__NR_fchmod = Constant('__NR_fchmod',94)
__NR_fchown = Constant('__NR_fchown',95)
__NR_getpriority = Constant('__NR_getpriority',96)
__NR_setpriority = Constant('__NR_setpriority',97)
__NR_statfs = Constant('__NR_statfs',99)
__NR_fstatfs = Constant('__NR_fstatfs',100)
__NR_ioperm = Constant('__NR_ioperm',101)
__NR_socketcall = Constant('__NR_socketcall',102)
__NR_syslog = Constant('__NR_syslog',103)
__NR_setitimer = Constant('__NR_setitimer',104)
__NR_getitimer = Constant('__NR_getitimer',105)
__NR_stat = Constant('__NR_stat',106)
__NR_lstat = Constant('__NR_lstat',107)
__NR_fstat = Constant('__NR_fstat',108)
__NR_lookup_dcookie = Constant('__NR_lookup_dcookie',110)
__NR_vhangup = Constant('__NR_vhangup',111)
__NR_idle = Constant('__NR_idle',112)
__NR_wait4 = Constant('__NR_wait4',114)
__NR_swapoff = Constant('__NR_swapoff',115)
__NR_sysinfo = Constant('__NR_sysinfo',116)
__NR_ipc = Constant('__NR_ipc',117)
__NR_fsync = Constant('__NR_fsync',118)
__NR_sigreturn = Constant('__NR_sigreturn',119)
__NR_clone = Constant('__NR_clone',120)
__NR_setdomainname = Constant('__NR_setdomainname',121)
__NR_uname = Constant('__NR_uname',122)
__NR_adjtimex = Constant('__NR_adjtimex',124)
__NR_mprotect = Constant('__NR_mprotect',125)
__NR_sigprocmask = Constant('__NR_sigprocmask',126)
__NR_create_module = Constant('__NR_create_module',127)
__NR_init_module = Constant('__NR_init_module',128)
__NR_delete_module = Constant('__NR_delete_module',129)
__NR_get_kernel_syms = Constant('__NR_get_kernel_syms',130)
__NR_quotactl = Constant('__NR_quotactl',131)
__NR_getpgid = Constant('__NR_getpgid',132)
__NR_fchdir = Constant('__NR_fchdir',133)
__NR_bdflush = Constant('__NR_bdflush',134)
__NR_sysfs = Constant('__NR_sysfs',135)
__NR_personality = Constant('__NR_personality',136)
__NR_afs_syscall = Constant('__NR_afs_syscall',137)
__NR_setfsuid = Constant('__NR_setfsuid',138)
__NR_setfsgid = Constant('__NR_setfsgid',139)
__NR__llseek = Constant('__NR__llseek',140)
__NR_getdents = Constant('__NR_getdents',141)
__NR__newselect = Constant('__NR__newselect',142)
__NR_flock = Constant('__NR_flock',143)
__NR_msync = Constant('__NR_msync',144)
__NR_readv = Constant('__NR_readv',145)
__NR_writev = Constant('__NR_writev',146)
__NR_getsid = Constant('__NR_getsid',147)
__NR_fdatasync = Constant('__NR_fdatasync',148)
__NR__sysctl = Constant('__NR__sysctl',149)
__NR_mlock = Constant('__NR_mlock',150)
__NR_munlock = Constant('__NR_munlock',151)
__NR_mlockall = Constant('__NR_mlockall',152)
__NR_munlockall = Constant('__NR_munlockall',153)
__NR_sched_setparam = Constant('__NR_sched_setparam',154)
__NR_sched_getparam = Constant('__NR_sched_getparam',155)
__NR_sched_setscheduler = Constant('__NR_sched_setscheduler',156)
__NR_sched_getscheduler = Constant('__NR_sched_getscheduler',157)
__NR_sched_yield = Constant('__NR_sched_yield',158)
__NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max',159)
__NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min',160)
__NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval',161)
__NR_nanosleep = Constant('__NR_nanosleep',162)
__NR_mremap = Constant('__NR_mremap',163)
__NR_setresuid = Constant('__NR_setresuid',164)
__NR_getresuid = Constant('__NR_getresuid',165)
__NR_query_module = Constant('__NR_query_module',167)
__NR_poll = Constant('__NR_poll',168)
__NR_nfsservctl = Constant('__NR_nfsservctl',169)
__NR_setresgid = Constant('__NR_setresgid',170)
__NR_getresgid = Constant('__NR_getresgid',171)
__NR_prctl = Constant('__NR_prctl',172)
__NR_rt_sigreturn = Constant('__NR_rt_sigreturn',173)
__NR_rt_sigaction = Constant('__NR_rt_sigaction',174)
__NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask',175)
__NR_rt_sigpending = Constant('__NR_rt_sigpending',176)
__NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait',177)
__NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo',178)
__NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend',179)
__NR_pread = Constant('__NR_pread',180)
__NR_pwrite = Constant('__NR_pwrite',181)
__NR_chown = Constant('__NR_chown',182)
__NR_getcwd = Constant('__NR_getcwd',183)
__NR_capget = Constant('__NR_capget',184)
__NR_capset = Constant('__NR_capset',185)
__NR_sigaltstack = Constant('__NR_sigaltstack',186)
__NR_sendfile = Constant('__NR_sendfile',187)
__NR_getpmsg = Constant('__NR_getpmsg',188)
__NR_putpmsg = Constant('__NR_putpmsg',189)
__NR_vfork = Constant('__NR_vfork',190)
__NR_ugetrlimit = Constant('__NR_ugetrlimit',191)
__NR_mmap2 = Constant('__NR_mmap2',192)
__NR_truncate64 = Constant('__NR_truncate64',193)
__NR_ftruncate64 = Constant('__NR_ftruncate64',194)
__NR_stat64 = Constant('__NR_stat64',195)
__NR_lstat64 = Constant('__NR_lstat64',196)
__NR_fstat64 = Constant('__NR_fstat64',197)
__NR_lchown32 = Constant('__NR_lchown32',198)
__NR_getuid32 = Constant('__NR_getuid32',199)
__NR_getgid32 = Constant('__NR_getgid32',200)
__NR_geteuid32 = Constant('__NR_geteuid32',201)
__NR_getegid32 = Constant('__NR_getegid32',202)
__NR_setreuid32 = Constant('__NR_setreuid32',203)
__NR_setregid32 = Constant('__NR_setregid32',204)
__NR_getgroups32 = Constant('__NR_getgroups32',205)
__NR_setgroups32 = Constant('__NR_setgroups32',206)
__NR_fchown32 = Constant('__NR_fchown32',207)
__NR_setresuid32 = Constant('__NR_setresuid32',208)
__NR_getresuid32 = Constant('__NR_getresuid32',209)
__NR_setresgid32 = Constant('__NR_setresgid32',210)
__NR_getresgid32 = Constant('__NR_getresgid32',211)
__NR_chown32 = Constant('__NR_chown32',212)
__NR_setuid32 = Constant('__NR_setuid32',213)
__NR_setgid32 = Constant('__NR_setgid32',214)
__NR_setfsuid32 = Constant('__NR_setfsuid32',215)
__NR_setfsgid32 = Constant('__NR_setfsgid32',216)
__NR_pivot_root = Constant('__NR_pivot_root',217)
__NR_mincore = Constant('__NR_mincore',218)
__NR_madvise = Constant('__NR_madvise',219)
__NR_getdents64 = Constant('__NR_getdents64',220)
__NR_fcntl64 = Constant('__NR_fcntl64',221)
__NR_readahead = Constant('__NR_readahead',222)
__NR_sendfile64 = Constant('__NR_sendfile64',223)
__NR_setxattr = Constant('__NR_setxattr',224)
__NR_lsetxattr = Constant('__NR_lsetxattr',225)
__NR_fsetxattr = Constant('__NR_fsetxattr',226)
__NR_getxattr = Constant('__NR_getxattr',227)
__NR_lgetxattr = Constant('__NR_lgetxattr',228)
__NR_fgetxattr = Constant('__NR_fgetxattr',229)
__NR_listxattr = Constant('__NR_listxattr',230)
__NR_llistxattr = Constant('__NR_llistxattr',231)
__NR_flistxattr = Constant('__NR_flistxattr',232)
__NR_removexattr = Constant('__NR_removexattr',233)
__NR_lremovexattr = Constant('__NR_lremovexattr',234)
__NR_fremovexattr = Constant('__NR_fremovexattr',235)
__NR_gettid = Constant('__NR_gettid',236)
__NR_tkill = Constant('__NR_tkill',237)
__NR_futex = Constant('__NR_futex',238)
__NR_sched_setaffinity = Constant('__NR_sched_setaffinity',239)
__NR_sched_getaffinity = Constant('__NR_sched_getaffinity',240)
__NR_tgkill = Constant('__NR_tgkill',241)
__NR_io_setup = Constant('__NR_io_setup',243)
__NR_io_destroy = Constant('__NR_io_destroy',244)
__NR_io_getevents = Constant('__NR_io_getevents',245)
__NR_io_submit = Constant('__NR_io_submit',246)
__NR_io_cancel = Constant('__NR_io_cancel',247)
__NR_exit_group = Constant('__NR_exit_group',248)
__NR_epoll_create = Constant('__NR_epoll_create',249)
__NR_epoll_ctl = Constant('__NR_epoll_ctl',250)
__NR_epoll_wait = Constant('__NR_epoll_wait',251)
__NR_set_tid_address = Constant('__NR_set_tid_address',252)
__NR_fadvise64 = Constant('__NR_fadvise64',253)
__NR_timer_create = Constant('__NR_timer_create',254)
__NR_timer_settime = Constant('__NR_timer_settime',(254+1))
__NR_timer_gettime = Constant('__NR_timer_gettime',(254+2))
__NR_timer_getoverrun = Constant('__NR_timer_getoverrun',(254+3))
__NR_timer_delete = Constant('__NR_timer_delete',(254+4))
__NR_clock_settime = Constant('__NR_clock_settime',(254+5))
__NR_clock_gettime = Constant('__NR_clock_gettime',(254+6))
__NR_clock_getres = Constant('__NR_clock_getres',(254+7))
__NR_clock_nanosleep = Constant('__NR_clock_nanosleep',(254+8))
__NR_fadvise64_64 = Constant('__NR_fadvise64_64',264)
__NR_statfs64 = Constant('__NR_statfs64',265)
__NR_fstatfs64 = Constant('__NR_fstatfs64',266)
__NR_remap_file_pages = Constant('__NR_remap_file_pages',267)
__NR_mq_open = Constant('__NR_mq_open',271)
__NR_mq_unlink = Constant('__NR_mq_unlink',272)
__NR_mq_timedsend = Constant('__NR_mq_timedsend',273)
__NR_mq_timedreceive = Constant('__NR_mq_timedreceive',274)
__NR_mq_notify = Constant('__NR_mq_notify',275)
__NR_mq_getsetattr = Constant('__NR_mq_getsetattr',276)
__NR_kexec_load = Constant('__NR_kexec_load',277)
__NR_add_key = Constant('__NR_add_key',278)
__NR_request_key = Constant('__NR_request_key',279)
__NR_keyctl = Constant('__NR_keyctl',280)
__NR_waitid = Constant('__NR_waitid',281)
__NR_ioprio_set = Constant('__NR_ioprio_set',282)
__NR_ioprio_get = Constant('__NR_ioprio_get',283)
__NR_inotify_init = Constant('__NR_inotify_init',284)
__NR_inotify_add_watch = Constant('__NR_inotify_add_watch',285)
__NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch',286)
__NR_openat = Constant('__NR_openat',288)
__NR_mkdirat = Constant('__NR_mkdirat',289)
__NR_mknodat = Constant('__NR_mknodat',290)
__NR_fchownat = Constant('__NR_fchownat',291)
__NR_futimesat = Constant('__NR_futimesat',292)
__NR_unlinkat = Constant('__NR_unlinkat',294)
__NR_renameat = Constant('__NR_renameat',295)
__NR_linkat = Constant('__NR_linkat',296)
__NR_symlinkat = Constant('__NR_symlinkat',297)
__NR_readlinkat = Constant('__NR_readlinkat',298)
__NR_fchmodat = Constant('__NR_fchmodat',299)
__NR_faccessat = Constant('__NR_faccessat',300)
__NR_pselect6 = Constant('__NR_pselect6',301)
__NR_ppoll = Constant('__NR_ppoll',302)
__NR_unshare = Constant('__NR_unshare',303)
__NR_set_robust_list = Constant('__NR_set_robust_list',304)
__NR_get_robust_list = Constant('__NR_get_robust_list',305)
__NR_splice = Constant('__NR_splice',306)
__NR_sync_file_range = Constant('__NR_sync_file_range',307)
__NR_tee = Constant('__NR_tee',308)
__NR_vmsplice = Constant('__NR_vmsplice',309)
__NR_getcpu = Constant('__NR_getcpu',311)
__NR_epoll_pwait = Constant('__NR_epoll_pwait',312)
__NR_utimes = Constant('__NR_utimes',313)
__NR_fallocate = Constant('__NR_fallocate',314)
__NR_utimensat = Constant('__NR_utimensat',315)
__NR_signalfd = Constant('__NR_signalfd',316)
__NR_timerfd = Constant('__NR_timerfd',317)
__NR_eventfd = Constant('__NR_eventfd',318)
__NR_timerfd_create = Constant('__NR_timerfd_create',319)
__NR_timerfd_settime = Constant('__NR_timerfd_settime',320)
__NR_timerfd_gettime = Constant('__NR_timerfd_gettime',321)
__SYS_NERR = Constant('__SYS_NERR',((129) + 1))
_SYS_TIME_H = Constant('_SYS_TIME_H',1)
SYS_access = Constant('SYS_access',33)
SYS_acct = Constant('SYS_acct',51)
SYS_add_key = Constant('SYS_add_key',278)
SYS_adjtimex = Constant('SYS_adjtimex',124)
SYS_afs_syscall = Constant('SYS_afs_syscall',137)
SYS_alarm = Constant('SYS_alarm',27)
SYS_bdflush = Constant('SYS_bdflush',134)
SYS_brk = Constant('SYS_brk',45)
SYS_capget = Constant('SYS_capget',184)
SYS_capset = Constant('SYS_capset',185)
SYS_chdir = Constant('SYS_chdir',12)
SYS_chmod = Constant('SYS_chmod',15)
SYS_chown = Constant('SYS_chown',182)
SYS_chown32 = Constant('SYS_chown32',212)
SYS_chroot = Constant('SYS_chroot',61)
SYS_clock_getres = Constant('SYS_clock_getres',(254+7))
SYS_clock_gettime = Constant('SYS_clock_gettime',(254+6))
SYS_clock_nanosleep = Constant('SYS_clock_nanosleep',(254+8))
SYS_clock_settime = Constant('SYS_clock_settime',(254+5))
SYS_clone = Constant('SYS_clone',120)
SYS_close = Constant('SYS_close',6)
SYS_creat = Constant('SYS_creat',8)
SYS_create_module = Constant('SYS_create_module',127)
SYS_delete_module = Constant('SYS_delete_module',129)
SYS_dup = Constant('SYS_dup',41)
SYS_dup2 = Constant('SYS_dup2',63)
SYS_epoll_create = Constant('SYS_epoll_create',249)
SYS_epoll_ctl = Constant('SYS_epoll_ctl',250)
SYS_epoll_pwait = Constant('SYS_epoll_pwait',312)
SYS_epoll_wait = Constant('SYS_epoll_wait',251)
SYS_eventfd = Constant('SYS_eventfd',318)
SYS_execve = Constant('SYS_execve',11)
SYS_exit = Constant('SYS_exit',1)
SYS_exit_group = Constant('SYS_exit_group',248)
SYS_faccessat = Constant('SYS_faccessat',300)
SYS_fadvise64 = Constant('SYS_fadvise64',253)
SYS_fadvise64_64 = Constant('SYS_fadvise64_64',264)
SYS_fallocate = Constant('SYS_fallocate',314)
SYS_fchdir = Constant('SYS_fchdir',133)
SYS_fchmod = Constant('SYS_fchmod',94)
SYS_fchmodat = Constant('SYS_fchmodat',299)
SYS_fchown = Constant('SYS_fchown',95)
SYS_fchown32 = Constant('SYS_fchown32',207)
SYS_fchownat = Constant('SYS_fchownat',291)
SYS_fcntl = Constant('SYS_fcntl',55)
SYS_fcntl64 = Constant('SYS_fcntl64',221)
SYS_fdatasync = Constant('SYS_fdatasync',148)
SYS_fgetxattr = Constant('SYS_fgetxattr',229)
SYS_flistxattr = Constant('SYS_flistxattr',232)
SYS_flock = Constant('SYS_flock',143)
SYS_fork = Constant('SYS_fork',2)
SYS_fremovexattr = Constant('SYS_fremovexattr',235)
SYS_fsetxattr = Constant('SYS_fsetxattr',226)
SYS_fstat = Constant('SYS_fstat',108)
SYS_fstat64 = Constant('SYS_fstat64',197)
SYS_fstatfs = Constant('SYS_fstatfs',100)
SYS_fstatfs64 = Constant('SYS_fstatfs64',266)
SYS_fsync = Constant('SYS_fsync',118)
SYS_ftruncate = Constant('SYS_ftruncate',93)
SYS_ftruncate64 = Constant('SYS_ftruncate64',194)
SYS_futex = Constant('SYS_futex',238)
SYS_futimesat = Constant('SYS_futimesat',292)
SYS_getcpu = Constant('SYS_getcpu',311)
SYS_getcwd = Constant('SYS_getcwd',183)
SYS_getdents = Constant('SYS_getdents',141)
SYS_getdents64 = Constant('SYS_getdents64',220)
SYS_getegid = Constant('SYS_getegid',50)
SYS_getegid32 = Constant('SYS_getegid32',202)
SYS_geteuid = Constant('SYS_geteuid',49)
SYS_geteuid32 = Constant('SYS_geteuid32',201)
SYS_getgid = Constant('SYS_getgid',47)
SYS_getgid32 = Constant('SYS_getgid32',200)
SYS_getgroups = Constant('SYS_getgroups',80)
SYS_getgroups32 = Constant('SYS_getgroups32',205)
SYS_getitimer = Constant('SYS_getitimer',105)
SYS_get_kernel_syms = Constant('SYS_get_kernel_syms',130)
SYS_getpgid = Constant('SYS_getpgid',132)
SYS_getpgrp = Constant('SYS_getpgrp',65)
SYS_getpid = Constant('SYS_getpid',20)
SYS_getpmsg = Constant('SYS_getpmsg',188)
SYS_getppid = Constant('SYS_getppid',64)
SYS_getpriority = Constant('SYS_getpriority',96)
SYS_getresgid = Constant('SYS_getresgid',171)
SYS_getresgid32 = Constant('SYS_getresgid32',211)
SYS_getresuid = Constant('SYS_getresuid',165)
SYS_getresuid32 = Constant('SYS_getresuid32',209)
SYS_getrlimit = Constant('SYS_getrlimit',76)
SYS_get_robust_list = Constant('SYS_get_robust_list',305)
SYS_getrusage = Constant('SYS_getrusage',77)
SYS_getsid = Constant('SYS_getsid',147)
SYS_gettid = Constant('SYS_gettid',236)
SYS_gettimeofday = Constant('SYS_gettimeofday',78)
SYS_getuid = Constant('SYS_getuid',24)
SYS_getuid32 = Constant('SYS_getuid32',199)
SYS_getxattr = Constant('SYS_getxattr',227)
SYS_idle = Constant('SYS_idle',112)
SYS_init_module = Constant('SYS_init_module',128)
SYS_inotify_add_watch = Constant('SYS_inotify_add_watch',285)
SYS_inotify_init = Constant('SYS_inotify_init',284)
SYS_inotify_rm_watch = Constant('SYS_inotify_rm_watch',286)
SYS_io_cancel = Constant('SYS_io_cancel',247)
SYS_ioctl = Constant('SYS_ioctl',54)
SYS_io_destroy = Constant('SYS_io_destroy',244)
SYS_io_getevents = Constant('SYS_io_getevents',245)
SYS_ioperm = Constant('SYS_ioperm',101)
SYS_ioprio_get = Constant('SYS_ioprio_get',283)
SYS_ioprio_set = Constant('SYS_ioprio_set',282)
SYS_io_setup = Constant('SYS_io_setup',243)
SYS_io_submit = Constant('SYS_io_submit',246)
SYS_ipc = Constant('SYS_ipc',117)
SYS_kexec_load = Constant('SYS_kexec_load',277)
SYS_keyctl = Constant('SYS_keyctl',280)
SYS_kill = Constant('SYS_kill',37)
SYS_lchown = Constant('SYS_lchown',16)
SYS_lchown32 = Constant('SYS_lchown32',198)
SYS_lgetxattr = Constant('SYS_lgetxattr',228)
SYS_link = Constant('SYS_link',9)
SYS_linkat = Constant('SYS_linkat',296)
SYS_listxattr = Constant('SYS_listxattr',230)
SYS_llistxattr = Constant('SYS_llistxattr',231)
SYS__llseek = Constant('SYS__llseek',140)
SYS_lookup_dcookie = Constant('SYS_lookup_dcookie',110)
SYS_lremovexattr = Constant('SYS_lremovexattr',234)
SYS_lseek = Constant('SYS_lseek',19)
SYS_lsetxattr = Constant('SYS_lsetxattr',225)
SYS_lstat = Constant('SYS_lstat',107)
SYS_lstat64 = Constant('SYS_lstat64',196)
SYS_madvise = Constant('SYS_madvise',219)
SYS_mincore = Constant('SYS_mincore',218)
SYS_mkdir = Constant('SYS_mkdir',39)
SYS_mkdirat = Constant('SYS_mkdirat',289)
SYS_mknod = Constant('SYS_mknod',14)
SYS_mknodat = Constant('SYS_mknodat',290)
SYS_mlock = Constant('SYS_mlock',150)
SYS_mlockall = Constant('SYS_mlockall',152)
SYS_mmap = Constant('SYS_mmap',90)
SYS_mmap2 = Constant('SYS_mmap2',192)
SYS_mount = Constant('SYS_mount',21)
SYS_mprotect = Constant('SYS_mprotect',125)
SYS_mq_getsetattr = Constant('SYS_mq_getsetattr',276)
SYS_mq_notify = Constant('SYS_mq_notify',275)
SYS_mq_open = Constant('SYS_mq_open',271)
SYS_mq_timedreceive = Constant('SYS_mq_timedreceive',274)
SYS_mq_timedsend = Constant('SYS_mq_timedsend',273)
SYS_mq_unlink = Constant('SYS_mq_unlink',272)
SYS_mremap = Constant('SYS_mremap',163)
SYS_msync = Constant('SYS_msync',144)
SYS_munlock = Constant('SYS_munlock',151)
SYS_munlockall = Constant('SYS_munlockall',153)
SYS_munmap = Constant('SYS_munmap',91)
SYS_nanosleep = Constant('SYS_nanosleep',162)
SYS__newselect = Constant('SYS__newselect',142)
SYS_nfsservctl = Constant('SYS_nfsservctl',169)
SYS_nice = Constant('SYS_nice',34)
SYS_open = Constant('SYS_open',5)
SYS_openat = Constant('SYS_openat',288)
SYS_pause = Constant('SYS_pause',29)
SYS_personality = Constant('SYS_personality',136)
SYS_pipe = Constant('SYS_pipe',42)
SYS_pivot_root = Constant('SYS_pivot_root',217)
SYS_poll = Constant('SYS_poll',168)
SYS_ppoll = Constant('SYS_ppoll',302)
SYS_prctl = Constant('SYS_prctl',172)
SYS_pread = Constant('SYS_pread',180)
SYS_pselect6 = Constant('SYS_pselect6',301)
SYS_ptrace = Constant('SYS_ptrace',26)
SYS_putpmsg = Constant('SYS_putpmsg',189)
SYS_pwrite = Constant('SYS_pwrite',181)
SYS_query_module = Constant('SYS_query_module',167)
SYS_quotactl = Constant('SYS_quotactl',131)
SYS_read = Constant('SYS_read',3)
SYS_readahead = Constant('SYS_readahead',222)
SYS_readdir = Constant('SYS_readdir',89)
SYS_readlink = Constant('SYS_readlink',85)
SYS_readlinkat = Constant('SYS_readlinkat',298)
SYS_readv = Constant('SYS_readv',145)
SYS_reboot = Constant('SYS_reboot',88)
SYS_remap_file_pages = Constant('SYS_remap_file_pages',267)
SYS_removexattr = Constant('SYS_removexattr',233)
SYS_rename = Constant('SYS_rename',38)
SYS_renameat = Constant('SYS_renameat',295)
SYS_request_key = Constant('SYS_request_key',279)
SYS_rmdir = Constant('SYS_rmdir',40)
SYS_rt_sigaction = Constant('SYS_rt_sigaction',174)
SYS_rt_sigpending = Constant('SYS_rt_sigpending',176)
SYS_rt_sigprocmask = Constant('SYS_rt_sigprocmask',175)
SYS_rt_sigqueueinfo = Constant('SYS_rt_sigqueueinfo',178)
SYS_rt_sigreturn = Constant('SYS_rt_sigreturn',173)
SYS_rt_sigsuspend = Constant('SYS_rt_sigsuspend',179)
SYS_rt_sigtimedwait = Constant('SYS_rt_sigtimedwait',177)
SYS_sched_getaffinity = Constant('SYS_sched_getaffinity',240)
SYS_sched_getparam = Constant('SYS_sched_getparam',155)
SYS_sched_get_priority_max = Constant('SYS_sched_get_priority_max',159)
SYS_sched_get_priority_min = Constant('SYS_sched_get_priority_min',160)
SYS_sched_getscheduler = Constant('SYS_sched_getscheduler',157)
SYS_sched_rr_get_interval = Constant('SYS_sched_rr_get_interval',161)
SYS_sched_setaffinity = Constant('SYS_sched_setaffinity',239)
SYS_sched_setparam = Constant('SYS_sched_setparam',154)
SYS_sched_setscheduler = Constant('SYS_sched_setscheduler',156)
SYS_sched_yield = Constant('SYS_sched_yield',158)
SYS_sendfile = Constant('SYS_sendfile',187)
SYS_sendfile64 = Constant('SYS_sendfile64',223)
SYS_setdomainname = Constant('SYS_setdomainname',121)
SYS_setfsgid = Constant('SYS_setfsgid',139)
SYS_setfsgid32 = Constant('SYS_setfsgid32',216)
SYS_setfsuid = Constant('SYS_setfsuid',138)
SYS_setfsuid32 = Constant('SYS_setfsuid32',215)
SYS_setgid = Constant('SYS_setgid',46)
SYS_setgid32 = Constant('SYS_setgid32',214)
SYS_setgroups = Constant('SYS_setgroups',81)
SYS_setgroups32 = Constant('SYS_setgroups32',206)
SYS_sethostname = Constant('SYS_sethostname',74)
SYS_setitimer = Constant('SYS_setitimer',104)
SYS_setpgid = Constant('SYS_setpgid',57)
SYS_setpriority = Constant('SYS_setpriority',97)
SYS_setregid = Constant('SYS_setregid',71)
SYS_setregid32 = Constant('SYS_setregid32',204)
SYS_setresgid = Constant('SYS_setresgid',170)
SYS_setresgid32 = Constant('SYS_setresgid32',210)
SYS_setresuid = Constant('SYS_setresuid',164)
SYS_setresuid32 = Constant('SYS_setresuid32',208)
SYS_setreuid = Constant('SYS_setreuid',70)
SYS_setreuid32 = Constant('SYS_setreuid32',203)
SYS_setrlimit = Constant('SYS_setrlimit',75)
SYS_set_robust_list = Constant('SYS_set_robust_list',304)
SYS_setsid = Constant('SYS_setsid',66)
SYS_set_tid_address = Constant('SYS_set_tid_address',252)
SYS_settimeofday = Constant('SYS_settimeofday',79)
SYS_setuid = Constant('SYS_setuid',23)
SYS_setuid32 = Constant('SYS_setuid32',213)
SYS_setxattr = Constant('SYS_setxattr',224)
SYS_sigaction = Constant('SYS_sigaction',67)
SYS_sigaltstack = Constant('SYS_sigaltstack',186)
SYS_signal = Constant('SYS_signal',48)
SYS_signalfd = Constant('SYS_signalfd',316)
SYS_sigpending = Constant('SYS_sigpending',73)
SYS_sigprocmask = Constant('SYS_sigprocmask',126)
SYS_sigreturn = Constant('SYS_sigreturn',119)
SYS_sigsuspend = Constant('SYS_sigsuspend',72)
SYS_socketcall = Constant('SYS_socketcall',102)
SYS_splice = Constant('SYS_splice',306)
SYS_stat = Constant('SYS_stat',106)
SYS_stat64 = Constant('SYS_stat64',195)
SYS_statfs = Constant('SYS_statfs',99)
SYS_statfs64 = Constant('SYS_statfs64',265)
SYS_stime = Constant('SYS_stime',25)
SYS_swapoff = Constant('SYS_swapoff',115)
SYS_swapon = Constant('SYS_swapon',87)
SYS_symlink = Constant('SYS_symlink',83)
SYS_symlinkat = Constant('SYS_symlinkat',297)
SYS_sync = Constant('SYS_sync',36)
SYS_sync_file_range = Constant('SYS_sync_file_range',307)
SYS__sysctl = Constant('SYS__sysctl',149)
SYS_sysfs = Constant('SYS_sysfs',135)
SYS_sysinfo = Constant('SYS_sysinfo',116)
SYS_syslog = Constant('SYS_syslog',103)
SYS_tee = Constant('SYS_tee',308)
SYS_tgkill = Constant('SYS_tgkill',241)
SYS_time = Constant('SYS_time',13)
SYS_timer_create = Constant('SYS_timer_create',254)
SYS_timer_delete = Constant('SYS_timer_delete',(254+4))
SYS_timerfd = Constant('SYS_timerfd',317)
SYS_timerfd_create = Constant('SYS_timerfd_create',319)
SYS_timerfd_gettime = Constant('SYS_timerfd_gettime',321)
SYS_timerfd_settime = Constant('SYS_timerfd_settime',320)
SYS_timer_getoverrun = Constant('SYS_timer_getoverrun',(254+3))
SYS_timer_gettime = Constant('SYS_timer_gettime',(254+2))
SYS_timer_settime = Constant('SYS_timer_settime',(254+1))
SYS_times = Constant('SYS_times',43)
SYS_tkill = Constant('SYS_tkill',237)
SYS_truncate = Constant('SYS_truncate',92)
SYS_truncate64 = Constant('SYS_truncate64',193)
SYS_ugetrlimit = Constant('SYS_ugetrlimit',191)
SYS_umask = Constant('SYS_umask',60)
SYS_umount = Constant('SYS_umount',22)
SYS_umount2 = Constant('SYS_umount2',52)
SYS_uname = Constant('SYS_uname',122)
SYS_unlink = Constant('SYS_unlink',10)
SYS_unlinkat = Constant('SYS_unlinkat',294)
SYS_unshare = Constant('SYS_unshare',303)
SYS_uselib = Constant('SYS_uselib',86)
SYS_ustat = Constant('SYS_ustat',62)
SYS_utime = Constant('SYS_utime',30)
SYS_utimensat = Constant('SYS_utimensat',315)
SYS_utimes = Constant('SYS_utimes',313)
SYS_vfork = Constant('SYS_vfork',190)
SYS_vhangup = Constant('SYS_vhangup',111)
SYS_vmsplice = Constant('SYS_vmsplice',309)
SYS_wait4 = Constant('SYS_wait4',114)
SYS_waitid = Constant('SYS_waitid',281)
SYS_write = Constant('SYS_write',4)
SYS_writev = Constant('SYS_writev',146)
| mit |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/xdrlib.py | 54 | 6069 | """Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
from functools import wraps
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
    """Base exception for this module.

    Catch it as ``except xdrlib.Error, var:`` (Python 2 syntax); ``var``
    then holds the Error instance.

    Public ivars:
        msg -- contains the message
    """

    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return repr(self.msg)

    def __str__(self):
        return str(self.msg)
# Raised when a value cannot be packed/unpacked (e.g. a struct.error was
# caught by the raise_conversion_error wrapper, or a malformed list marker
# was read).
class ConversionError(Error):
    pass
def raise_conversion_error(function):
    """Wrap any raised struct.errors in a ConversionError."""
    @wraps(function)
    def wrapper(self, value):
        try:
            return function(self, value)
        except struct.error as err:
            # Re-raise under the module's own exception type, keeping the
            # original message.
            raise ConversionError(err.args[0])
    return wrapper
class Packer:
    """Pack various data representations into a buffer.

    All pack_* methods append their encoding to an internal buffer which
    can be retrieved with get_buffer().
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Start over with an empty output buffer.
        self.__buf = _StringIO()

    def get_buffer(self):
        """Return everything packed so far as a single string."""
        return self.__buf.getvalue()

    # backwards compatibility
    get_buf = get_buffer

    @raise_conversion_error
    def pack_uint(self, x):
        # 4-byte big-endian unsigned.
        self.__buf.write(struct.pack('>L', x))

    @raise_conversion_error
    def pack_int(self, x):
        # 4-byte big-endian signed.
        self.__buf.write(struct.pack('>l', x))

    pack_enum = pack_int

    def pack_bool(self, x):
        # XDR booleans are a full 4-byte word: 1 for true, 0 for false.
        if x: self.__buf.write('\0\0\0\1')
        else: self.__buf.write('\0\0\0\0')

    def pack_uhyper(self, x):
        # 64-bit value encoded as two 32-bit halves, high word first.
        # TypeError is also mapped to ConversionError because shifting a
        # non-integer raises it before struct ever sees the value.
        try:
            self.pack_uint(x >> 32 & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0])
        try:
            self.pack_uint(x & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0])

    pack_hyper = pack_uhyper

    @raise_conversion_error
    def pack_float(self, x):
        self.__buf.write(struct.pack('>f', x))

    @raise_conversion_error
    def pack_double(self, x):
        self.__buf.write(struct.pack('>d', x))

    def pack_fstring(self, n, s):
        """Pack a fixed-length string, NUL-padded up to a multiple of 4.

        Raises ValueError if n is negative.
        """
        if n < 0:
            # Call-form raise: works on both Python 2 and 3, unlike the
            # old `raise ValueError, '...'` comma syntax.
            raise ValueError('fstring size must be nonnegative')
        data = s[:n]
        n = ((n + 3) // 4) * 4
        data = data + (n - len(data)) * '\0'
        self.__buf.write(data)

    pack_fopaque = pack_fstring

    def pack_string(self, s):
        """Pack a variable-length string: 4-byte length, then the data."""
        n = len(s)
        self.pack_uint(n)
        self.pack_fstring(n, s)

    pack_opaque = pack_string
    pack_bytes = pack_string

    def pack_list(self, list, pack_item):
        # Each element is preceded by a 1 marker; a trailing 0 terminates.
        for item in list:
            self.pack_uint(1)
            pack_item(item)
        self.pack_uint(0)

    def pack_farray(self, n, list, pack_item):
        """Pack a fixed-size array; raises ValueError on size mismatch."""
        if len(list) != n:
            raise ValueError('wrong array size')
        for item in list:
            pack_item(item)

    def pack_array(self, list, pack_item):
        # Variable-size array: length prefix, then the items.
        n = len(list)
        self.pack_uint(n)
        self.pack_farray(n, list, pack_item)
class Unpacker:
    """Unpacks various data representations from the given buffer.

    The buffer is supplied up front (or via reset()); an internal position
    advances as values are extracted.
    """

    def __init__(self, data):
        self.reset(data)

    def reset(self, data):
        """Start unpacking from the beginning of data."""
        self.__buf = data
        self.__pos = 0

    def get_position(self):
        return self.__pos

    def set_position(self, position):
        self.__pos = position

    def get_buffer(self):
        return self.__buf

    def done(self):
        """Raise Error if any unextracted data remains."""
        if self.__pos < len(self.__buf):
            raise Error('unextracted data remains')

    def unpack_uint(self):
        """Unpack a 4-byte big-endian unsigned integer."""
        i = self.__pos
        self.__pos = j = i + 4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        x = struct.unpack('>L', data)[0]
        try:
            # On Python 2 int(x) may overflow for values > sys.maxint; in
            # that case return the long unchanged. Never triggers on 3.x.
            return int(x)
        except OverflowError:
            return x

    def unpack_int(self):
        """Unpack a 4-byte big-endian signed integer."""
        i = self.__pos
        self.__pos = j = i + 4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>l', data)[0]

    unpack_enum = unpack_int

    def unpack_bool(self):
        return bool(self.unpack_int())

    def unpack_uhyper(self):
        """Unpack a 64-bit unsigned value stored as two 32-bit halves."""
        hi = self.unpack_uint()
        lo = self.unpack_uint()
        # The historical `long(hi) << 32` is unnecessary: Python 2 promotes
        # to long automatically on shift, and Python 3 has no `long`.
        return hi << 32 | lo

    def unpack_hyper(self):
        """Unpack a 64-bit signed value (two's complement)."""
        x = self.unpack_uhyper()
        # Plain hex literals auto-promote on Python 2; the `L` suffix is a
        # syntax error on Python 3.
        if x >= 0x8000000000000000:
            x = x - 0x10000000000000000
        return x

    def unpack_float(self):
        i = self.__pos
        self.__pos = j = i + 4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>f', data)[0]

    def unpack_double(self):
        i = self.__pos
        self.__pos = j = i + 8
        data = self.__buf[i:j]
        if len(data) < 8:
            raise EOFError
        return struct.unpack('>d', data)[0]

    def unpack_fstring(self, n):
        """Unpack a fixed-length string; consumes padding up to 4 bytes.

        Raises ValueError if n is negative, EOFError on short buffer.
        """
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        i = self.__pos
        j = i + (n + 3) // 4 * 4
        if j > len(self.__buf):
            raise EOFError
        self.__pos = j
        return self.__buf[i:i + n]

    unpack_fopaque = unpack_fstring

    def unpack_string(self):
        """Unpack a variable-length string: 4-byte length, then data."""
        n = self.unpack_uint()
        return self.unpack_fstring(n)

    unpack_opaque = unpack_string
    unpack_bytes = unpack_string

    def unpack_list(self, unpack_item):
        """Unpack a list terminated by a 0 marker; each item follows a 1."""
        items = []  # renamed from `list`, which shadowed the builtin
        while 1:
            x = self.unpack_uint()
            if x == 0: break
            if x != 1:
                raise ConversionError('0 or 1 expected, got %r' % (x,))
            item = unpack_item()
            items.append(item)
        return items

    def unpack_farray(self, n, unpack_item):
        """Unpack exactly n items using unpack_item."""
        items = []
        for i in range(n):
            items.append(unpack_item())
        return items

    def unpack_array(self, unpack_item):
        # Variable-size array: length prefix, then the items.
        n = self.unpack_uint()
        return self.unpack_farray(n, unpack_item)
| mit |
rockyzhang/zhangyanhit-python-for-android-mips | python-modules/twisted/twisted/conch/test/test_userauth.py | 59 | 39075 | # -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the implementation of the ssh-userauth service.
Maintainer: Paul Swartz
"""
from zope.interface import implements
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword, ISSHPrivateKey
from twisted.cred.credentials import IPluggableAuthenticationModules
from twisted.cred.credentials import IAnonymous
from twisted.cred.error import UnauthorizedLogin
from twisted.cred.portal import IRealm, Portal
from twisted.conch.error import ConchError, ValidPublicKey
from twisted.internet import defer, task
from twisted.protocols import loopback
from twisted.trial import unittest
try:
    # PyCrypto and pyasn1 back twisted.conch's key handling; probe for them
    # before importing the real conch modules below.
    import Crypto.Cipher.DES3, Crypto.Cipher.XOR
    import pyasn1
except ImportError:
    # keys = None is the sentinel the test-case classes check in order to
    # skip themselves when the crypto dependencies are unavailable.
    keys = None

    class transport:
        class SSHTransportBase:
            """
            A stub class so that later class definitions won't die.
            """

    class userauth:
        class SSHUserAuthClient:
            """
            A stub class so that later class definitions won't die.
            """
else:
    # Dependencies are present: import the real implementations, replacing
    # nothing (the stubs above are only defined on the failure path).
    from twisted.conch.ssh.common import NS
    from twisted.conch.checkers import SSHProtocolChecker
    from twisted.conch.ssh import keys, userauth, transport
    from twisted.conch.test import keydata
class ClientUserAuth(userauth.SSHUserAuthClient):
    """
    A mock user auth client.
    """

    def getPublicKey(self):
        """
        If a key has already been offered (C{self.lastPublicKey} is set --
        presumably maintained by the base class; confirm in
        L{userauth.SSHUserAuthClient}), return the RSA public key;
        otherwise return a fired L{defer.Deferred} of the DSA public key.

        Note that L{keys.Key} objects are returned here, not raw blobs,
        and the first call returns a Deferred while later calls return the
        key synchronously -- exercising both calling conventions.
        """
        if self.lastPublicKey:
            return keys.Key.fromString(keydata.publicRSA_openssh)
        else:
            return defer.succeed(keys.Key.fromString(keydata.publicDSA_openssh))

    def getPrivateKey(self):
        """
        Return a fired L{defer.Deferred} of the private RSA L{keys.Key}.
        """
        return defer.succeed(keys.Key.fromString(keydata.privateRSA_openssh))

    def getPassword(self, prompt=None):
        """
        Return 'foo' as the password.
        """
        return defer.succeed('foo')

    def getGenericAnswers(self, name, information, answers):
        """
        Return 'foo' as the answer to two questions.
        """
        return defer.succeed(('foo', 'foo'))
class OldClientAuth(userauth.SSHUserAuthClient):
    """
    The old SSHUserAuthClient returned a PyCrypto key object from
    getPrivateKey() and a string from getPublicKey
    """

    def getPrivateKey(self):
        # .keyObject unwraps the L{keys.Key} to the underlying PyCrypto
        # object, emulating the legacy return type.
        return defer.succeed(keys.Key.fromString(
            keydata.privateRSA_openssh).keyObject)

    def getPublicKey(self):
        # The legacy interface returned the raw public key blob (a string)
        # rather than a L{keys.Key}.
        return keys.Key.fromString(keydata.publicRSA_openssh).blob()
class ClientAuthWithoutPrivateKey(userauth.SSHUserAuthClient):
    """
    A client that can offer a public key but has no matching private key
    with which to sign.
    """

    def getPrivateKey(self):
        # No private key available: answer None explicitly.
        return None

    def getPublicKey(self):
        return keys.Key.fromString(keydata.publicRSA_openssh)
class FakeTransport(transport.SSHTransportBase):
    """
    L{userauth.SSHUserAuthServer} expects an SSH transport which has a factory
    attribute which has a portal attribute. Because the portal is important for
    testing authentication, we need to be able to provide an interesting portal
    object to the L{SSHUserAuthServer}.

    In addition, we want to be able to capture any packets sent over the
    transport.

    @ivar packets: a list of 2-tuples: (messageType, data). Each 2-tuple is
        a sent packet.
    @type packets: C{list}
    @param lostConnection: True if loseConnection has been called on us.
    @type lostConnection: C{bool}
    """

    class Service(object):
        """
        A mock service, representing the other service offered by the server.
        """
        name = 'nancy'

        def serviceStarted(self):
            pass

    class Factory(object):
        """
        A mock factory, representing the factory that spawned this user auth
        service.
        """

        def getService(self, transport, service):
            """
            Return our fake service.
            """
            # Only the 'none' service is recognized; any other name
            # implicitly returns None.
            if service == 'none':
                return FakeTransport.Service

    def __init__(self, portal):
        self.factory = self.Factory()
        self.factory.portal = portal
        self.lostConnection = False
        # Point transport at ourselves so packets the auth service sends
        # via self.transport.sendPacket land in our own recorder below.
        self.transport = self
        self.packets = []

    def sendPacket(self, messageType, message):
        """
        Record the packet sent by the service.
        """
        self.packets.append((messageType, message))

    def isEncrypted(self, direction):
        """
        Pretend that this transport encrypts traffic in both directions. The
        SSHUserAuthServer disables password authentication if the transport
        isn't encrypted.
        """
        return True

    def loseConnection(self):
        # Flag only; no real connection exists to tear down.
        self.lostConnection = True
class Realm(object):
    """
    A mock realm for testing L{userauth.SSHUserAuthServer}.

    This realm is not actually used in the course of testing, so it returns the
    simplest thing that could possibly work.
    """
    implements(IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        # Succeed with the first requested interface, a null avatar, and a
        # no-op logout callable.
        logout = lambda: None
        return defer.succeed((interfaces[0], None, logout))
class PasswordChecker(object):
    """
    A very simple username/password checker which authenticates anyone whose
    password matches their username and rejects all others.
    """
    credentialInterfaces = (IUsernamePassword,)
    implements(ICredentialsChecker)

    def requestAvatarId(self, creds):
        # Guard clause: any mismatch is an immediate failure.
        if creds.username != creds.password:
            return defer.fail(
                UnauthorizedLogin("Invalid username/password pair"))
        return defer.succeed(creds.username)
class PrivateKeyChecker(object):
    """
    A very simple public key checker which authenticates anyone whose
    public/private keypair is the same keydata.public/privateRSA_openssh.
    """
    credentialInterfaces = (ISSHPrivateKey,)
    implements(ICredentialsChecker)

    def requestAvatarId(self, creds):
        expectedBlob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
        # Wrong key: reject outright.
        if creds.blob != expectedBlob:
            raise UnauthorizedLogin()
        # Right key but no signature: the client is only probing whether
        # this key would be acceptable.
        if creds.signature is None:
            raise ValidPublicKey()
        key = keys.Key.fromString(creds.blob)
        if key.verify(creds.signature, creds.sigData):
            return creds.username
        # Signature present but invalid.
        raise UnauthorizedLogin()
class PAMChecker(object):
    """
    A simple PAM checker which asks the user for a password, verifying them
    if the password is the same as their username.
    """
    credentialInterfaces = (IPluggableAuthenticationModules,)
    implements(ICredentialsChecker)

    def requestAvatarId(self, creds):
        # Two prompts: an echoed name prompt (style 2) and a hidden
        # password prompt (style 1).
        conversation = creds.pamConversion([('Name: ', 2), ("Password: ", 1)])
        expected = [(creds.username, 0), (creds.username, 0)]

        def _verify(values):
            if values != expected:
                raise UnauthorizedLogin()
            return creds.username

        return conversation.addCallback(_verify)
class AnonymousChecker(object):
    """
    A simple checker which isn't supported by L{SSHUserAuthServer}.
    """
    # IAnonymous is a credential interface the SSH user-auth server has no
    # method for; registering this checker exercises handling of
    # unsupported interfaces.
    credentialInterfaces = (IAnonymous,)
    implements(ICredentialsChecker)
class SSHUserAuthServerTestCase(unittest.TestCase):
"""
Tests for SSHUserAuthServer.
"""
if keys is None:
skip = "cannot run w/o PyCrypto"
def setUp(self):
self.realm = Realm()
self.portal = Portal(self.realm)
self.portal.registerChecker(PasswordChecker())
self.portal.registerChecker(PrivateKeyChecker())
self.portal.registerChecker(PAMChecker())
self.authServer = userauth.SSHUserAuthServer()
self.authServer.transport = FakeTransport(self.portal)
self.authServer.serviceStarted()
self.authServer.supportedAuthentications.sort() # give a consistent
# order
def tearDown(self):
self.authServer.serviceStopped()
self.authServer = None
def _checkFailed(self, ignored):
"""
Check that the authentication has failed.
"""
self.assertEquals(self.authServer.transport.packets[-1],
(userauth.MSG_USERAUTH_FAILURE,
NS('keyboard-interactive,password,publickey') + '\x00'))
def test_noneAuthentication(self):
"""
A client may request a list of authentication 'method name' values
that may continue by using the "none" authentication 'method name'.
See RFC 4252 Section 5.2.
"""
d = self.authServer.ssh_USERAUTH_REQUEST(NS('foo') + NS('service') +
NS('none'))
return d.addCallback(self._checkFailed)
def test_successfulPasswordAuthentication(self):
"""
When provided with correct password authentication information, the
server should respond by sending a MSG_USERAUTH_SUCCESS message with
no other data.
See RFC 4252, Section 5.1.
"""
packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('foo')
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
def check(ignored):
self.assertEqual(
self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_SUCCESS, '')])
return d.addCallback(check)
def test_failedPasswordAuthentication(self):
"""
When provided with invalid authentication details, the server should
respond by sending a MSG_USERAUTH_FAILURE message which states whether
the authentication was partially successful, and provides other, open
options for authentication.
See RFC 4252, Section 5.1.
"""
# packet = username, next_service, authentication type, FALSE, password
packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('bar')
self.authServer.clock = task.Clock()
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.assertEquals(self.authServer.transport.packets, [])
self.authServer.clock.advance(2)
return d.addCallback(self._checkFailed)
def test_successfulPrivateKeyAuthentication(self):
"""
Test that private key authentication completes sucessfully,
"""
blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
obj = keys.Key.fromString(keydata.privateRSA_openssh)
packet = (NS('foo') + NS('none') + NS('publickey') + '\xff'
+ NS(obj.sshType()) + NS(blob))
self.authServer.transport.sessionID = 'test'
signature = obj.sign(NS('test') + chr(userauth.MSG_USERAUTH_REQUEST)
+ packet)
packet += NS(signature)
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
def check(ignored):
self.assertEquals(self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_SUCCESS, '')])
return d.addCallback(check)
def test_requestRaisesConchError(self):
    """
    ssh_USERAUTH_REQUEST should raise a ConchError if tryAuth returns
    None.  Added to catch a bug noticed by pyflakes.
    """
    d = defer.Deferred()

    def mockCbFinishedAuth(ignored):
        # Reaching the success callback means the expected ConchError was
        # never raised, so fail the test.  (Fixed: the original shadowed
        # the test case's C{self} with a parameter, which would have made
        # C{self.fail} blow up, and misspelled "ConchError".)
        self.fail('request should have raised ConchError')

    def mockTryAuth(kind, user, data):
        # Simulate the buggy case under test: no Deferred returned.
        return None

    def mockEbBadAuth(reason):
        # Forward the failure so assertFailure below can inspect it.
        d.errback(reason.value)

    self.patch(self.authServer, 'tryAuth', mockTryAuth)
    self.patch(self.authServer, '_cbFinishedAuth', mockCbFinishedAuth)
    self.patch(self.authServer, '_ebBadAuth', mockEbBadAuth)

    packet = NS('user') + NS('none') + NS('public-key') + NS('data')
    # If an error other than ConchError is raised, this will trigger an
    # exception.
    self.authServer.ssh_USERAUTH_REQUEST(packet)
    return self.assertFailure(d, ConchError)
def test_verifyValidPrivateKey(self):
    """
    Test that verifying a valid private key works.
    """
    blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
    # '\x00' is the FALSE flag: no signature included; the client is only
    # asking whether this key would be acceptable.
    packet = (NS('foo') + NS('none') + NS('publickey') + '\x00'
              + NS('ssh-rsa') + NS(blob))
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    def check(ignored):
        # An acceptable key is acknowledged with MSG_USERAUTH_PK_OK
        # echoing the algorithm name and key blob.
        self.assertEquals(self.authServer.transport.packets,
            [(userauth.MSG_USERAUTH_PK_OK, NS('ssh-rsa') + NS(blob))])
    return d.addCallback(check)


def test_failedPrivateKeyAuthenticationWithoutSignature(self):
    """
    Test that private key authentication fails when the public key
    is invalid.  (Note: 'ssh-dsa' is not the algorithm name other tests
    use for DSA keys -- they use 'ssh-dss' -- so this offer cannot be
    accepted.)
    """
    blob = keys.Key.fromString(keydata.publicDSA_openssh).blob()
    packet = (NS('foo') + NS('none') + NS('publickey') + '\x00'
              + NS('ssh-dsa') + NS(blob))
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    return d.addCallback(self._checkFailed)


def test_failedPrivateKeyAuthenticationWithSignature(self):
    """
    Test that private key authentication fails when the public key
    is invalid.  (The signature here is over the bare blob rather than
    the session id plus request packet, so it cannot verify.)
    """
    blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
    obj = keys.Key.fromString(keydata.privateRSA_openssh)
    packet = (NS('foo') + NS('none') + NS('publickey') + '\xff'
              + NS('ssh-rsa') + NS(blob) + NS(obj.sign(blob)))
    self.authServer.transport.sessionID = 'test'
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    return d.addCallback(self._checkFailed)
def test_successfulPAMAuthentication(self):
    """
    Test that keyboard-interactive authentication succeeds.
    """
    packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
              + NS('') + NS(''))
    # Client reply: count of two answers, then the two answer strings.
    response = '\x00\x00\x00\x02' + NS('foo') + NS('foo')
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
    def check(ignored):
        # The server first prompts (Name: echoed, Password: hidden),
        # then reports success.
        self.assertEquals(self.authServer.transport.packets,
            [(userauth.MSG_USERAUTH_INFO_REQUEST, (NS('') + NS('')
              + NS('') + '\x00\x00\x00\x02' + NS('Name: ') + '\x01'
              + NS('Password: ') + '\x00')),
             (userauth.MSG_USERAUTH_SUCCESS, '')])
    return d.addCallback(check)


def test_failedPAMAuthentication(self):
    """
    Test that keyboard-interactive authentication fails.
    """
    packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
              + NS('') + NS(''))
    # Wrong answers ('bar') should lead to failure after the prompt.
    response = '\x00\x00\x00\x02' + NS('bar') + NS('bar')
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
    def check(ignored):
        self.assertEquals(self.authServer.transport.packets[0],
            (userauth.MSG_USERAUTH_INFO_REQUEST, (NS('') + NS('')
             + NS('') + '\x00\x00\x00\x02' + NS('Name: ') + '\x01'
             + NS('Password: ') + '\x00')))
    return d.addCallback(check).addCallback(self._checkFailed)
def test_invalid_USERAUTH_INFO_RESPONSE_not_enough_data(self):
    """
    If ssh_USERAUTH_INFO_RESPONSE gets an invalid packet,
    the user authentication should fail.
    """
    packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
              + NS('') + NS(''))
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    # Malformed reply: the whole response is wrapped in an extra NS().
    self.authServer.ssh_USERAUTH_INFO_RESPONSE(NS('\x00\x00\x00\x00' +
                                                  NS('hi')))
    return d.addCallback(self._checkFailed)


def test_invalid_USERAUTH_INFO_RESPONSE_too_much_data(self):
    """
    If ssh_USERAUTH_INFO_RESPONSE gets too much data, the user
    authentication should fail.
    """
    packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
              + NS('') + NS(''))
    # Three answers are supplied where the count field says two.
    response = '\x00\x00\x00\x02' + NS('foo') + NS('foo') + NS('foo')
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
    return d.addCallback(self._checkFailed)
def test_onlyOnePAMAuthentication(self):
    """
    Because it requires an intermediate message, one can't send a second
    keyboard-interactive request while the first is still pending.
    """
    packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
              + NS('') + NS(''))
    self.authServer.ssh_USERAUTH_REQUEST(packet)
    self.authServer.ssh_USERAUTH_REQUEST(packet)
    # The second request must trigger a protocol-error disconnect;
    # byte 3 of the disconnect payload is the reason code.
    self.assertEquals(self.authServer.transport.packets[-1][0],
                      transport.MSG_DISCONNECT)
    self.assertEquals(self.authServer.transport.packets[-1][1][3],
                      chr(transport.DISCONNECT_PROTOCOL_ERROR))


def test_ignoreUnknownCredInterfaces(self):
    """
    L{SSHUserAuthServer} sets up
    C{SSHUserAuthServer.supportedAuthentications} by checking the portal's
    credentials interfaces and mapping them to SSH authentication method
    strings.  If the Portal advertises an interface that
    L{SSHUserAuthServer} can't map, it should be ignored.  This is a white
    box test.
    """
    server = userauth.SSHUserAuthServer()
    server.transport = FakeTransport(self.portal)
    # AnonymousChecker's interface has no SSH method mapping and must be
    # silently skipped.
    self.portal.registerChecker(AnonymousChecker())
    server.serviceStarted()
    server.serviceStopped()
    server.supportedAuthentications.sort() # give a consistent order
    self.assertEquals(server.supportedAuthentications,
                      ['keyboard-interactive', 'password', 'publickey'])
def test_removePasswordIfUnencrypted(self):
    """
    Test that the userauth service does not advertise password
    authentication if the password would be sent in cleartext.
    """
    self.assertIn('password', self.authServer.supportedAuthentications)
    # no encryption
    clearAuthServer = userauth.SSHUserAuthServer()
    clearAuthServer.transport = FakeTransport(self.portal)
    clearAuthServer.transport.isEncrypted = lambda x: False
    clearAuthServer.serviceStarted()
    clearAuthServer.serviceStopped()
    self.failIfIn('password', clearAuthServer.supportedAuthentications)
    # only encrypt incoming (the direction the password is sent)
    halfAuthServer = userauth.SSHUserAuthServer()
    halfAuthServer.transport = FakeTransport(self.portal)
    halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
    halfAuthServer.serviceStarted()
    halfAuthServer.serviceStopped()
    self.assertIn('password', halfAuthServer.supportedAuthentications)


def test_removeKeyboardInteractiveIfUnencrypted(self):
    """
    Test that the userauth service does not advertise keyboard-interactive
    authentication if the password would be sent in cleartext.
    """
    self.assertIn('keyboard-interactive',
                  self.authServer.supportedAuthentications)
    # no encryption
    clearAuthServer = userauth.SSHUserAuthServer()
    clearAuthServer.transport = FakeTransport(self.portal)
    clearAuthServer.transport.isEncrypted = lambda x: False
    clearAuthServer.serviceStarted()
    clearAuthServer.serviceStopped()
    self.failIfIn('keyboard-interactive',
                  clearAuthServer.supportedAuthentications)
    # only encrypt incoming (the direction the password is sent)
    halfAuthServer = userauth.SSHUserAuthServer()
    halfAuthServer.transport = FakeTransport(self.portal)
    halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
    halfAuthServer.serviceStarted()
    halfAuthServer.serviceStopped()
    self.assertIn('keyboard-interactive',
                  halfAuthServer.supportedAuthentications)
def test_unencryptedConnectionWithoutPasswords(self):
    """
    If the L{SSHUserAuthServer} is not advertising passwords, then an
    unencrypted connection should not cause any warnings or exceptions.
    This is a white box test.
    """
    # create a Portal without password authentication
    portal = Portal(self.realm)
    portal.registerChecker(PrivateKeyChecker())

    # no encryption
    clearAuthServer = userauth.SSHUserAuthServer()
    clearAuthServer.transport = FakeTransport(portal)
    clearAuthServer.transport.isEncrypted = lambda x: False
    clearAuthServer.serviceStarted()
    clearAuthServer.serviceStopped()
    self.assertEquals(clearAuthServer.supportedAuthentications,
                      ['publickey'])

    # only encrypt incoming (the direction the password is sent)
    halfAuthServer = userauth.SSHUserAuthServer()
    halfAuthServer.transport = FakeTransport(portal)
    halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
    halfAuthServer.serviceStarted()
    halfAuthServer.serviceStopped()
    # Fixed copy-paste bug: this assertion must examine halfAuthServer
    # (clearAuthServer was already verified above).
    self.assertEquals(halfAuthServer.supportedAuthentications,
                      ['publickey'])
def test_loginTimeout(self):
    """
    Test that the login times out.
    """
    timeoutAuthServer = userauth.SSHUserAuthServer()
    timeoutAuthServer.clock = task.Clock()
    timeoutAuthServer.transport = FakeTransport(self.portal)
    timeoutAuthServer.serviceStarted()
    # Advance well past the login timeout (11 hours).
    timeoutAuthServer.clock.advance(11 * 60 * 60)
    timeoutAuthServer.serviceStopped()
    self.assertEquals(timeoutAuthServer.transport.packets,
        [(transport.MSG_DISCONNECT,
          '\x00' * 3 +
          chr(transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE) +
          NS("you took too long") + NS(''))])
    self.assertTrue(timeoutAuthServer.transport.lostConnection)


def test_cancelLoginTimeout(self):
    """
    Test that stopping the service also stops the login timeout.
    """
    timeoutAuthServer = userauth.SSHUserAuthServer()
    timeoutAuthServer.clock = task.Clock()
    timeoutAuthServer.transport = FakeTransport(self.portal)
    timeoutAuthServer.serviceStarted()
    # Stop first; advancing the clock afterwards must have no effect.
    timeoutAuthServer.serviceStopped()
    timeoutAuthServer.clock.advance(11 * 60 * 60)
    self.assertEquals(timeoutAuthServer.transport.packets, [])
    self.assertFalse(timeoutAuthServer.transport.lostConnection)
def test_tooManyAttempts(self):
    """
    Test that the server disconnects if the client fails authentication
    too many times.
    """
    packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('bar')
    self.authServer.clock = task.Clock()
    # 21 bad attempts; advance past the bad-password delay each time.
    for i in range(21):
        d = self.authServer.ssh_USERAUTH_REQUEST(packet)
        self.authServer.clock.advance(2)
    def check(ignored):
        self.assertEquals(self.authServer.transport.packets[-1],
            (transport.MSG_DISCONNECT,
             '\x00' * 3 +
             chr(transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE) +
             NS("too many bad auths") + NS('')))
    return d.addCallback(check)


def test_failIfUnknownService(self):
    """
    If the user requests a service that we don't support, the
    authentication should fail.
    """
    # NS('') requests an empty (hence unknown) next-service name.
    packet = NS('foo') + NS('') + NS('password') + chr(0) + NS('foo')
    self.authServer.clock = task.Clock()
    d = self.authServer.ssh_USERAUTH_REQUEST(packet)
    return d.addCallback(self._checkFailed)


def test__pamConvErrors(self):
    """
    _pamConv should fail if it gets a message that's not 1 or 2.
    """
    def secondTest(ignored):
        d2 = self.authServer._pamConv([('', 90)])
        return self.assertFailure(d2, ConchError)

    d = self.authServer._pamConv([('', 3)])
    return self.assertFailure(d, ConchError).addCallback(secondTest)
def test_tryAuthEdgeCases(self):
    """
    tryAuth() has two edge cases that are difficult to reach.

    1) an authentication method auth_* returns None instead of a Deferred.

    2) an authentication type that is defined does not have a matching
       auth_* method.

    Both these cases should return a Deferred which fails with a
    ConchError.
    """
    def mockAuth(packet):
        # Edge case 1: a method that yields None rather than a Deferred.
        return None

    self.patch(self.authServer, 'auth_publickey', mockAuth) # first case
    self.patch(self.authServer, 'auth_password', None) # second case

    def secondTest(ignored):
        d2 = self.authServer.tryAuth('password', None, None)
        return self.assertFailure(d2, ConchError)

    d1 = self.authServer.tryAuth('publickey', None, None)
    return self.assertFailure(d1, ConchError).addCallback(secondTest)
class SSHUserAuthClientTestCase(unittest.TestCase):
    """
    Tests for SSHUserAuthClient.
    """
    if keys is None:
        skip = "cannot run w/o PyCrypto"

    def setUp(self):
        # A client authenticating user 'foo' over a fake transport; the
        # session id is fixed so signatures are reproducible.
        self.authClient = ClientUserAuth('foo', FakeTransport.Service())
        self.authClient.transport = FakeTransport(None)
        self.authClient.transport.sessionID = 'test'
        self.authClient.serviceStarted()

    def tearDown(self):
        self.authClient.serviceStopped()
        self.authClient = None
def test_init(self):
    """
    Test that client is initialized properly.
    """
    self.assertEquals(self.authClient.user, 'foo')
    self.assertEquals(self.authClient.instance.name, 'nancy')
    # serviceStarted() should have issued an initial 'none' request.
    self.assertEquals(self.authClient.transport.packets,
        [(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
          + NS('none'))])


def test_USERAUTH_SUCCESS(self):
    """
    Test that the client succeeds properly.
    """
    # One-element list used as a mutable cell for the closure below.
    instance = [None]
    def stubSetService(service):
        instance[0] = service
    self.authClient.transport.setService = stubSetService
    self.authClient.ssh_USERAUTH_SUCCESS('')
    # On success the client should hand its service to the transport.
    self.assertEquals(instance[0], self.authClient.instance)
def test_publickey(self):
    """
    Test that the client can authenticate with a public key.
    """
    self.authClient.ssh_USERAUTH_FAILURE(NS('publickey') + '\x00')
    # The client first offers its DSA key (flag '\x00': no signature).
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('publickey') + '\x00' + NS('ssh-dss')
         + NS(keys.Key.fromString(
             keydata.publicDSA_openssh).blob())))
    # that key isn't good
    self.authClient.ssh_USERAUTH_FAILURE(NS('publickey') + '\x00')
    blob = NS(keys.Key.fromString(keydata.publicRSA_openssh).blob())
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, (NS('foo') + NS('nancy')
         + NS('publickey') + '\x00' + NS('ssh-rsa') + blob)))
    # Server accepts the RSA key; the client must now send a signed
    # request (flag '\xff').
    self.authClient.ssh_USERAUTH_PK_OK(NS('ssh-rsa')
        + NS(keys.Key.fromString(keydata.publicRSA_openssh).blob()))
    sigData = (NS(self.authClient.transport.sessionID)
               + chr(userauth.MSG_USERAUTH_REQUEST) + NS('foo')
               + NS('nancy') + NS('publickey') + '\xff' + NS('ssh-rsa')
               + blob)
    obj = keys.Key.fromString(keydata.privateRSA_openssh)
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('publickey') + '\xff' + NS('ssh-rsa') + blob
         + NS(obj.sign(sigData))))
def test_publickey_without_privatekey(self):
    """
    If the SSHUserAuthClient doesn't return anything from signData,
    the client should start the authentication over again by requesting
    'none' authentication.
    """
    authClient = ClientAuthWithoutPrivateKey('foo',
                                             FakeTransport.Service())

    authClient.transport = FakeTransport(None)
    authClient.transport.sessionID = 'test'
    authClient.serviceStarted()
    authClient.tryAuth('publickey')
    authClient.transport.packets = []
    # With no private key, ssh_USERAUTH_PK_OK returns None and falls
    # back to requesting 'none' authentication.
    self.assertIdentical(authClient.ssh_USERAUTH_PK_OK(''), None)
    self.assertEquals(authClient.transport.packets, [
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy') +
         NS('none'))])
def test_old_publickey_getPublicKey(self):
    """
    Old SSHUserAuthClients returned strings of public key blobs from
    getPublicKey().  Test that a Deprecation warning is raised but the
    key is verified correctly.
    """
    oldAuth = OldClientAuth('foo', FakeTransport.Service())
    oldAuth.transport = FakeTransport(None)
    oldAuth.transport.sessionID = 'test'
    oldAuth.serviceStarted()
    oldAuth.transport.packets = []
    # The exact warning text is part of the public deprecation contract.
    self.assertWarns(DeprecationWarning, "Returning a string from "
        "SSHUserAuthClient.getPublicKey() is deprecated since "
        "Twisted 9.0. Return a keys.Key() instead.",
        userauth.__file__, oldAuth.tryAuth, 'publickey')
    self.assertEquals(oldAuth.transport.packets, [
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy') +
         NS('publickey') + '\x00' + NS('ssh-rsa') +
         NS(keys.Key.fromString(keydata.publicRSA_openssh).blob()))])


def test_old_publickey_getPrivateKey(self):
    """
    Old SSHUserAuthClients returned a PyCrypto key object from
    getPrivateKey().  Test that _cbSignData warns the user about the
    deprecation, but still signs the data correctly.
    """
    oldAuth = OldClientAuth('foo', FakeTransport.Service())
    d = self.assertWarns(DeprecationWarning, "Returning a PyCrypto key "
        "object from SSHUserAuthClient.getPrivateKey() is "
        "deprecated since Twisted 9.0. "
        "Return a keys.Key() instead.", userauth.__file__,
        oldAuth.signData, None, 'data')
    def _checkSignedData(sig):
        # The signature must match one produced directly by keys.Key.
        self.assertEquals(sig,
            keys.Key.fromString(keydata.privateRSA_openssh).sign(
                'data'))
    d.addCallback(_checkSignedData)
    return d
def test_no_publickey(self):
    """
    If there's no public key, auth_publickey should return a Deferred
    called back with a False value.
    """
    self.authClient.getPublicKey = lambda x: None
    d = self.authClient.tryAuth('publickey')
    def check(result):
        self.assertFalse(result)
    return d.addCallback(check)


def test_password(self):
    """
    Test that the client can authenticate with a password.  This
    includes changing the password.
    """
    self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\x00')
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('password') + '\x00' + NS('foo')))
    # A PK_OK during password auth signals a password-change request;
    # the reply carries flag '\xff' and old+new passwords.
    self.authClient.ssh_USERAUTH_PK_OK(NS('') + NS(''))
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('password') + '\xff' + NS('foo') * 2))


def test_no_password(self):
    """
    If getPassword returns None, tryAuth should return False.
    """
    self.authClient.getPassword = lambda: None
    self.assertFalse(self.authClient.tryAuth('password'))
def test_keyboardInteractive(self):
    """
    Test that the client can authenticate using keyboard-interactive
    authentication.
    """
    self.authClient.ssh_USERAUTH_FAILURE(NS('keyboard-interactive')
                                         + '\x00')
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('keyboard-interactive') + NS('') * 2))
    # During keyboard-interactive, the PK_OK-numbered message carries
    # the prompt list (two prompts: Name echoed, Password hidden).
    self.authClient.ssh_USERAUTH_PK_OK(NS('') * 3 + '\x00\x00\x00\x02'
        + NS('Name: ') + '\xff' + NS('Password: ') + '\x00')
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_INFO_RESPONSE, '\x00\x00\x00\x02'
         + NS('foo') * 2))


def test_USERAUTH_PK_OK_unknown_method(self):
    """
    If C{SSHUserAuthClient} gets a MSG_USERAUTH_PK_OK packet when it's not
    expecting it, it should fail the current authentication and move on to
    the next type.
    """
    self.authClient.lastAuth = 'unknown'
    self.authClient.transport.packets = []
    self.authClient.ssh_USERAUTH_PK_OK('')
    self.assertEquals(self.authClient.transport.packets,
        [(userauth.MSG_USERAUTH_REQUEST, NS('foo') +
          NS('nancy') + NS('none'))])
def test_USERAUTH_FAILURE_sorting(self):
    """
    ssh_USERAUTH_FAILURE should sort the methods by their position
    in SSHUserAuthClient.preferredOrder.  Methods that are not in
    preferredOrder should be sorted at the end of that list.
    """
    def auth_firstmethod():
        self.authClient.transport.sendPacket(255, 'here is data')
    def auth_anothermethod():
        self.authClient.transport.sendPacket(254, 'other data')
        return True
    self.authClient.auth_firstmethod = auth_firstmethod
    self.authClient.auth_anothermethod = auth_anothermethod

    # although they shouldn't get called, method callbacks auth_* MUST
    # exist in order for the test to work properly.
    self.authClient.ssh_USERAUTH_FAILURE(NS('anothermethod,password') +
                                         '\x00')
    # should send password packet
    self.assertEquals(self.authClient.transport.packets[-1],
        (userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
         + NS('password') + '\x00' + NS('foo')))
    self.authClient.ssh_USERAUTH_FAILURE(
        NS('firstmethod,anothermethod,password') + '\xff')
    # The unknown methods are attempted after preferred ones, in the
    # order the server listed them.
    self.assertEquals(self.authClient.transport.packets[-2:],
        [(255, 'here is data'), (254, 'other data')])


def test_disconnectIfNoMoreAuthentication(self):
    """
    If there are no more available user authentication messages,
    the SSHUserAuthClient should disconnect with code
    DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE.
    """
    self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\x00')
    self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\xff')
    # '\x0e' (14) is DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE.
    self.assertEquals(self.authClient.transport.packets[-1],
        (transport.MSG_DISCONNECT, '\x00\x00\x00\x0e' +
         NS('no more authentication methods available') +
         '\x00\x00\x00\x00'))


def test_ebAuth(self):
    """
    _ebAuth (the generic authentication error handler) should send
    a request for the 'none' authentication method.
    """
    self.authClient.transport.packets = []
    self.authClient._ebAuth(None)
    self.assertEquals(self.authClient.transport.packets,
        [(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
          + NS('none'))])
def test_defaults(self):
    """
    getPublicKey() should return None.  getPrivateKey() should return a
    failed Deferred.  getPassword() should return a failed Deferred.
    getGenericAnswers() should return a failed Deferred.
    """
    authClient = userauth.SSHUserAuthClient('foo', FakeTransport.Service())
    self.assertIdentical(authClient.getPublicKey(), None)
    # Each default getter must fail with NotImplementedError; the
    # errbacks chain so all three are exercised in sequence.
    def check(result):
        result.trap(NotImplementedError)
        d = authClient.getPassword()
        return d.addCallback(self.fail).addErrback(check2)
    def check2(result):
        result.trap(NotImplementedError)
        d = authClient.getGenericAnswers(None, None, None)
        return d.addCallback(self.fail).addErrback(check3)
    def check3(result):
        result.trap(NotImplementedError)
    d = authClient.getPrivateKey()
    return d.addCallback(self.fail).addErrback(check)
class LoopbackTestCase(unittest.TestCase):

    if keys is None:
        skip = "cannot run w/o PyCrypto or PyASN1"

    class Factory:
        class Service:
            name = 'TestService'

            def serviceStarted(self):
                # Starting the post-authentication service ends the test.
                self.transport.loseConnection()

            def serviceStopped(self):
                pass

        def getService(self, avatar, name):
            return self.Service

    def test_loopback(self):
        """
        Test that the userauth server and client play nicely with each
        other.
        """
        server = userauth.SSHUserAuthServer()
        client = ClientUserAuth('foo', self.Factory.Service())

        # set up transports
        server.transport = transport.SSHTransportBase()
        server.transport.service = server
        server.transport.isEncrypted = lambda x: True
        client.transport = transport.SSHTransportBase()
        client.transport.service = client
        server.transport.sessionID = client.transport.sessionID = ''
        # don't send key exchange packet
        server.transport.sendKexInit = client.transport.sendKexInit = \
            lambda: None

        # set up server authentication
        server.transport.factory = self.Factory()
        server.passwordDelay = 0 # remove bad password delay
        realm = Realm()
        portal = Portal(realm)
        checker = SSHProtocolChecker()
        checker.registerChecker(PasswordChecker())
        checker.registerChecker(PrivateKeyChecker())
        checker.registerChecker(PAMChecker())
        # Require all three authentication methods to succeed.
        checker.areDone = lambda aId: (
            len(checker.successfulCredentials[aId]) == 3)
        portal.registerChecker(checker)
        server.transport.factory.portal = portal

        d = loopback.loopbackAsync(server.transport, client.transport)
        server.transport.transport.logPrefix = lambda: '_ServerLoopback'
        client.transport.transport.logPrefix = lambda: '_ClientLoopback'

        server.serviceStarted()
        client.serviceStarted()

        def check(ignored):
            self.assertEquals(server.transport.service.name, 'TestService')
        return d.addCallback(check)
| apache-2.0 |
Perferom/android_external_chromium | chrome/common/extensions/docs/examples/apps/hello-python/oauth2/__init__.py | 257 | 25629 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0'  # OAuth protocol version string. Hi Blaine!
HTTP_METHOD = 'GET'  # default HTTP method for Request objects
SIGNATURE_METHOD = 'PLAINTEXT'  # default signature method name
class Error(RuntimeError):
    """Generic exception class for OAuth failures."""

    def __init__(self, message='OAuth error occurred.'):
        # Stored privately; BaseException.message is deprecated as of
        # Python 2.6, so public access goes through the property below.
        self._message = message

    def __str__(self):
        return self._message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message
class MissingSignature(Error):
    """Error indicating an OAuth signature was expected but not found."""
    pass
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    request = Request.from_consumer_and_token(consumer, token, "GET", url)
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    # Render each non-None parameter as key="escaped-value", sorted by key.
    params = ['%s="%s"' % (k, escape(v))
              for k, v in sorted(request.iteritems())
              if v is not None]
    return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
    """Escape a URL including any /."""
    # safe='~' percent-encodes '/' as well (quote's default leaves it),
    # while keeping '~' literal.
    return urllib.quote(s, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate pseudorandom number."""
    digits = []
    for _ in range(length):
        digits.append(str(random.randint(0, 9)))
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate pseudorandom number."""
    # Same digit-string scheme as generate_nonce.
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class Consumer(object):
    """A consumer of OAuth-protected services.

    The OAuth consumer is a "third-party" service that wants to access
    protected resources from an OAuth service provider on behalf of an end
    user -- essentially the OAuth client.

    A consumer is usually registered with the service provider ahead of
    time, receiving a *key* and a *secret*.  The key identifies the
    consumer on every request; the secret is used only when signing
    requests, proving the request really comes from that registered
    consumer.  With these credentials the consumer can ask the provider
    for a request token, kicking off the OAuth authorization process.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        # Both credentials are mandatory.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def __str__(self):
        return urllib.urlencode({'oauth_consumer_key': self.key,
                                 'oauth_consumer_secret': self.secret})
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.

    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.

    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """

    key = None
    secret = None
    callback = None
    # Set to the string 'true' once a callback is attached (OAuth 1.0a).
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def set_callback(self, callback):
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # Use the supplied verifier, or make up a pseudorandom one.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        # Returns the callback URL with oauth_verifier merged into its
        # query string; returns the callback unchanged if no verifier.
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                                        query, fragment))
        return self.callback

    def to_string(self):
        """Returns this token as a plain string, suitable for storage.

        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }

        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`."""

        if not len(s):
            raise ValueError("Invalid parameter string.")

        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")

        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")

        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                             "OAuth request.")

        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Decorator turning the decorated function into a property setter,
    with a generated getter/deleter operating on the instance __dict__
    under the function's name."""
    name = attr.__name__

    def _get(self):
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def _del(self):
        del self.__dict__[name]

    return property(_get, attr, _del)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.

    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.
    """

    # OAuth protocol version ('1.0').
    version = VERSION

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
        # method and url go through the @setter properties on this class,
        # which upper-case the method and compute the normalized URL.
        self.method = method
        self.url = url
        if parameters is not None:
            self.update(parameters)
@setter
def url(self, value):
    # Property setter: stores the raw URL and derives
    # self.normalized_url (scheme://netloc/path with default ports,
    # params, query and fragment stripped) for signature computation.
    self.__dict__['url'] = value
    if value is not None:
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)

        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        if scheme not in ('http', 'https'):
            raise ValueError("Unsupported URL %s (%s)." % (value, scheme))

        # Normalized URL excludes params, query, and fragment.
        self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
    else:
        self.normalized_url = None
        self.__dict__['url'] = None


@setter
def method(self, value):
    # HTTP method is stored upper-cased.
    self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
    # Convenience accessor for the two anti-replay parameters.
    return self['oauth_timestamp'], self['oauth_nonce']


def get_nonoauth_parameters(self):
    """Get any non-OAuth parameters."""
    return dict([(k, v) for k, v in self.iteritems()
                 if not k.startswith('oauth_')])


def to_header(self, realm=''):
    """Serialize as a header for an HTTPAuth request."""
    # Only oauth_* parameters belong in the Authorization header.
    oauth_params = ((k, v) for k, v in self.items()
                    if k.startswith('oauth_'))
    stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
    header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
    params_header = ', '.join(header_params)

    auth_header = 'OAuth realm="%s"' % realm
    if params_header:
        auth_header = "%s, %s" % (auth_header, params_header)

    return {'Authorization': auth_header}
def to_postdata(self):
    """Serialize as post data for a POST request."""
    # tell urlencode to deal with sequence values and map them correctly
    # to resulting querystring. for example self["k"] = ["v1", "v2"] will
    # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
    return urllib.urlencode(self, True).replace('+', '%20')


def to_url(self):
    """Serialize as a URL for a GET request."""
    base_url = urlparse.urlparse(self.url)
    try:
        query = base_url.query
    except AttributeError:
        # must be python <2.5
        query = base_url[4]
    query = parse_qs(query)
    # Merge this request's parameters into the URL's existing query.
    for k, v in self.items():
        query.setdefault(k, []).append(v)
    try:
        scheme = base_url.scheme
        netloc = base_url.netloc
        path = base_url.path
        params = base_url.params
        fragment = base_url.fragment
    except AttributeError:
        # must be python <2.5
        scheme = base_url[0]
        netloc = base_url[1]
        path = base_url[2]
        params = base_url[3]
        fragment = base_url[5]
    url = (scheme, netloc, path, params,
           urllib.urlencode(query, True), fragment)
    return urlparse.urlunparse(url)


def get_parameter(self, parameter):
    # Like dict lookup, but raises this module's Error type when the
    # parameter is absent or None.
    ret = self.get(parameter)
    if ret is None:
        raise Error('Parameter not found: %s' % parameter)
    return ret
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed.

        The oauth_signature parameter itself is excluded per the spec.
        """
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if hasattr(value, '__iter__'):
                items.extend((key, item) for item in value)
            else:
                items.append((key, value))
        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        non_oauth_url_items = list([(k, v) for k, v in url_items if not k.startswith('oauth_')])
        items.extend(non_oauth_url_items)
        # NOTE(review): values are sorted before percent-encoding; the spec
        # sorts by *encoded* value, so the two can disagree for values that
        # contain reserved characters -- confirm before relying on interop.
        encoded_str = urllib.urlencode(sorted(items))
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    Wraps httplib2.Http, transparently OAuth-signing every request made
    through it with the supplied consumer (and optional token).
    """
    def __init__(self, consumer, token=None, cache=None, timeout=None,
        proxy_info=None):
        # Fail fast on misconfiguration: a wrong consumer/token type would
        # otherwise only surface later, when a request is signed.
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")
        self.consumer = consumer
        self.token = token
        # Default signing algorithm; replaceable via set_signature_method().
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout,
            proxy_info=proxy_info)
    def set_signature_method(self, method):
        # Only SignatureMethod instances know how to build and verify
        # signature base strings.
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")
        self.method = method
    def request(self, uri, method="GET", body=None, headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        # Sign the outgoing request, carrying the oauth_* parameters in the
        # body (form POST), the URL (GET) or the Authorization header
        # (everything else, including multipart POSTs).
        DEFAULT_CONTENT_TYPE = 'application/x-www-form-urlencoded'
        if not isinstance(headers, dict):
            headers = {}
        # A POST whose Content-Type is not form-encoded (e.g. multipart)
        # cannot carry the oauth parameters in its body.
        is_multipart = method == 'POST' and headers.get('Content-Type',
            DEFAULT_CONTENT_TYPE) != DEFAULT_CONTENT_TYPE
        # Only a form-encoded POST body contributes parameters to the
        # signature base string.
        if body and method == "POST" and not is_multipart:
            parameters = dict(parse_qsl(body))
        else:
            parameters = None
        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters)
        req.sign_request(self.method, self.consumer, self.token)
        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_CONTENT_TYPE)
            if is_multipart:
                headers.update(req.to_header())
            else:
                # Re-serialize the now-signed parameter set as the body.
                body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header())
        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None

    def __init__(self, signature_methods=None):
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its protocol name."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters.

        Raises Error/MissingSignature on failure; returns the non-OAuth
        parameters on success.
        """
        # _get_version raises on an unsupported OAuth version; its return
        # value is not otherwise needed here.
        self._get_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, request):
        """Verify the correct version request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except Error:
            # An absent oauth_version defaults to the protocol version.
            # (Was a bare except, which also hid unrelated failures.)
            version = VERSION
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except Error:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))

        return signature_method

    def _get_verifier(self, request):
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)

        try:
            signature = request.get_parameter('oauth_signature')
        except Error:
            raise MissingSignature('Missing oauth_signature.')

        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)

        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)

            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)
        # Removed a leftover `built = signature_method.sign(...)` here: it
        # recomputed the signature after validation and discarded the result.

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish.

        NOTE(review): timestamps from the future pass (lapsed is negative);
        confirm whether that leniency is intended.
        """
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                self.timestamp_threshold))
class SignatureMethod(object):
    """A way of signing requests.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests. This interface shows the methods expected by the other
    `oauth` modules for signing requests. Subclass it and implement its
    methods to provide a new way to sign requests.
    """

    def signing_base(self, request, consumer, token):
        """Calculates the string that needs to be signed.

        Returns a 2-tuple: the starting key for the signing, and the
        message to be signed. The latter may be used in error messages to
        help clients debug their software.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Returns the signature for the given request, based on the
        consumer and token also provided.

        Implementations should build the message from signing_base() so it
        stays useful for debugging.
        """
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 signing per OAuth Core 1.0 section 9.2."""
    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Build the signature base string and signing key.

        Returns (key, raw): key is "consumer_secret&token_secret" (token
        part empty when no token), raw is the '&'-joined, escaped method,
        URL and normalized parameters.
        """
        if request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")

        sig = (
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        )

        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw

    def sign(self, request, consumer, token):
        """Return the base64 HMAC-SHA1 signature for the request.

        (The previous docstring, "Builds the base signature string",
        described signing_base, not this method.)
        """
        key, raw = self.signing_base(request, consumer, token)

        # HMAC object.
        try:
            from hashlib import sha1 as sha
        except ImportError:
            import sha # Deprecated

        hashed = hmac.new(key, raw, sha)

        # Calculate the digest base 64, dropping the trailing newline that
        # b2a_base64 appends.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """PLAINTEXT signing: the signature is the secrets themselves."""

    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenates the consumer key and secret with the token's
        secret."""
        parts = [escape(consumer.secret), '&']
        if token:
            parts.append(escape(token.secret))
        base = ''.join(parts)
        # Key and message are identical for PLAINTEXT.
        return base, base

    def sign(self, request, consumer, token):
        key, raw = self.signing_base(request, consumer, token)
        return raw
| bsd-3-clause |
clouddocx/boto | tests/integration/s3/test_multidelete.py | 111 | 6815 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 MultiDelete
"""
import unittest
import time
from boto.s3.key import Key
from boto.s3.deletemarker import DeleteMarker
from boto.s3.prefix import Prefix
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
class S3MultiDeleteTest(unittest.TestCase):
    """Integration tests for S3 multi-object delete.

    These run against a live AWS account: each test creates a uniquely
    named bucket in setUp and removes it (and its contents) in tearDown.
    """
    s3 = True
    def setUp(self):
        self.conn = S3Connection()
        # Timestamp suffix keeps bucket names unique across runs.
        self.bucket_name = 'multidelete-%d' % int(time.time())
        self.bucket = self.conn.create_bucket(self.bucket_name)
    def tearDown(self):
        # S3 refuses to delete non-empty buckets, so empty it first.
        for key in self.bucket:
            key.delete()
        self.bucket.delete()
    def test_delete_nothing(self):
        result = self.bucket.delete_keys([])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)
    def test_delete_illegal(self):
        # A dict is not a valid key spec; it must surface as an error.
        result = self.bucket.delete_keys([{"dict":"notallowed"}])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 1)
    def test_delete_mix(self):
        # Plain names, (name, version) tuples, named Keys and DeleteMarkers
        # are deletable (4); an empty Key, a Prefix and a dict are not (3).
        result = self.bucket.delete_keys(["king",
                                          ("mice", None),
                                          Key(name="regular"),
                                          Key(),
                                          Prefix(name="folder/"),
                                          DeleteMarker(name="deleted"),
                                          {"bad":"type"}])
        self.assertEqual(len(result.deleted), 4)
        self.assertEqual(len(result.errors), 3)
    def test_delete_quietly(self):
        # Quiet mode omits per-key success entries from the response.
        result = self.bucket.delete_keys(["king"], quiet=True)
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)
    def test_delete_must_escape(self):
        # Key names with XML-special characters must be escaped in the
        # request body.
        result = self.bucket.delete_keys([Key(name=">_<;")])
        self.assertEqual(len(result.deleted), 1)
        self.assertEqual(len(result.errors), 0)
    def test_delete_unknown_version(self):
        no_ver = Key(name="no")
        no_ver.version_id = "version"
        result = self.bucket.delete_keys([no_ver])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 1)
    def test_delete_kanji(self):
        # Non-ASCII (UTF-8) key names must round-trip correctly.
        result = self.bucket.delete_keys([u"漢字", Key(name=u"日本語")])
        self.assertEqual(len(result.deleted), 2)
        self.assertEqual(len(result.errors), 0)
    def test_delete_empty_by_list(self):
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)
    def test_delete_kanji_by_list(self):
        for key_name in [u"漢字", u"日本語", u"テスト"]:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 3)
        self.assertEqual(len(result.errors), 0)
    def test_delete_with_prefixes(self):
        for key_name in ["a", "a/b", "b"]:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')
        # First delete all "files": "a" and "b"
        result = self.bucket.delete_keys(self.bucket.list(delimiter="/"))
        self.assertEqual(len(result.deleted), 2)
        # Using delimiter will cause 1 common prefix to be listed
        # which will be skipped as an error.
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(result.errors[0].key, "a/")
        # Next delete any remaining objects: "a/b"
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 1)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(result.deleted[0].key, "a/b")
    def test_delete_too_many_versions(self):
        # configure versioning first
        self.bucket.configure_versioning(True)
        # Add 1000 initial versions as DMs by deleting them :-)
        # Adding 1000 objects is painful otherwise...
        key_names = ['key-%03d' % i for i in range(0, 1000)]
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted), 1000)
        self.assertEqual(len(result.errors), 0)
        # delete them again to create 1000 more delete markers
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted), 1000)
        self.assertEqual(len(result.errors), 0)
        # Give AWS's eventual consistency time to settle before listing.
        time.sleep(10)
        # delete all versions to delete 2000 objects.
        # this tests the 1000 limit.
        result = self.bucket.delete_keys(self.bucket.list_versions())
        self.assertEqual(len(result.deleted), 2000)
        self.assertEqual(len(result.errors), 0)
    def test_1(self):
        # Round-trip: create nkeys objects, count them, bulk-delete,
        # then confirm the bucket is empty again.
        nkeys = 100
        # create a bunch of keynames
        key_names = ['key-%03d' % i for i in range(0, nkeys)]
        # create the corresponding keys
        for key_name in key_names:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')
        # now count keys in bucket
        n = 0
        for key in self.bucket:
            n += 1
        self.assertEqual(n, nkeys)
        # now delete them all
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted), nkeys)
        self.assertEqual(len(result.errors), 0)
        # allow the deletions to propagate before recounting
        time.sleep(5)
        # now count keys in bucket
        n = 0
        for key in self.bucket:
            n += 1
        self.assertEqual(n, 0)
| mit |
endlessm/chromium-browser | v8/third_party/inspector_protocol/pdl.py | 14 | 4966 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import collections
import json
import os.path
import re
import sys
# Accumulated '#' comment text; becomes the description of the next item.
description = ''

primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object',
    'any', 'array', 'binary']


def assignType(item, type, is_array=False, map_binary_to_string=False):
    """Write a type designation onto |item|.

    Primitive type names become a 'type' entry; anything else is treated
    as a cross-reference and stored under '$ref'. Arrays wrap the element
    type in an 'items' sub-dict.
    """
    if is_array:
        item['type'] = 'array'
        item['items'] = collections.OrderedDict()
        assignType(item['items'], type, False, map_binary_to_string)
        return

    resolved = type
    # 'enum' is represented as a string type; 'binary' optionally degrades
    # to string for backends without a binary representation.
    if resolved == 'enum' or (map_binary_to_string and resolved == 'binary'):
        resolved = 'string'
    if resolved in primitiveTypes:
        item['type'] = resolved
    else:
        item['$ref'] = resolved


def createItem(d, experimental, deprecated, name=None):
    """Build an OrderedDict item seeded from |d|, attaching the pending
    description and the experimental/deprecated flags when truthy."""
    result = collections.OrderedDict(d)
    if name:
        result['name'] = name
    if description:
        result['description'] = description.strip()
    if experimental:
        result['experimental'] = True
    if deprecated:
        result['deprecated'] = True
    return result
def parse(data, file_name, map_binary_to_string=False):
    """Parse .pdl text into the JSON-style OrderedDict protocol structure.

    data: full text of the .pdl file.
    file_name: used only in error messages.
    map_binary_to_string: if True, 'binary' types degrade to 'string'.

    Prints an error and exits the process on the first malformed line.
    """
    protocol = collections.OrderedDict()
    protocol['version'] = collections.OrderedDict()
    protocol['domains'] = []
    domain = None
    item = None
    subitems = None
    nukeDescription = False
    global description

    # Compile each line-shape pattern once, instead of calling re.compile
    # on every pattern for every line as before (re's cache avoids
    # recompilation but not the repeated lookup and call overhead).
    domain_re = re.compile(
        r'^(experimental )?(deprecated )?domain (.*)')
    depends_re = re.compile(r'^  depends on ([^\s]+)')
    type_re = re.compile(r'^  (experimental )?(deprecated )?type (.*) '
                         r'extends (array of )?([^\s]+)')
    command_event_re = re.compile(
        r'^  (experimental )?(deprecated )?(command|event) (.*)')
    member_re = re.compile(
        r'^      (experimental )?(deprecated )?(optional )?'
        r'(array of )?([^\s]+) ([^\s]+)')
    subitems_re = re.compile(r'^    (parameters|returns|properties)')
    enum_re = re.compile(r'^    enum')
    version_re = re.compile(r'^version')
    major_re = re.compile(r'^  major (\d+)')
    minor_re = re.compile(r'^  minor (\d+)')
    redirect_re = re.compile(r'^    redirect ([^\s]+)')
    literal_re = re.compile(r'^      (  )?[^\s]+$')

    lines = data.split('\n')
    for i in range(0, len(lines)):
        # A non-comment line consumes the accumulated description, so it
        # is cleared at the start of the following iteration.
        if nukeDescription:
            description = ''
            nukeDescription = False
        line = lines[i]
        trimLine = line.strip()

        if trimLine.startswith('#'):
            if len(description):
                description += '\n'
            description += trimLine[2:]
            continue
        else:
            nukeDescription = True

        if len(trimLine) == 0:
            continue

        match = domain_re.match(line)
        if match:
            domain = createItem({'domain' : match.group(3)}, match.group(1),
                                match.group(2))
            protocol['domains'].append(domain)
            continue

        match = depends_re.match(line)
        if match:
            if 'dependencies' not in domain:
                domain['dependencies'] = []
            domain['dependencies'].append(match.group(1))
            continue

        match = type_re.match(line)
        if match:
            if 'types' not in domain:
                domain['types'] = []
            item = createItem({'id': match.group(3)}, match.group(1),
                              match.group(2))
            assignType(item, match.group(5), match.group(4),
                       map_binary_to_string)
            domain['types'].append(item)
            continue

        match = command_event_re.match(line)
        if match:
            # Commands and events share the same item shape; only the
            # domain list they land in differs. (Renamed from `list`,
            # which shadowed the builtin.)
            if match.group(3) == 'command':
                target_list = domain.setdefault('commands', [])
            else:
                target_list = domain.setdefault('events', [])
            item = createItem({}, match.group(1), match.group(2),
                              match.group(4))
            target_list.append(item)
            continue

        match = member_re.match(line)
        if match:
            param = createItem({}, match.group(1), match.group(2),
                               match.group(6))
            if match.group(3):
                param['optional'] = True
            assignType(param, match.group(5), match.group(4),
                       map_binary_to_string)
            if match.group(5) == 'enum':
                enumliterals = param['enum'] = []
            subitems.append(param)
            continue

        match = subitems_re.match(line)
        if match:
            subitems = item[match.group(1)] = []
            continue

        match = enum_re.match(line)
        if match:
            enumliterals = item['enum'] = []
            continue

        if version_re.match(line):
            continue

        match = major_re.match(line)
        if match:
            protocol['version']['major'] = match.group(1)
            continue

        match = minor_re.match(line)
        if match:
            protocol['version']['minor'] = match.group(1)
            continue

        match = redirect_re.match(line)
        if match:
            item['redirect'] = match.group(1)
            continue

        if literal_re.match(line):
            # enum literal
            enumliterals.append(trimLine)
            continue

        print('Error in %s:%s, illegal token: \t%s' % (file_name, i, line))
        sys.exit(1)
    return protocol
def loads(data, file_name, map_binary_to_string=False):
    """Load a protocol description from either .pdl or JSON text, chosen
    by the file extension."""
    if not file_name.endswith(".pdl"):
        return json.loads(data)
    return parse(data, file_name, map_binary_to_string)
| bsd-3-clause |
sidzan/netforce | netforce_ecom/netforce_ecom/controllers/ecom_brands.py | 4 | 2070 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.controller import Controller
from netforce.template import render
from netforce.model import get_model
from netforce.database import get_connection, get_active_db # XXX: move this
from netforce.locale import set_active_locale, get_active_locale
from .cms_base import BaseController
class Brands(BaseController):
    """Render the e-commerce brand listing page at /ecom_brands."""
    _path = "/ecom_brands"

    def get(self):
        db = get_connection()
        try:
            ctx = self.context
            ctx["brands"] = get_model("product.brand").search_browse([])
            ctx["brand_groups"] = get_model("product.brand.group").search_browse([])
            content = render("ecom_brands", ctx)
            ctx["content"] = content
            html = render("cms_layout", ctx)
            self.write(html)
            db.commit()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed. Log and roll back *before* redirecting, so
            # the traceback is preserved even if the redirect itself fails
            # (the original redirected first and only then logged).
            import traceback
            traceback.print_exc()
            db.rollback()
            self.redirect("/cms_page_not_found")

Brands.register()
| mit |
wendlers/edubot-nodemcu-fw | firmware/mp/ultrasonic.py | 1 | 2227 | ##
# Ultrasonic library for MicroPython's pyboard.
# Compatible with HC-SR04 and SRF04.
#
# Copyright 2014 - Sergio Conde Gómez <skgsergio@gmail.com>
# Improved by Mithru Vigneshwara
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
import pyb
class Ultrasonic:
    """HC-SR04 / SRF04 ultrasonic range-finder driver for the pyboard.

    tPin drives the sensor's trigger input; ePin reads the echo output.
    """
    def __init__(self, tPin, ePin):
        # WARNING: Don't use PA4-X5 or PA5-X6 as echo pin without a 1k resistor
        self.triggerPin = tPin
        self.echoPin = ePin
        # Init trigger pin (out); held low between measurements.
        self.trigger = pyb.Pin(self.triggerPin)
        self.trigger.init(pyb.Pin.OUT_PP, pyb.Pin.PULL_NONE)
        self.trigger.low()
        # Init echo pin (in)
        self.echo = pyb.Pin(self.echoPin)
        self.echo.init(pyb.Pin.IN, pyb.Pin.PULL_NONE)
    def dist(self):
        """Trigger one measurement; return the echo pulse width, or -1 on
        timeout.

        NOTE(review): despite the name `dist_in_cm` and the comment below,
        no division by 2 (round trip) or 29 (us/cm) is applied -- the
        returned value is the raw pulse width in microseconds. Confirm
        whether callers perform the conversion.
        """
        start = 0
        end = 0
        # Send a 10us pulse.
        self.trigger.high()
        pyb.udelay(10)
        self.trigger.low()
        # Wait till the pulse starts (echo going high), ~1ms timeout.
        start_tout = pyb.micros() + 1000
        while self.echo.value() == 0:
            start = pyb.micros()
            if start > start_tout:
                print("start_tout")
                return -1
        # Wait till the received pulse ends (echo back low), ~10ms timeout.
        end_tout = pyb.micros() + 10000
        while self.echo.value() == 1:
            end = pyb.micros()
            if end > end_tout:
                print("end_tout")
                return -1
        # Pulse width in us. To convert to cm: divide by 2 (round-trip)
        # and by 29 (speed of sound ~340 m/s is ~29 us/cm) -- not done
        # here; see the NOTE in the docstring.
        dist_in_cm = end - start
        return dist_in_cm
| mit |
cristiana214/cristianachavez214-cristianachavez | python/src/Demo/pdist/client.py | 47 | 4708 | """RPC Client module."""
import sys
import socket
import pickle
import __builtin__
import os
# Default verbosity (0 = silent, 1 = print connections, 2 = print requests too)
VERBOSE = 1
class Client:
    """RPC Client class. No need to derive a class -- it's fully generic.

    Speaks a pickle-based request/reply protocol over a TCP socket:
    requests are (name, args, id) tuples, replies (exception, value, id).
    """
    def __init__(self, address, verbose = VERBOSE):
        self._pre_init(address, verbose)
        self._post_init()
    def _pre_init(self, address, verbose = VERBOSE):
        # A bare int is shorthand for ('', port).
        if type(address) == type(0):
            address = ('', address)
        self._address = address
        self._verbose = verbose
        if self._verbose: print "Connecting to %s ..." % repr(address)
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect(address)
        if self._verbose: print "Connected."
        self._lastid = 0 # Last id for which a reply has been received
        self._nextid = 1 # Id of next request
        self._replies = {} # Unprocessed replies
        # Buffered file wrappers used by the pickle wire protocol.
        self._rf = self._socket.makefile('r')
        self._wf = self._socket.makefile('w')
    def _post_init(self):
        # Ask the server which methods it exports; __getattr__ consults
        # this list to create proxies lazily.
        self._methods = self._call('.methods')
    def __del__(self):
        self._close()
    def _close(self):
        if self._rf: self._rf.close()
        self._rf = None
        if self._wf: self._wf.close()
        self._wf = None
        if self._socket: self._socket.close()
        self._socket = None
    def __getattr__(self, name):
        # Lazily create (and cache on the instance) a proxy per method.
        if name in self._methods:
            method = _stub(self, name)
            setattr(self, name, method) # XXX circular reference
            return method
        raise AttributeError, name
    def _setverbose(self, verbose):
        self._verbose = verbose
    def _call(self, name, *args):
        # Synchronous call: send the request, then wait for its reply.
        return self._vcall(name, args)
    def _vcall(self, name, args):
        return self._recv(self._vsend(name, args))
    def _send(self, name, *args):
        return self._vsend(name, args)
    def _send_noreply(self, name, *args):
        return self._vsend(name, args, 0)
    def _vsend_noreply(self, name, args):
        return self._vsend(name, args, 0)
    def _vsend(self, name, args, wantreply = 1):
        # Pickle (name, args, id) onto the wire; a negative id tells the
        # server not to send a reply.
        id = self._nextid
        self._nextid = id+1
        if not wantreply: id = -id
        request = (name, args, id)
        if self._verbose > 1: print "sending request: %s" % repr(request)
        wp = pickle.Pickler(self._wf)
        wp.dump(request)
        return id
    def _recv(self, id):
        # Replies are (exception, value, id): exception is None on
        # success, otherwise the exception *name* as a string.
        exception, value, rid = self._vrecv(id)
        if rid != id:
            raise RuntimeError, "request/reply id mismatch: %d/%d" % (id, rid)
        if exception is None:
            return value
        # Map well-known exception names back to real exception objects.
        x = exception
        if hasattr(__builtin__, exception):
            x = getattr(__builtin__, exception)
        elif exception in ('posix.error', 'mac.error'):
            x = os.error
        # NOTE(review): this test looks inverted -- when the mapping above
        # succeeded, x differs from the string and is *not* assigned back,
        # so a py2 string exception is raised. Presumably `if x !=
        # exception` was intended; confirm before changing.
        if x == exception:
            exception = x
        raise exception, value
    def _vrecv(self, id):
        # Return the reply whose id matches |id|, buffering replies that
        # belong to other outstanding requests in self._replies.
        self._flush()
        if self._replies.has_key(id):
            if self._verbose > 1: print "retrieving previous reply, id = %d" % id
            reply = self._replies[id]
            del self._replies[id]
            return reply
        aid = abs(id)
        while 1:
            if self._verbose > 1: print "waiting for reply, id = %d" % id
            rp = pickle.Unpickler(self._rf)
            reply = rp.load()
            del rp
            if self._verbose > 1: print "got reply: %s" % repr(reply)
            rid = reply[2]
            arid = abs(rid)
            if arid == aid:
                if self._verbose > 1: print "got it"
                return reply
            self._replies[rid] = reply
            # Replies arrive in id order, so seeing a higher id means ours
            # was a no-reply request; synthesize a success reply.
            if arid > aid:
                if self._verbose > 1: print "got higher id, assume all ok"
                return (None, None, id)
    def _flush(self):
        self._wf.flush()
from security import Security
class SecureClient(Client, Security):
    # Client variant that performs a numeric challenge/response handshake
    # (see the Security mixin) before fetching the remote method list.
    def __init__(self, *args):
        import string
        # Run only the connection setup; _post_init (which issues the
        # first RPC) must wait until the handshake has been accepted.
        apply(self._pre_init, args)
        Security.__init__(self)
        self._wf.flush()
        # The server sends a decimal challenge terminated by a newline.
        line = self._rf.readline()
        challenge = string.atoi(string.strip(line))
        response = self._encode_challenge(challenge)
        line = repr(long(response))
        # Strip the 'L' suffix of a Python 2 long literal before sending.
        if line[-1] in 'Ll': line = line[:-1]
        self._wf.write(line + '\n')
        self._wf.flush()
        # Handshake done; now it is safe to fetch the method list.
        self._post_init()
class _stub:
"""Helper class for Client -- each instance serves as a method of the client."""
def __init__(self, client, name):
self._client = client
self._name = name
def __call__(self, *args):
return self._client._vcall(self._name, args)
| apache-2.0 |
ofermend/medicare-demo | socialite/jython/Lib/SimpleXMLRPCServer.py | 87 | 20934 | """Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object.  Raises an
    AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    # Either walk every dot-separated segment, or treat the whole name
    # as a single attribute.
    names = attr.split('.') if allow_dotted_names else [attr]
    for name in names:
        # Leading-underscore segments are considered private and are
        # refused outright, before any lookup happens.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        # Names starting with '_' are private by convention; everything
        # else is kept only if it is callable.
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Returns a copy of a list without duplicates. Every list
    item must be hashable and the order of the items in the
    resulting list is not defined.
    """
    # Use the items as dictionary keys; duplicate keys collapse
    # automatically, and the keys become the result.
    seen = dict.fromkeys(lst, 1)
    return seen.keys()
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. There should never be any
    reason to instantiate this class directly.
    """

    def __init__(self, allow_none, encoding):
        # Registry of plain functions exposed by name, plus an optional
        # single instance object searched when no function matches.
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

        *** SECURITY WARNING: ***

        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name = None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # Default the exported name to the function's own __name__.
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the prefered means
        of changing method dispatch behavior.

        Returns the XML-RPC response (or Fault) as a marshalled string.
        """
        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        except Fault, fault:
            # An explicit Fault from the handler is marshalled as-is.
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""
        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""
        method = None
        if self.funcs.has_key(method_name):
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    def is_rpc_path_valid(self):
        # Returns True when self.path is one of the accepted rpc_paths.
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None)
                )
        except: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)

    def report_404 (self):
        # Report a 404 error back to the client with a short plain-text body.
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Only log when the owning server was constructed with
        # logRequests=True.
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None):
        self.logRequests = logRequests

        # Initialize the dispatcher first; TCPServer.__init__ starts
        # listening, so the dispatcher state must already be in place.
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)

        # Emit CGI-style headers followed by the marshalled response body.
        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        # CGI uses a 'Status:' header rather than an HTTP status line.
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            if request_text is None:
                request_text = sys.stdin.read()

            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo server: exposes the builtin pow() and a lambda under the
    # name 'add', then serves requests until interrupted.
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
| apache-2.0 |
Nowheresly/account-financial-tools | account_partner_required/tests/__init__.py | 18 | 1078 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account partner required module for OpenERP
# Copyright (C) 2014 Acsone (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_partner_required
| agpl-3.0 |
watspidererik/testenv | flask/lib/python2.7/site-packages/whoosh/matching/combo.py | 92 | 9926 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
from array import array
from whoosh.compat import xrange
from whoosh.matching import mcore
class CombinationMatcher(mcore.Matcher):
    """Base class for matchers that combine the postings of several
    sub-matchers into a single stream, with an optional score boost."""

    def __init__(self, submatchers, boost=1.0):
        self._submatchers = submatchers
        self._boost = boost

    def supports_block_quality(self):
        # Block quality is only available when every child supports it.
        for sub in self._submatchers:
            if not sub.supports_block_quality():
                return False
        return True

    def max_quality(self):
        # Best quality among the still-active children, scaled by boost.
        best = max(sub.max_quality()
                   for sub in self._submatchers if sub.is_active())
        return best * self._boost

    def supports(self, astype):
        # A posting value type is supported only if all children support it.
        for sub in self._submatchers:
            if not sub.supports(astype):
                return False
        return True

    def children(self):
        return iter(self._submatchers)

    def score(self):
        # Sum of the children's scores, scaled by boost.
        total = 0
        for sub in self._submatchers:
            total += sub.score()
        return total * self._boost
class PreloadedUnionMatcher(CombinationMatcher):
    """Instead of marching the sub-matchers along in parallel, this
    matcher pre-reads the scores for EVERY MATCHING DOCUMENT, trading memory
    for speed.

    This is faster than the implementation using a binary tree of
    :class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
    because of less overhead), but it doesn't allow getting information about
    the "current" document other than the score, because there isn't really a
    current document, just an array of scores.
    """

    def __init__(self, submatchers, doccount, boost=1.0, scored=True):
        CombinationMatcher.__init__(self, submatchers, boost=boost)

        self._doccount = doccount

        # Accumulated scores, indexed by (docnum - self._offset).
        a = array("d")
        active = [subm for subm in self._submatchers if subm.is_active()]
        if active:
            # The lowest matching document number becomes the array offset.
            offset = self._docnum = min(m.id() for m in active)
            for m in active:
                # Exhaust each sub-matcher, folding its scores into the array.
                while m.is_active():
                    if scored:
                        score = m.score() * boost
                    else:
                        score = boost
                    docnum = m.id()
                    place = docnum - offset
                    if len(a) <= place:
                        # Grow the array with zeros up to this position.
                        a.extend(0 for _ in xrange(place - len(a) + 1))
                    a[place] += score
                    m.next()
            self._a = a
            self._offset = offset
        else:
            # No active sub-matchers: empty score array.
            self._docnum = 0
            self._offset = 0
        self._a = a

    def is_active(self):
        return self._docnum - self._offset < len(self._a)

    def id(self):
        return self._docnum

    def score(self):
        return self._a[self._docnum - self._offset]

    def next(self):
        a = self._a
        offset = self._offset
        place = self._docnum - offset

        # Advance past zero slots (non-matching documents) to the next
        # position with an accumulated score.
        place += 1
        while place < len(a) and a[place] == 0:
            place += 1

        self._docnum = place + offset

    def max_quality(self):
        return max(self._a[self._docnum - self._offset:])

    def block_quality(self):
        # The whole array is one "block", so block quality == max quality.
        return self.max_quality()

    def skip_to(self, docnum):
        if docnum < self._docnum:
            # We've already passed it
            return
        self._docnum = docnum
        i = docnum - self._offset
        if i < len(self._a) and self._a[i] == 0:
            # Target document doesn't match; move to the next one that does.
            self.next()

    def skip_to_quality(self, minquality):
        a = self._a
        offset = self._offset
        place = self._docnum - offset

        # NOTE(review): `skipped` is a 0/1 flag here, not a count --
        # callers appear to only need truthiness; confirm before changing.
        skipped = 0
        while place < len(a) and a[place] <= minquality:
            place += 1
            skipped = 1

        self._docnum = place + offset
        return skipped

    def supports(self, astype):
        # This matcher doesn't support any posting values
        return False

    def all_ids(self):
        # Yield every document number with a nonzero accumulated score,
        # starting from the current position.
        a = self._a
        offset = self._offset
        place = self._docnum - offset
        while place < len(a):
            if a[place] > 0:
                yield place + offset
            place += 1
class ArrayUnionMatcher(CombinationMatcher):
    """Instead of marching the sub-matchers along in parallel, this matcher
    pre-reads the scores for a large block of documents at a time from each
    matcher, accumulating the scores in an array.

    This is faster than the implementation using a binary tree of
    :class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
    because of less overhead), but it doesn't allow getting information about
    the "current" document other than the score, because there isn't really a
    current document, just an array of scores.
    """

    def __init__(self, submatchers, doccount, boost=1.0, scored=True,
                 partsize=2048):
        CombinationMatcher.__init__(self, submatchers, boost=boost)
        self._scored = scored
        self._doccount = doccount

        if not partsize:
            # A falsy partsize means "buffer the entire document space".
            partsize = doccount
        self._partsize = partsize

        # Score buffer for the current window ("part") of documents.
        self._a = array("d", (0 for _ in xrange(self._partsize)))
        self._docnum = self._min_id()
        self._read_part()

    def __repr__(self):
        return ("%s(%r, boost=%f, scored=%r, partsize=%d)"
                % (self.__class__.__name__, self._submatchers, self._boost,
                   self._scored, self._partsize))

    def _min_id(self):
        # Smallest current document number among active sub-matchers, or
        # doccount (i.e. "exhausted") when none are active.
        active = [subm for subm in self._submatchers if subm.is_active()]
        if active:
            return min(subm.id() for subm in active)
        else:
            return self._doccount

    def _read_part(self):
        # Fill self._a with accumulated scores for documents in
        # [self._docnum, self._docnum + partsize), advancing each
        # sub-matcher past the window as it goes.
        scored = self._scored
        boost = self._boost
        limit = min(self._docnum + self._partsize, self._doccount)
        offset = self._docnum
        a = self._a

        # Clear the array
        for i in xrange(self._partsize):
            a[i] = 0

        # Add the scores from the submatchers into the array
        for m in self._submatchers:
            while m.is_active() and m.id() < limit:
                i = m.id() - offset
                if scored:
                    a[i] += m.score() * boost
                else:
                    a[i] = 1
                m.next()

        self._offset = offset
        self._limit = limit

    def _find_next(self):
        # Move self._docnum forward to the next nonzero slot; when the
        # current part is exhausted, buffer the next one.
        a = self._a
        docnum = self._docnum
        offset = self._offset
        limit = self._limit

        while docnum < limit:
            if a[docnum - offset] > 0:
                break
            docnum += 1

        if docnum == limit:
            self._docnum = self._min_id()
            self._read_part()
        else:
            self._docnum = docnum

    def supports(self, astype):
        # This matcher doesn't support any posting values
        return False

    def is_active(self):
        return self._docnum < self._doccount

    def max_quality(self):
        return max(m.max_quality() for m in self._submatchers)

    def block_quality(self):
        # Quality of the currently buffered part.
        return max(self._a)

    def skip_to(self, docnum):
        if docnum < self._offset:
            # We've already passed it
            return
        elif docnum < self._limit:
            # It's in the current part
            self._docnum = docnum
            self._find_next()
            return

        # Advance all active submatchers
        submatchers = self._submatchers
        # NOTE(review): `active` is assigned but never used below; the
        # any() call re-derives activity. Left as-is.
        active = False
        for subm in submatchers:
            if subm.is_active():
                subm.skip_to(docnum)

        if any(subm.is_active() for subm in submatchers):
            # Rebuffer
            self._docnum = self._min_id()
            self._read_part()
        else:
            self._docnum = self._doccount

    def skip_to_quality(self, minquality):
        # Skip whole parts whose best score is <= minquality, counting
        # how many parts were skipped.
        skipped = 0
        while self.is_active() and self.block_quality() <= minquality:
            skipped += 1
            self._docnum = self._limit
            self._read_part()

        if self.is_active():
            self._find_next()

        return skipped

    def id(self):
        return self._docnum

    def all_ids(self):
        # Yield every matching document number, transparently rebuffering
        # each part as it is exhausted.
        doccount = self._doccount
        docnum = self._docnum
        offset = self._offset
        limit = self._limit

        a = self._a
        while docnum < doccount:
            if a[docnum - offset] > 0:
                yield docnum

            docnum += 1
            if docnum == limit:
                self._docnum = docnum
                self._read_part()
                offset = self._offset
                limit = self._limit

    def next(self):
        self._docnum += 1
        return self._find_next()

    def score(self):
        return self._a[self._docnum - self._offset]
| mit |
ubgarbage/gae-blog | django/contrib/gis/tests/layermap/tests.py | 152 | 12640 | import os
from decimal import Decimal
from django.utils.copycompat import copy
from django.utils.unittest import TestCase
from django.contrib.gis.gdal import DataSource, OGRException
from django.contrib.gis.tests.utils import mysql
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError, InvalidDecimal, MissingForeignKey
from models import \
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State, \
city_mapping, co_mapping, cofeat_mapping, inter_mapping
# Paths to the shapefile fixtures used by the tests below, relative to
# the parent package's 'data' directory.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')

# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
    """Tests LayerMapping's import of shapefile fixtures into GeoDjango
    models: keyword validation, geometry conversion, ForeignKey mapping,
    fid_range/step handling, model inheritance, and invalid geometries."""

    def test01_init(self):
        "Testing LayerMapping initialization."

        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'

        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'

        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'

        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            try:
                lm = LayerMapping(City, city_shp, bad_map)
            except LayerMapError:
                pass
            else:
                self.fail('Expected a LayerMapError.')

        # A LookupError should be thrown for bogus encodings.
        try:
            lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
        except LookupError:
            pass
        else:
            self.fail('Expected a LookupError')

    def test02_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()

        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())

        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)

            # Comparing the geometries.
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 6)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 6)

    def test03_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        try:
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        except InvalidDecimal:
            # No transactions for geoms on MySQL; delete added features.
            if mysql: Interstate.objects.all().delete()
        else:
            self.fail('Should have failed on strict import with invalid decimal values.')

        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)

        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())

        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)

        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)

            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)

            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)

    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.

            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())

    def test04_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.
        try:
            # Telling LayerMapping that we want no transformations performed on the data.
            lm = LayerMapping(County, co_shp, co_mapping, transform=False)

            # Specifying the source spatial reference system via the `source_srs` keyword.
            lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
            lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')

            # Unique may take tuple or string parameters.
            for arg in ('name', ('name', 'mpoly')):
                lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
        except:
            self.fail('No exception should be raised for proper use of keywords.')

        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
            self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)

        # No source reference system defined in the shapefile, should raise an error.
        if not mysql:
            self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)

        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
        self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
        self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)

        # There exist no State models for the ForeignKey mapping to work -- should raise
        # a MissingForeignKey exception (this error would be ignored if the `strict`
        # keyword is not set).
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)

        # Now creating the state models so the ForeignKey mapping may work.
        co, hi, tx = State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        co.save(), hi.save(), tx.save()

        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them. For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        # `transform=False`: Specifies that no transform is to be done; this
        # has the effect of ignoring the spatial reference check (because the
        # county shapefile does not have implicit spatial reference info).
        #
        # `unique='name'`: Creates models on the condition that they have
        # unique county names; geometries from each feature however will be
        # appended to the geometry collection of the unique model. Thus,
        # all of the various islands in Honolulu county will be in in one
        # database record with a MULTIPOLYGON type.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        lm.save(silent=True, strict=True)

        # A reference that doesn't use the unique keyword; a new database record will
        # created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)

        # The county helper is called to ensure integrity of County models.
        self.county_helper()

    def test05_test_fid_range_step(self):
        "Tests the `fid_range` keyword and the `step` keyword of .save()."
        # Function for clearing out all the counties before testing.
        def clear_counties(): County.objects.all().delete()

        # Initializing the LayerMapping object to use in these tests.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')

        # Bad feature id ranges should raise a type error.
        clear_counties()
        bad_ranges = (5.0, 'foo', co_shp)
        for bad in bad_ranges:
            self.assertRaises(TypeError, lm.save, fid_range=bad)

        # Step keyword should not be allowed w/`fid_range`.
        fr = (3, 5) # layer[3:5]
        self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
        lm.save(fid_range=fr)

        # Features IDs 3 & 4 are for Galveston County, Texas -- only
        # one model is returned because the `unique` keyword was set.
        qs = County.objects.all()
        self.assertEqual(1, qs.count())
        self.assertEqual('Galveston', qs[0].name)

        # Features IDs 5 and beyond for Honolulu County, Hawaii, and
        # FID 0 is for Pueblo County, Colorado.
        clear_counties()
        lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
        lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]

        # Only Pueblo & Honolulu counties should be present because of
        # the `unique` keyword. Have to set `order_by` on this QuerySet
        # or else MySQL will return a different ordering than the other dbs.
        qs = County.objects.order_by('name')
        self.assertEqual(2, qs.count())
        hi, co = tuple(qs)
        hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
        self.assertEqual('Pueblo', co.name); self.assertEqual(NUMS[co_idx], len(co.mpoly))
        self.assertEqual('Honolulu', hi.name); self.assertEqual(NUMS[hi_idx], len(hi.mpoly))

        # Testing the `step` keyword -- should get the same counties
        # regardless of we use a step that divides equally, that is odd,
        # or that is larger than the dataset.
        for st in (4,7,1000):
            clear_counties()
            lm.save(step=st, strict=True)
            self.county_helper(county_feat=False)

    def test06_model_inheritance(self):
        "Tests LayerMapping on inherited models.  See #12093."
        icity_mapping = {'name' : 'Name',
                         'population' : 'Population',
                         'density' : 'Density',
                         'point' : 'POINT',
                         'dt' : 'Created',
                         }

        # Parent model has geometry field.
        lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
        lm1.save()

        # Grandparent has geometry field.
        lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
        lm2.save()

        self.assertEqual(6, ICity1.objects.count())
        self.assertEqual(3, ICity2.objects.count())

    def test07_invalid_layer(self):
        "Tests LayerMapping on invalid geometries.  See #15378."
        invalid_mapping = {'point': 'POINT'}
        lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
                          source_srs=4326)
        lm.save(silent=True)
| bsd-3-clause |
The-Compiler/qutebrowser | scripts/dev/check_doc_changes.py | 1 | 1899 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Check if docs changed and output an error if so."""
import sys
import subprocess
import os
import os.path

# Make the repository root importable so that `scripts.utils` resolves.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
                                os.pardir))

from scripts import utils

# `git diff --exit-code` returns non-zero when the doc/ tree has uncommitted
# changes, i.e. the autogenerated docs are out of date.
code = subprocess.run(['git', '--no-pager', 'diff', '--exit-code', '--stat',
                       '--', 'doc'], check=False).returncode

# On pull-request builds GITHUB_REF is not the master branch: report the
# change but exit successfully so PRs are not blocked.
if os.environ.get('GITHUB_REF', 'refs/heads/master') != 'refs/heads/master':
    if code != 0:
        print("Docs changed but ignoring change as we're building a PR")
    sys.exit(0)

if code != 0:
    print()
    print('The autogenerated docs changed, please run this to update them:')
    print('   tox -e docs')
    print('   git commit -am "Update docs"')
    print()
    print('(Or you have uncommitted changes, in which case you can ignore '
          'this.)')
    if utils.ON_CI:
        utils.gha_error('The autogenerated docs changed')
        print()
        with utils.gha_group('Diff'):
            subprocess.run(['git', '--no-pager', 'diff'], check=True)

# Propagate git's exit status so CI fails when the docs are stale.
sys.exit(code)
| gpl-3.0 |
ThirdProject/android_external_chromium_org | chrome/app/nibs/PRESUBMIT.py | 126 | 3062 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script to verify that XIB changes are done with the right version.
See http://dev.chromium.org/developers/design-documents/mac-xib-files for more
information.
"""
import re
# Minimum is Mac OS X 10.8.1 (12B19).
HUMAN_DARWIN_VERSION = '10.8.x, x >= 1'
ALLOWED_DARWIN_VERSION = 12 # Darwin 12 = 10.8.
MINIMUM_DARWIN_RELEASE = 'B' # Release B = 10.8.1.
MINIMUM_IB_VERSION = 2549 # Xcode 4.4.1.
MAXIMUM_IB_VERSION = 3084 # Xcode 4.6.x.
HUMAN_IB_VERSION = '>= 4.4.1, <= 4.6.x'
SYSTEM_VERSION_RE = r'<string key="IBDocument\.SystemVersion">' + \
'([0-9]{,2})([A-Z])([0-9]+)</string>'
IB_VERSION_RE = \
r'<string key="IBDocument\.InterfaceBuilderVersion">([0-9]+)</string>'
def _CheckXIBSystemAndXcodeVersions(input_api, output_api, error_type):
affected_xibs = [x for x in input_api.AffectedFiles()
if x.LocalPath().endswith('.xib')]
incorrect_system_versions = []
incorrect_ib_versions = []
for xib in affected_xibs:
if len(xib.NewContents()) == 0:
continue
system_version = None
ib_version = None
new_contents = xib.NewContents()
if not new_contents:
# Deleting files is always fine.
continue
for line in new_contents:
m = re.search(SYSTEM_VERSION_RE, line)
if m:
system_version = (m.group(1), m.group(2), m.group(3))
m = re.search(IB_VERSION_RE, line)
if m:
ib_version = m.group(1)
if system_version is not None and ib_version is not None:
break
if system_version is None:
incorrect_system_versions.append(xib.LocalPath())
continue
if int(system_version[0]) != ALLOWED_DARWIN_VERSION:
incorrect_system_versions.append(xib.LocalPath())
continue
if system_version[1] < MINIMUM_DARWIN_RELEASE:
incorrect_system_versions.append(xib.LocalPath())
continue
if ib_version is None or int(ib_version) < MINIMUM_IB_VERSION or \
int(ib_version) > MAXIMUM_IB_VERSION:
incorrect_ib_versions.append(xib.LocalPath())
continue
problems = []
if incorrect_system_versions:
problems.append(error_type(
'XIB files need to be saved on Mac OS X ' + HUMAN_DARWIN_VERSION,
items=incorrect_system_versions))
if incorrect_ib_versions:
problems.append(error_type(
'XIB files need to be saved using Xcode version ' + HUMAN_IB_VERSION,
items=incorrect_ib_versions))
return problems
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run at upload time.

  Reports version problems as warnings only, so contributors can still
  upload and ask their reviewer (or someone else) to re-save the XIBs.
  """
  warning = output_api.PresubmitPromptWarning
  return _CheckXIBSystemAndXcodeVersions(input_api, output_api,
                                         error_type=warning)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run at commit time; version problems block the commit."""
  return _CheckXIBSystemAndXcodeVersions(
      input_api, output_api, error_type=output_api.PresubmitError)
| bsd-3-clause |
kmod/icbd | stdlib/python2.5/idlelib/GrepDialog.py | 67 | 4023 | import os
import fnmatch
import sys
from Tkinter import *
import SearchEngine
from SearchDialogBase import SearchDialogBase
def grep(text, io=None, flist=None):
    """Open the find-in-files dialog for the widget's current selection.

    A single GrepDialog instance is cached on the SearchEngine object so
    repeated invocations reuse the same dialog.
    """
    root = text._root()
    engine = SearchEngine.get(root)
    try:
        dialog = engine._grepdialog
    except AttributeError:
        dialog = engine._grepdialog = GrepDialog(root, engine, flist)
    selection = text.get("sel.first", "sel.last")
    dialog.open(text, selection, io)
class GrepDialog(SearchDialogBase):
    """Dialog for searching across multiple files ("find in files").

    Hits are printed to an OutputWindow, one per line, so the user can
    right-click a hit to jump to that location.
    """
    title = "Find in Files Dialog"
    icon = "Grep"
    needwrapbutton = 0  # grep has no "wrap around" option

    def __init__(self, root, engine, flist):
        """root: Tk root; engine: shared SearchEngine; flist: file list."""
        SearchDialogBase.__init__(self, root, engine)
        self.flist = flist
        self.globvar = StringVar(root)   # glob pattern of files to search
        self.recvar = BooleanVar(root)   # recurse into subdirectories?

    def open(self, text, searchphrase, io=None):
        """Show the dialog, pre-filling the glob from the current file.

        The glob defaults to "*<ext>" in the current file's directory,
        falling back to "*.py" when the file has no extension.
        """
        SearchDialogBase.open(self, text, searchphrase)
        if io:
            path = io.filename or ""
        else:
            path = ""
        dir, base = os.path.split(path)
        head, tail = os.path.splitext(base)
        if not tail:
            tail = ".py"
        self.globvar.set(os.path.join(dir, "*" + tail))

    def create_entries(self):
        """Add the "In files:" glob entry below the base dialog entries."""
        SearchDialogBase.create_entries(self)
        self.globent = self.make_entry("In files:", self.globvar)

    def create_other_buttons(self):
        """Add the "recurse" checkbutton (selected by default)."""
        f = self.make_frame()

        btn = Checkbutton(f, anchor="w",
                variable=self.recvar,
                text="Recurse down subdirectories")
        btn.pack(side="top", fill="both")
        btn.select()

    def create_command_buttons(self):
        """Add the "Search Files" button as the default command."""
        SearchDialogBase.create_command_buttons(self)
        self.make_button("Search Files", self.default_command, 1)

    def default_command(self, event=None):
        """Run the search, writing results to a fresh OutputWindow.

        sys.stdout is temporarily redirected so the print statements in
        grep_it land in the OutputWindow, then always restored.
        """
        prog = self.engine.getprog()
        if not prog:
            return
        path = self.globvar.get()
        if not path:
            self.top.bell()
            return
        from OutputWindow import OutputWindow
        save = sys.stdout
        try:
            sys.stdout = OutputWindow(self.flist)
            self.grep_it(prog, path)
        finally:
            sys.stdout = save

    def grep_it(self, prog, path):
        """Search every file matching `path` for regex `prog`, printing hits.

        Closes the dialog before searching; reads files in 100KB blocks.
        """
        dir, base = os.path.split(path)
        list = self.findfiles(dir, base, self.recvar.get())
        list.sort()
        self.close()
        pat = self.engine.getpat()
        print "Searching %r in %s ..." % (pat, path)
        hits = 0
        for fn in list:
            try:
                # NOTE(review): f is never explicitly closed; relies on GC.
                f = open(fn)
            except IOError, msg:
                print msg
                continue
            lineno = 0
            while 1:
                block = f.readlines(100000)
                if not block:
                    break
                for line in block:
                    lineno = lineno + 1
                    if line[-1:] == '\n':
                        line = line[:-1]
                    if prog.search(line):
                        sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line))
                        hits = hits + 1
        if hits:
            if hits == 1:
                s = ""
            else:
                s = "s"
            print "Found", hits, "hit%s." % s
            print "(Hint: right-click to open locations.)"
        else:
            print "No hits."

    def findfiles(self, dir, base, rec):
        """Return files under `dir` matching glob `base`; recurse if `rec`."""
        try:
            names = os.listdir(dir or os.curdir)
        except os.error, msg:
            print msg
            return []
        list = []
        subdirs = []
        for name in names:
            fn = os.path.join(dir, name)
            if os.path.isdir(fn):
                subdirs.append(fn)
            else:
                if fnmatch.fnmatch(name, base):
                    list.append(fn)
        if rec:
            for subdir in subdirs:
                list.extend(self.findfiles(subdir, base, rec))
        return list

    def close(self, event=None):
        """Hide the dialog and release the grab, if it is showing."""
        if self.top:
            self.top.grab_release()
            self.top.withdraw()
| mit |
bmazin/ARCONS-pipeline | examples/Pal2014-J0337/plotLightCurve.py | 1 | 1690 | import numpy as np
import matplotlib.pyplot as plt
import figureHeader
def plotPulseProfile(phaseBinEdges,pulseProfile,profileErrors=None,plotDoublePulse=True,ax=None,**kwargs):
    """Draw a stepped pulse profile on `ax`.

    When plotDoublePulse is True the profile is repeated over two phase
    cycles (edges shifted by +1). If profileErrors is given, error bars
    are drawn at the bin centers. Extra kwargs are forwarded to both
    ax.plot and ax.errorbar; `label` is applied to the plot call only.
    """
    label = kwargs.pop('label', '')
    if plotDoublePulse:
        edges = np.concatenate([phaseBinEdges, phaseBinEdges[1:] + 1.])
        profile = np.concatenate([pulseProfile, pulseProfile])
    else:
        edges = phaseBinEdges
        profile = pulseProfile
    # steps-post needs the last bin's value repeated to close the step.
    stepped = np.concatenate([profile, [pulseProfile[-1]]])
    ax.plot(edges, stepped, drawstyle='steps-post', label=label, **kwargs)
    if profileErrors is not None:
        if plotDoublePulse:
            errors = np.concatenate([profileErrors, profileErrors])
        else:
            errors = profileErrors
        centers = edges[0:-1] + np.diff(edges) / 2.
        ax.errorbar(centers, profile, yerr=errors, linestyle='', **kwargs)
# Load the pre-computed folded light curve written by the analysis pipeline.
lcData = np.load('lightcurvePlot.npz')
phaseBinEdges = lcData['phaseBinEdges']
phaseProfile = lcData['phaseProfile']
profileErrors = lcData['profileErrors']
# Single-cycle profile with error bars, saved as EPS and shown on screen.
fig,ax = plt.subplots()
plotPulseProfile(phaseBinEdges,phaseProfile,profileErrors,color='k',plotDoublePulse=False,ax=ax,linewidth=1.2)
ax.set_xlabel('phase')
ax.set_ylabel('counts')
fig.savefig('lightcurve.eps')
plt.show()
| gpl-2.0 |
JimCircadian/ansible | lib/ansible/modules/cloud/amazon/ec2_asg.py | 3 | 64972 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible metadata: declares a stable, certified-supported module
# interface (consumed by ansible-doc and the plugin loader).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
requirements: [ "boto3", "botocore" ]
options:
state:
description:
- register or deregister the instance
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group. Use for classic load balancers.
target_group_arns:
description:
- List of target group ARNs to use for the group. Use for application load balancers.
version_added: "2.4"
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
If unspecified then the current group value will be used.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
version_added: "2.3"
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
version_added: "1.8"
default: 'no'
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch
configuration.
version_added: "1.8"
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
version_added: "1.8"
default: 'yes'
vpc_zone_identifier:
description:
- List of VPC subnets to use
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 500 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
try increasing this value.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: 'yes'
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the
        current termination policies are maintained.
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default:
- 'autoscaling:EC2_INSTANCE_LAUNCH'
- 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
- 'autoscaling:EC2_INSTANCE_TERMINATE'
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
metrics_collection:
description:
- Enable ASG metrics collection
type: bool
default: 'no'
version_added: "2.5"
metrics_granularity:
description:
- When metrics_collection is enabled this will determine granularity of metrics collected by CloudWatch
default: "1minute"
version_added: "2.5"
metrics_list:
description:
- List of autoscaling metrics to collect when enabling metrics_collection
default:
- 'GroupMinSize'
- 'GroupMaxSize'
- 'GroupDesiredCapacity'
- 'GroupInServiceInstances'
- 'GroupPendingInstances'
- 'GroupStandbyInstances'
- 'GroupTerminatingInstances'
- 'GroupTotalInstances'
version_added: "2.5"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
#
# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
# a rolling fashion with instances using the current launch configuration, "my_new_lc".
#
# This could also be considered a rolling deploy of a pre-baked AMI.
#
# If this is a newly created group, the instances will not be replaced since all instances
# will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
# To only replace a couple of instances instead of all of them, supply a list
# to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
RETURN = '''
---
auto_scaling_group_name:
description: The unique name of the auto scaling group
returned: success
type: str
sample: "myasg"
auto_scaling_group_arn:
description: The unique ARN of the autoscaling group
returned: success
type: str
sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg"
availability_zones:
description: The availability zones for the auto scaling group
returned: success
type: list
sample: [
"us-east-1d"
]
created_time:
description: Timestamp of create time of the auto scaling group
returned: success
type: str
sample: "2017-11-08T14:41:48.272000+00:00"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
healthcheck_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
healthcheck_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
healthy_instances:
description: Number of instances in a healthy state
returned: success
type: int
sample: 5
in_service_instances:
description: Number of instances in service
returned: success
type: int
sample: 3
instance_facts:
description: Dictionary of EC2 instances and their status as it relates to the ASG.
returned: success
type: dict
sample: {
"i-0123456789012": {
"health_status": "Healthy",
"launch_config_name": "public-webapp-production-1",
"lifecycle_state": "InService"
}
}
instances:
description: list of instance IDs in the ASG
returned: success
type: list
sample: [
"i-0123456789012"
]
launch_config_name:
description: >
Name of launch configuration associated with the ASG. Same as launch_configuration_name,
provided for compatibility with ec2_asg module.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancers:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
pending_instances:
description: Number of instances in pending state
returned: success
type: int
sample: 1
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
target_group_arns:
description: List of ARNs of the target groups that the ASG populates
returned: success
type: list
sample: [
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
]
target_group_names:
description: List of names of the target groups that the ASG populates
returned: success
type: list
sample: [
"target-group-host-hello",
"target-group-path-world"
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
unhealthy_instances:
description: Number of instances in an unhealthy state
returned: success
type: int
sample: 0
viable_instances:
description: Number of instances in a viable state
returned: success
type: int
sample: 1
vpc_zone_identifier:
description: VPC zone ID / subnet id for the auto scaling group
returned: success
type: str
sample: "subnet-a31ef45f"
metrics_collection:
description: List of enabled AutosSalingGroup metrics
returned: success
type: list
sample: [
{
"Granularity": "1Minute",
"Metric": "GroupInServiceInstances"
}
]
'''
import time
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
# CamelCase group attributes of interest in describe_auto_scaling_groups output.
ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
                  'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
                  'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
                  'TerminationPolicies', 'VPCZoneIdentifier')

# snake_case per-instance keys exposed in the module's instance facts.
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')

# Shared retry settings for AWSRetry.backoff on every AWS API call below.
backoff_params = dict(tries=10, delay=3, backoff=1.5)
# Thin retrying wrappers around the boto3 AutoScaling / ELB APIs. Each one
# simply forwards to the client call, with exponential backoff on failure.
@AWSRetry.backoff(**backoff_params)
def describe_autoscaling_groups(connection, group_name):
    """Return all pages of ASG descriptions for group_name (may be [])."""
    pg = connection.get_paginator('describe_auto_scaling_groups')
    return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])


@AWSRetry.backoff(**backoff_params)
def deregister_lb_instances(connection, lb_name, instance_id):
    """Remove one instance from a classic ELB."""
    connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])


@AWSRetry.backoff(**backoff_params)
def describe_instance_health(connection, lb_name, instances):
    """Return classic-ELB health for the given instances (all, if empty)."""
    params = dict(LoadBalancerName=lb_name)
    if instances:
        params.update(Instances=instances)
    return connection.describe_instance_health(**params)


@AWSRetry.backoff(**backoff_params)
def describe_target_health(connection, target_group_arn, instances):
    """Return target-group health for the given targets."""
    return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)


@AWSRetry.backoff(**backoff_params)
def suspend_asg_processes(connection, asg_name, processes):
    """Suspend the named scaling processes on the ASG."""
    connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)


@AWSRetry.backoff(**backoff_params)
def resume_asg_processes(connection, asg_name, processes):
    """Resume the named scaling processes on the ASG."""
    connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)


@AWSRetry.backoff(**backoff_params)
def describe_launch_configurations(connection, launch_config_name):
    """Return all pages of launch configuration descriptions for the name."""
    pg = connection.get_paginator('describe_launch_configurations')
    return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()


@AWSRetry.backoff(**backoff_params)
def create_asg(connection, **params):
    """Create an auto scaling group with the given keyword parameters."""
    connection.create_auto_scaling_group(**params)


@AWSRetry.backoff(**backoff_params)
def put_notification_config(connection, asg_name, topic_arn, notification_types):
    """Subscribe an SNS topic to the given ASG notification types."""
    connection.put_notification_configuration(
        AutoScalingGroupName=asg_name,
        TopicARN=topic_arn,
        NotificationTypes=notification_types
    )


@AWSRetry.backoff(**backoff_params)
def del_notification_config(connection, asg_name, topic_arn):
    """Remove the SNS notification configuration from the ASG."""
    connection.delete_notification_configuration(
        AutoScalingGroupName=asg_name,
        TopicARN=topic_arn
    )


@AWSRetry.backoff(**backoff_params)
def attach_load_balancers(connection, asg_name, load_balancers):
    """Attach classic ELBs to the ASG."""
    connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)


@AWSRetry.backoff(**backoff_params)
def detach_load_balancers(connection, asg_name, load_balancers):
    """Detach classic ELBs from the ASG."""
    connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)


@AWSRetry.backoff(**backoff_params)
def attach_lb_target_groups(connection, asg_name, target_group_arns):
    """Attach ALB/NLB target groups to the ASG."""
    connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)


@AWSRetry.backoff(**backoff_params)
def detach_lb_target_groups(connection, asg_name, target_group_arns):
    """Detach ALB/NLB target groups from the ASG."""
    connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)


@AWSRetry.backoff(**backoff_params)
def update_asg(connection, **params):
    """Update an existing auto scaling group with the given parameters."""
    connection.update_auto_scaling_group(**params)


@AWSRetry.backoff(**backoff_params)
def delete_asg(connection, asg_name, force_delete):
    """Delete the ASG, optionally force-deleting it with instances inside."""
    connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)


@AWSRetry.backoff(**backoff_params)
def terminate_asg_instance(connection, instance_id, decrement_capacity):
    """Terminate one ASG instance, optionally decrementing desired capacity."""
    connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
                                                        ShouldDecrementDesiredCapacity=decrement_capacity)
def enforce_required_arguments():
    ''' As many arguments are not required for autoscale group deletion
        they cannot be mandatory arguments for the module, so we enforce
        them here '''
    required = ('min_size', 'max_size', 'launch_config_name')
    missing_args = [arg for arg in required if module.params[arg] is None]
    if missing_args:
        module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
    """Convert a boto3 auto scaling group description dict into the
    snake_cased facts dict returned by this module.

    Counts instances by health/lifecycle state, copies the interesting
    group attributes, and resolves any target group ARNs to names (the
    latter requires an elbv2 API call using the global `module` object
    for connection parameters).
    """
    properties = dict()
    # Instance counters are always present, even for an empty group.
    properties['healthy_instances'] = 0
    properties['in_service_instances'] = 0
    properties['unhealthy_instances'] = 0
    properties['pending_instances'] = 0
    properties['viable_instances'] = 0
    properties['terminating_instances'] = 0

    instance_facts = dict()
    autoscaling_group_instances = autoscaling_group.get('Instances')
    if autoscaling_group_instances:
        properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
        for i in autoscaling_group_instances:
            instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
                                               'lifecycle_state': i['LifecycleState'],
                                               'launch_config_name': i.get('LaunchConfigurationName')}
            # "Viable" means both healthy and in service.
            if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
                properties['viable_instances'] += 1

            if i['HealthStatus'] == 'Healthy':
                properties['healthy_instances'] += 1
            else:
                properties['unhealthy_instances'] += 1

            if i['LifecycleState'] == 'InService':
                properties['in_service_instances'] += 1
            if i['LifecycleState'] == 'Terminating':
                properties['terminating_instances'] += 1
            if i['LifecycleState'] == 'Pending':
                properties['pending_instances'] += 1
    else:
        properties['instances'] = []

    properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
    properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
    properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
    properties['created_time'] = autoscaling_group.get('CreatedTime')
    properties['instance_facts'] = instance_facts
    properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
    properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
    properties['tags'] = autoscaling_group.get('Tags')
    properties['min_size'] = autoscaling_group.get('MinSize')
    properties['max_size'] = autoscaling_group.get('MaxSize')
    properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
    # Fixed: default_cooldown was previously assigned twice with the same value.
    properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
    properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
    properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
    properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
    properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
    properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
    properties['metrics_collection'] = autoscaling_group.get('EnabledMetrics')

    if properties['target_group_arns']:
        # Resolving ARNs to names needs an elbv2 client; connection settings
        # come from the module-level `module` object.
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        elbv2_connection = boto3_conn(module,
                                      conn_type='client',
                                      resource='elbv2',
                                      region=region,
                                      endpoint=ec2_url,
                                      **aws_connect_params)
        tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
        tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result()
        target_groups = tg_result['TargetGroups']
    else:
        target_groups = []
    properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups]

    return properties
def elb_dreg(asg_connection, group_name, instance_id):
    """Deregister instance_id from every classic ELB attached to the ASG
    and wait (up to the module's wait_timeout) until no ELB still reports
    it as InService.

    No-op unless the group has ELBs and its health check type is 'ELB'.
    Uses the global `module` for connection params, debug and failure.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    wait_timeout = module.params.get('wait_timeout')
    # count > 0 means some ELB still sees the instance as InService.
    count = 1
    if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
        elb_connection = boto3_conn(module,
                                    conn_type='client',
                                    resource='elb',
                                    region=region,
                                    endpoint=ec2_url,
                                    **aws_connect_params)
    else:
        return

    for lb in as_group['LoadBalancerNames']:
        deregister_lb_instances(elb_connection, lb, instance_id)
        module.debug("De-registering %s from ELB %s" % (instance_id, lb))

    # Poll every 10s until the instance is out of service everywhere.
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        count = 0
        for lb in as_group['LoadBalancerNames']:
            lb_instances = describe_instance_health(elb_connection, lb, [])
            for i in lb_instances['InstanceStates']:
                if i['InstanceId'] == instance_id and i['State'] == "InService":
                    count += 1
                    module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
        time.sleep(10)

    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, group_name):
    """Return how many of the ASG's viable instances every attached classic
    ELB reports as InService, or None when an ELB does not know about an
    instance yet (the InvalidInstance race).

    Uses the global `module` for debug output and failure reporting.
    """
    healthy_instances = set()
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(InstanceId=instance))
    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
    module.debug("ELB instance status:")
    lb_instances = list()
    for lb in as_group.get('LoadBalancerNames'):
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
        try:
            lb_instances = describe_instance_health(elb_connection, lb, instances)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidInstance':
                return None

            module.fail_json(msg="Failed to get load balancer.",
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to get load balancer.",
                             exception=traceback.format_exc())

        for i in lb_instances.get('InstanceStates'):
            if i['State'] == "InService":
                healthy_instances.add(i['InstanceId'])
            module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
    return len(healthy_instances)
def tg_healthy(asg_connection, elbv2_connection, group_name):
    """Return how many of the ASG's healthy instances its target groups report healthy.

    Returns None when ELBv2 raises InvalidInstance, i.e. an instance known to
    the ASG has not yet shown up in the target group (registration race);
    callers treat None as "not ready yet, poll again".
    """
    healthy_instances = set()
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(Id=instance))
    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
    module.debug("Target Group instance status:")
    tg_instances = list()
    # default to an empty list: .get() may return None for a group without
    # target groups, and iterating None would raise TypeError
    for tg in as_group.get('TargetGroupARNs') or []:
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet show up in the ELB
        try:
            tg_instances = describe_target_health(elbv2_connection, tg, instances)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidInstance':
                return None
            module.fail_json(msg="Failed to get target group.",
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to get target group.",
                             exception=traceback.format_exc())
        for i in tg_instances.get('TargetHealthDescriptions'):
            if i['TargetHealth']['State'] == "healthy":
                healthy_instances.add(i['Target']['Id'])
            module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
    return len(healthy_instances)
def wait_for_elb(asg_connection, group_name):
    """Block until the group's classic ELBs report at least MinSize healthy
    instances, or fail the module on timeout.

    No-op unless the group has load balancers and HealthCheckType == 'ELB'.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    wait_timeout = module.params.get('wait_timeout')
    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
        module.debug("Waiting for ELB to consider instances healthy.")
        elb_connection = boto3_conn(module,
                                    conn_type='client',
                                    resource='elb',
                                    region=region,
                                    endpoint=ec2_url,
                                    **aws_connect_params)
        wait_timeout = time.time() + wait_timeout
        healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
        # elb_healthy() returns None on a registration race; coerce that to 0
        # so the comparison keeps polling and stays valid on Python 3, where
        # None < int raises TypeError
        while (healthy_instances or 0) < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
            module.debug("ELB thinks %s instances are healthy." % healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
        module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
def wait_for_target_group(asg_connection, group_name):
    """Block until the group's target groups report at least MinSize healthy
    instances, or fail the module on timeout.

    No-op unless the group has target groups and HealthCheckType == 'ELB'.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    wait_timeout = module.params.get('wait_timeout')
    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
        module.debug("Waiting for Target Group to consider instances healthy.")
        elbv2_connection = boto3_conn(module,
                                      conn_type='client',
                                      resource='elbv2',
                                      region=region,
                                      endpoint=ec2_url,
                                      **aws_connect_params)
        wait_timeout = time.time() + wait_timeout
        healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
        # tg_healthy() returns None on a registration race; coerce that to 0
        # so the comparison keeps polling and stays valid on Python 3, where
        # None < int raises TypeError
        while (healthy_instances or 0) < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
            module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long (message previously said "ELB instances" — copy/paste)
            module.fail_json(msg="Waited too long for target group instances to be healthy. %s" % time.asctime())
        module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
def suspend_processes(ec2_connection, as_group):
    """Reconcile the ASG's suspended scaling processes with the
    'suspend_processes' module parameter.

    Resumes processes that are suspended but no longer requested, then
    suspends the requested set.  Returns True if anything changed, False
    if the group already matched the desired state.
    """
    # distinct local name: the original shadowed this function's own name
    wanted_processes = set(module.params.get('suspend_processes'))
    # A brand-new ASG dict may not carry 'SuspendedProcesses' yet.  The old
    # try/except AttributeError could never fire here (a missing dict key
    # raises KeyError); use a defaulting lookup instead.
    suspended_processes = set(p['ProcessName'] for p in as_group.get('SuspendedProcesses') or [])
    if wanted_processes == suspended_processes:
        return False
    resume_processes = list(suspended_processes - wanted_processes)
    if resume_processes:
        resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
    if wanted_processes:
        suspend_asg_processes(ec2_connection, module.params.get('name'), list(wanted_processes))
    return True
def create_autoscaling_group(connection):
    """Create the ASG if it does not exist, otherwise update it in place.

    Reads the desired configuration from module.params, reconciles tags,
    classic load balancers and target groups on an existing group, pushes
    size/launch-config changes, optionally waits for instance/ELB/target
    group health, and returns (changed, asg_properties).
    """
    group_name = module.params.get('name')
    load_balancers = module.params['load_balancers']
    target_group_arns = module.params['target_group_arns']
    availability_zones = module.params['availability_zones']
    launch_config_name = module.params.get('launch_config_name')
    min_size = module.params['min_size']
    max_size = module.params['max_size']
    placement_group = module.params.get('placement_group')
    desired_capacity = module.params.get('desired_capacity')
    vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')
    notification_topic = module.params.get('notification_topic')
    notification_types = module.params.get('notification_types')
    metrics_collection = module.params.get('metrics_collection')
    metrics_granularity = module.params.get('metrics_granularity')
    metrics_list = module.params.get('metrics_list')
    try:
        as_groups = describe_autoscaling_groups(connection, group_name)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json(msg="Failed to describe auto scaling groups.",
                         exception=traceback.format_exc())
    # with neither subnets nor AZs supplied we will later default to every AZ
    # in the region, so open an EC2 connection to enumerate them
    if not vpc_zone_identifier and not availability_zones:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        ec2_connection = boto3_conn(module,
                                    conn_type='client',
                                    resource='ec2',
                                    region=region,
                                    endpoint=ec2_url,
                                    **aws_connect_params)
    elif vpc_zone_identifier:
        # the API expects a single comma-separated string of subnet ids
        vpc_zone_identifier = ','.join(vpc_zone_identifier)
    # translate the module's tag list into boto3 Tag dicts
    asg_tags = []
    for tag in set_tags:
        for k, v in tag.items():
            if k != 'propagate_at_launch':
                asg_tags.append(dict(Key=k,
                                     Value=to_native(v),
                                     PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
                                     ResourceType='auto-scaling-group',
                                     ResourceId=group_name))
    if not as_groups:
        # group does not exist yet: create it
        if not vpc_zone_identifier and not availability_zones:
            availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
                                                                        zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
        enforce_required_arguments()
        try:
            launch_configs = describe_launch_configurations(connection, launch_config_name)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to describe launch configurations",
                             exception=traceback.format_exc())
        if len(launch_configs['LaunchConfigurations']) == 0:
            module.fail_json(msg="No launch config found with name %s" % launch_config_name)
        if desired_capacity is None:
            desired_capacity = min_size
        ag = dict(
            AutoScalingGroupName=group_name,
            LaunchConfigurationName=launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName'],
            MinSize=min_size,
            MaxSize=max_size,
            DesiredCapacity=desired_capacity,
            Tags=asg_tags,
            HealthCheckGracePeriod=health_check_period,
            HealthCheckType=health_check_type,
            DefaultCooldown=default_cooldown,
            TerminationPolicies=termination_policies)
        # optional keys are only sent when the user supplied them
        if vpc_zone_identifier:
            ag['VPCZoneIdentifier'] = vpc_zone_identifier
        if availability_zones:
            ag['AvailabilityZones'] = availability_zones
        if placement_group:
            ag['PlacementGroup'] = placement_group
        if load_balancers:
            ag['LoadBalancerNames'] = load_balancers
        if target_group_arns:
            ag['TargetGroupARNs'] = target_group_arns
        try:
            create_asg(connection, **ag)
            if metrics_collection:
                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
            all_ag = describe_autoscaling_groups(connection, group_name)
            if len(all_ag) == 0:
                module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
            as_group = all_ag[0]
            suspend_processes(connection, as_group)
            if wait_for_instances:
                wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                if load_balancers:
                    wait_for_elb(connection, group_name)
                # Wait for target group health if target group(s)defined
                if target_group_arns:
                    wait_for_target_group(connection, group_name)
            if notification_topic:
                put_notification_config(connection, group_name, notification_topic, notification_types)
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            asg_properties = get_properties(as_group)
            changed = True
            return changed, asg_properties
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to create Autoscaling Group.",
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to create Autoscaling Group.",
                             exception=traceback.format_exc())
    else:
        # group exists: reconcile its configuration with the module params
        as_group = as_groups[0]
        initial_asg_properties = get_properties(as_group)
        changed = False
        if suspend_processes(connection, as_group):
            changed = True
        # process tag changes
        if len(set_tags) > 0:
            have_tags = as_group.get('Tags')
            want_tags = asg_tags
            dead_tags = []
            have_tag_keyvals = [x['Key'] for x in have_tags]
            want_tag_keyvals = [x['Key'] for x in want_tags]
            # delete tags present on the group but absent from the wanted set
            for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
                changed = True
                dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
                                      ResourceType='auto-scaling-group', Key=dead_tag))
                have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
            if dead_tags:
                connection.delete_tags(Tags=dead_tags)
            zipped = zip(have_tags, want_tags)
            if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
                changed = True
                connection.create_or_update_tags(Tags=asg_tags)
        # Handle load balancer attachments/detachments
        # Attach load balancers if they are specified but none currently exist
        if load_balancers and not as_group['LoadBalancerNames']:
            changed = True
            try:
                attach_load_balancers(connection, group_name, load_balancers)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc())
        # Update load balancers if they are specified and one or more already exists
        elif as_group['LoadBalancerNames']:
            change_load_balancers = load_balancers is not None
            # Get differences
            if not load_balancers:
                load_balancers = list()
            wanted_elbs = set(load_balancers)
            has_elbs = set(as_group['LoadBalancerNames'])
            # check if all requested are already existing
            if has_elbs - wanted_elbs and change_load_balancers:
                # if wanted contains less than existing, then we need to delete some
                elbs_to_detach = has_elbs.difference(wanted_elbs)
                if elbs_to_detach:
                    changed = True
                    try:
                        detach_load_balancers(connection, group_name, list(elbs_to_detach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)),
                                         exception=traceback.format_exc())
            if wanted_elbs - has_elbs:
                # if has contains less than wanted, then we need to add some
                elbs_to_attach = wanted_elbs.difference(has_elbs)
                if elbs_to_attach:
                    changed = True
                    try:
                        attach_load_balancers(connection, group_name, list(elbs_to_attach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)),
                                         exception=traceback.format_exc())
        # Handle target group attachments/detachments
        # Attach target groups if they are specified but none currently exist
        if target_group_arns and not as_group['TargetGroupARNs']:
            changed = True
            try:
                attach_lb_target_groups(connection, group_name, target_group_arns)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc())
        # Update target groups if they are specified and one or more already exists
        elif target_group_arns is not None and as_group['TargetGroupARNs']:
            # Get differences
            wanted_tgs = set(target_group_arns)
            has_tgs = set(as_group['TargetGroupARNs'])
            # check if all requested are already existing
            if has_tgs.issuperset(wanted_tgs):
                # if wanted contains less than existing, then we need to delete some
                tgs_to_detach = has_tgs.difference(wanted_tgs)
                if tgs_to_detach:
                    changed = True
                    try:
                        detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)),
                                         exception=traceback.format_exc())
            if wanted_tgs.issuperset(has_tgs):
                # if has contains less than wanted, then we need to add some
                tgs_to_attach = wanted_tgs.difference(has_tgs)
                if tgs_to_attach:
                    changed = True
                    try:
                        attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
                                         exception=traceback.format_exc())
        # check for attributes that aren't required for updating an existing ASG
        # check if min_size/max_size/desired capacity have been specified and if not use ASG values
        if min_size is None:
            min_size = as_group['MinSize']
        if max_size is None:
            max_size = as_group['MaxSize']
        if desired_capacity is None:
            desired_capacity = as_group['DesiredCapacity']
        launch_config_name = launch_config_name or as_group['LaunchConfigurationName']
        try:
            launch_configs = describe_launch_configurations(connection, launch_config_name)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to describe launch configurations",
                             exception=traceback.format_exc())
        if len(launch_configs['LaunchConfigurations']) == 0:
            module.fail_json(msg="No launch config found with name %s" % launch_config_name)
        ag = dict(
            AutoScalingGroupName=group_name,
            LaunchConfigurationName=launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName'],
            MinSize=min_size,
            MaxSize=max_size,
            DesiredCapacity=desired_capacity,
            HealthCheckGracePeriod=health_check_period,
            HealthCheckType=health_check_type,
            DefaultCooldown=default_cooldown,
            TerminationPolicies=termination_policies)
        if availability_zones:
            ag['AvailabilityZones'] = availability_zones
        if vpc_zone_identifier:
            ag['VPCZoneIdentifier'] = vpc_zone_identifier
        try:
            update_asg(connection, **ag)
            if metrics_collection:
                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
            else:
                connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
                             exception=traceback.format_exc())
        if notification_topic:
            try:
                put_notification_config(connection, group_name, notification_topic, notification_types)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
                                 exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
                                 exception=traceback.format_exc())
        if wait_for_instances:
            wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            # Wait for ELB health if ELB(s)defined
            if load_balancers:
                module.debug('\tWAITING FOR ELB HEALTH')
                wait_for_elb(connection, group_name)
            # Wait for target group health if target group(s)defined
            if target_group_arns:
                module.debug('\tWAITING FOR TG HEALTH')
                wait_for_target_group(connection, group_name)
        try:
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            asg_properties = get_properties(as_group)
            # changed is reported by comparing the group's before/after snapshots
            if asg_properties != initial_asg_properties:
                changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to read existing Autoscaling Groups.",
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to read existing Autoscaling Groups.",
                             exception=traceback.format_exc())
        return changed, asg_properties
def delete_autoscaling_group(connection):
    """Delete the ASG named by the 'name' module parameter.

    With wait_for_instances set, the group is first scaled to zero and we
    poll until its instances are gone before a normal delete; otherwise the
    group is force-deleted immediately.  Fails the module if either the
    drain or the delete exceeds wait_timeout.  Returns True if a group was
    deleted, False if no group by that name existed.
    """
    group_name = module.params.get('name')
    notification_topic = module.params.get('notification_topic')
    wait_for_instances = module.params.get('wait_for_instances')
    wait_timeout = module.params.get('wait_timeout')
    if notification_topic:
        del_notification_config(connection, group_name, notification_topic)
    groups = describe_autoscaling_groups(connection, group_name)
    if groups:
        # wait_timeout is rebound from a duration to an absolute deadline
        wait_timeout = time.time() + wait_timeout
        if not wait_for_instances:
            delete_asg(connection, group_name, force_delete=True)
        else:
            # scale the group to zero, then wait for its instances to drain
            updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
            update_asg(connection, **updated_params)
            instances = True
            while instances and wait_for_instances and wait_timeout >= time.time():
                tmp_groups = describe_autoscaling_groups(connection, group_name)
                if tmp_groups:
                    tmp_group = tmp_groups[0]
                    if not tmp_group.get('Instances'):
                        instances = False
                time.sleep(10)
            if wait_timeout <= time.time():
                # waiting took too long
                module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
            delete_asg(connection, group_name, force_delete=False)
        # wait for the group itself to disappear
        while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
            time.sleep(5)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
        return True
    return False
def get_chunks(l, n):
    """Yield successive slices of *l*, each containing at most *n* items."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def update_size(connection, group, max_size, min_size, dc):
    """Push new minimum, maximum and desired sizes to the ASG in *group*."""
    module.debug("setting ASG sizes")
    module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
    new_settings = {
        'AutoScalingGroupName': group['AutoScalingGroupName'],
        'MinSize': min_size,
        'MaxSize': max_size,
        'DesiredCapacity': dc,
    }
    update_asg(connection, **new_settings)
def replace(connection):
    """Perform a rolling replacement of the ASG's instances in batches.

    Temporarily grows the group by replace_batch_size, then for each batch
    terminates old instances, waits for viable replacements and ELB/target
    group health, and finally restores the original sizes.  Returns
    (changed, asg_properties).
    """
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    max_size = module.params.get('max_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    lc_check = module.params.get('lc_check')
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    if desired_capacity is None:
        desired_capacity = as_group['DesiredCapacity']
    # make sure the group is at steady state before starting the rollout
    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
    props = get_properties(as_group)
    instances = props['instances']
    if replace_all_instances:
        # If replacing all instances, then set replace_instances to current set
        # This allows replace_instances and replace_all_instances to behave same
        replace_instances = instances
    if replace_instances:
        instances = replace_instances
    # check to see if instances are replaceable if checking launch configs
    new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
    num_new_inst_needed = desired_capacity - len(new_instances)
    if lc_check:
        if num_new_inst_needed == 0 and old_instances:
            module.debug("No new instances needed, but old instances are present. Removing old instances")
            terminate_batch(connection, old_instances, instances, True)
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            props = get_properties(as_group)
            changed = True
            return(changed, props)
        # we don't want to spin up extra instances if not necessary
        if num_new_inst_needed < batch_size:
            module.debug("Overriding batch size to %s" % num_new_inst_needed)
            batch_size = num_new_inst_needed
    if not old_instances:
        # nothing left to replace
        changed = False
        return(changed, props)
    # check if min_size/max_size/desired capacity have been specified and if not use ASG values
    if min_size is None:
        min_size = as_group['MinSize']
    if max_size is None:
        max_size = as_group['MaxSize']
    # set temporary settings and wait for them to be reached
    # This should get overwritten if the number of instances left is less than the batch size.
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
    wait_for_elb(connection, group_name)
    wait_for_target_group(connection, group_name)
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances
    module.debug("beginning main loop")
    for i in get_chunks(instances, batch_size):
        # break out of this loop if we have enough new instances
        break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
        wait_for_term_inst(connection, term_instances)
        wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
        wait_for_elb(connection, group_name)
        wait_for_target_group(connection, group_name)
        as_group = describe_autoscaling_groups(connection, group_name)[0]
        if break_early:
            module.debug("breaking loop")
            break
    # restore the original group sizes
    update_size(connection, as_group, max_size, min_size, desired_capacity)
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    asg_properties = get_properties(as_group)
    module.debug("Rolling update complete.")
    changed = True
    return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
    """Partition the group's instances into (new_instances, old_instances).

    With lc_check, "new" means the instance runs the group's current launch
    configuration; otherwise "new" means the instance was not part of
    *initial_instances*.
    """
    new_instances = []
    old_instances = []
    if lc_check:
        # old instances are those that have the old launch config
        current_lc = props['launch_config_name']
        for inst in props['instances']:
            if props['instance_facts'][inst]['launch_config_name'] == current_lc:
                new_instances.append(inst)
            else:
                old_instances.append(inst)
    else:
        module.debug("Comparing initial instances with current: %s" % initial_instances)
        for inst in props['instances']:
            (old_instances if inst in initial_instances else new_instances).append(inst)
    module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
    module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
    return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
    """Return the subset of *replace_instances* that may be terminated.

    Candidates must actually belong to the ASG (per *props*); with lc_check
    they must also run a non-current launch config, otherwise they must be
    part of *initial_instances*.  Input order is preserved.
    """
    # check to make sure instances given are actually in the given ASG
    candidates = [inst for inst in replace_instances if inst in props['instances']]
    if lc_check:
        # keep only instances on a non-current launch config
        current_lc = props['launch_config_name']
        return [inst for inst in candidates
                if props['instance_facts'][inst]['launch_config_name'] != current_lc]
    return [inst for inst in candidates if inst in initial_instances]
def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
    """Terminate one batch of replaceable instances during a rolling update.

    Decides whether terminations should decrement desired capacity, trims
    the batch when fewer replacements are still needed, de-registers each
    instance from its ELBs and terminates it.  Returns
    (break_loop, desired_size, instances_terminated).
    """
    batch_size = module.params.get('replace_batch_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    group_name = module.params.get('name')
    lc_check = module.params.get('lc_check')
    decrement_capacity = False
    break_loop = False
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    if desired_capacity is None:
        desired_capacity = as_group['DesiredCapacity']
    props = get_properties(as_group)
    desired_size = as_group['MinSize']
    new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
    num_new_inst_needed = desired_capacity - len(new_instances)
    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
    module.debug("new instances needed: %s" % num_new_inst_needed)
    module.debug("new instances: %s" % new_instances)
    module.debug("old instances: %s" % old_instances)
    module.debug("batch instances: %s" % ",".join(instances_to_terminate))
    if num_new_inst_needed == 0:
        # enough new instances exist already: shrink back and finish up
        decrement_capacity = True
        if as_group['MinSize'] != min_size:
            updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
            update_asg(connection, **updated_params)
            module.debug("Updating minimum size back to original of %s" % min_size)
        # if are some leftover old instances, but we are already at capacity with new ones
        # we don't want to decrement capacity
        if leftovers:
            decrement_capacity = False
        break_loop = True
        instances_to_terminate = old_instances
        desired_size = min_size
        module.debug("No new instances needed")
    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
        # trim the final, partial batch to exactly what is still required
        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
        decrement_capacity = False
        break_loop = False
        module.debug("%s new instances needed" % num_new_inst_needed)
    module.debug("decrementing capacity: %s" % decrement_capacity)
    for instance_id in instances_to_terminate:
        elb_dreg(connection, group_name, instance_id)
        module.debug("terminating instance: %s" % instance_id)
        terminate_asg_instance(connection, instance_id, decrement_capacity)
    # we wait to make sure the machines we marked as Unhealthy are
    # no longer in the list
    return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, term_instances):
    """Poll the ASG until none of *term_instances* is still Terminating or
    Unhealthy, failing the module if the wait exceeds wait_timeout."""
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    pending = 1
    deadline = time.time() + wait_timeout
    while pending > 0 and time.time() < deadline:
        module.debug("waiting for instances to terminate")
        pending = 0
        as_group = describe_autoscaling_groups(connection, group_name)[0]
        instance_facts = get_properties(as_group)['instance_facts']
        for inst in (x for x in instance_facts if x in term_instances):
            lifecycle = instance_facts[inst]['lifecycle_state']
            health = instance_facts[inst]['health_status']
            module.debug("Instance %s has state of %s,%s" % (inst, lifecycle, health))
            if lifecycle.startswith('Terminating') or health == 'Unhealthy':
                pending += 1
        time.sleep(10)
    if time.time() >= deadline:
        # waiting took too long
        module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
    """Poll the ASG until props[prop] reaches *desired_size* or the timeout
    expires (in which case the module fails).  Returns the latest props."""
    # make sure we have the latest stats after that last loop.
    props = get_properties(describe_autoscaling_groups(connection, group_name)[0])
    module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
    deadline = time.time() + wait_timeout
    # now we make sure that we have enough instances in a viable state
    while time.time() < deadline:
        if props[prop] >= desired_size:
            break
        module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
        time.sleep(10)
        props = get_properties(describe_autoscaling_groups(connection, group_name)[0])
    if time.time() >= deadline:
        # waiting took too long
        module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
    module.debug("Reached %s: %s" % (prop, desired_size))
    return props
def asg_exists(connection):
    """Return True if an auto scaling group with the configured name exists."""
    group_name = module.params.get('name')
    return bool(describe_autoscaling_groups(connection, group_name))
def main():
    """Module entry point: declare the argument spec, connect to the
    AutoScaling API and dispatch to the create/update, delete and
    rolling-replace handlers based on the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            load_balancers=dict(type='list'),
            target_group_arns=dict(type='list'),
            availability_zones=dict(type='list'),
            launch_config_name=dict(type='str'),
            min_size=dict(type='int'),
            max_size=dict(type='int'),
            placement_group=dict(type='str'),
            desired_capacity=dict(type='int'),
            vpc_zone_identifier=dict(type='list'),
            replace_batch_size=dict(type='int', default=1),
            replace_all_instances=dict(type='bool', default=False),
            replace_instances=dict(type='list', default=[]),
            lc_check=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=300),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='list', default=[]),
            health_check_period=dict(type='int', default=300),
            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
            default_cooldown=dict(type='int', default=300),
            wait_for_instances=dict(type='bool', default=True),
            termination_policies=dict(type='list', default='Default'),
            notification_topic=dict(type='str', default=None),
            notification_types=dict(type='list', default=[
                'autoscaling:EC2_INSTANCE_LAUNCH',
                'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
                'autoscaling:EC2_INSTANCE_TERMINATE',
                'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
            ]),
            suspend_processes=dict(type='list', default=[]),
            metrics_collection=dict(type='bool', default=False),
            metrics_granularity=dict(type='str', default='1Minute'),
            metrics_list=dict(type='list', default=[
                'GroupMinSize',
                'GroupMaxSize',
                'GroupDesiredCapacity',
                'GroupInServiceInstances',
                'GroupPendingInstances',
                'GroupStandbyInstances',
                'GroupTerminatingInstances',
                'GroupTotalInstances'
            ])
        ),
    )
    # the helper functions in this file reference the module object globally
    global module
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['replace_all_instances', 'replace_instances']]
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    state = module.params.get('state')
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module,
                            conn_type='client',
                            resource='autoscaling',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_params)
    changed = create_changed = replace_changed = False
    # existence is recorded before any create so that instance replacement
    # only runs against groups that existed when the module was invoked
    exists = asg_exists(connection)
    if state == 'present':
        create_changed, asg_properties = create_autoscaling_group(connection)
    elif state == 'absent':
        changed = delete_autoscaling_group(connection)
        module.exit_json(changed=changed)
    # Only replace instances if asg existed at start of call
    if exists and (replace_all_instances or replace_instances):
        replace_changed, asg_properties = replace(connection)
    if create_changed or replace_changed:
        changed = True
    module.exit_json(changed=changed, **asg_properties)
# standard Ansible module entry guard: run only when executed as a script
if __name__ == '__main__':
    main()
| gpl-3.0 |
wpjesus/codematch | ietf/doc/tests_conflict_review.py | 1 | 18487 | # -*- coding: utf-8 -*-
import os
import shutil
from pyquery import PyQuery
from StringIO import StringIO
from textwrap import wrap
from django.conf import settings
from django.core.urlresolvers import reverse as urlreverse
from ietf.doc.models import Document, DocEvent, NewRevisionDocEvent, BallotPositionDocEvent, TelechatDocEvent, State
from ietf.doc.utils import create_ballot_if_not_open
from ietf.doc.views_conflict_review import default_approval_text
from ietf.group.models import Person
from ietf.iesg.models import TelechatDate
from ietf.name.models import StreamName
from ietf.utils.test_utils import TestCase, unicontent
from ietf.utils.mail import outbox, empty_outbox
from ietf.utils.test_data import make_test_data
from ietf.utils.test_utils import login_testing_unauthorized
class ConflictReviewTests(TestCase):
    """Exercise the conflict review workflow views: starting a review,
    changing its state, editing notices/AD/telechat date, and approval.

    Note: uses the non-deprecated ``assertEqual`` spelling throughout for
    consistency (``assertEquals`` is a deprecated alias).
    """

    def test_start_review_as_secretary(self):
        """Secretariat can start a conflict review on an ISE-stream draft."""
        doc = Document.objects.get(name='draft-imaginary-independent-submission')
        url = urlreverse('conflict_review_start',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "secretary", url)

        # can't start conflict reviews on documents not in the ise or irtf streams
        r = self.client.get(url)
        self.assertEqual(r.status_code, 404)

        doc.stream=StreamName.objects.get(slug='ise')
        doc.save()

        # normal get should succeed and get a reasonable form
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('form select[name=create_in_state]')),1)

        # faulty posts
        r = self.client.post(url,dict(create_in_state=""))
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertTrue(len(q('form .has-error')) > 0)
        self.assertEqual(Document.objects.filter(name='conflict-review-imaginary-independent-submission').count() , 0)

        r = self.client.post(url,dict(ad=""))
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertTrue(len(q('form .has-error')) > 0)
        self.assertEqual(Document.objects.filter(name='conflict-review-imaginary-independent-submission').count() , 0)

        # successful review start
        ad_strpk = str(Person.objects.get(name='Areað Irector').pk)
        state_strpk = str(State.objects.get(used=True, slug='needshep',type__slug='conflrev').pk)
        r = self.client.post(url,dict(ad=ad_strpk,create_in_state=state_strpk,notify='ipu@ietf.org'))
        self.assertEqual(r.status_code, 302)
        review_doc = Document.objects.get(name='conflict-review-imaginary-independent-submission')
        self.assertEqual(review_doc.get_state('conflrev').slug,'needshep')
        self.assertEqual(review_doc.rev,u'00')
        self.assertEqual(review_doc.ad.name,u'Areað Irector')
        self.assertEqual(review_doc.notify,u'ipu@ietf.org')
        doc = Document.objects.get(name='draft-imaginary-independent-submission')
        self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])
        self.assertTrue(review_doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review requested"))
        self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review initiated"))
        self.assertTrue('Conflict Review requested' in outbox[-1]['Subject'])

        # verify you can't start a review when a review is already in progress
        r = self.client.post(url,dict(ad="Areað Irector",create_in_state="Needs Shepherd",notify='ipu@ietf.org'))
        self.assertEqual(r.status_code, 404)

    def test_start_review_as_stream_owner(self):
        """ISE chair can start a review only on documents in the ISE stream."""
        doc = Document.objects.get(name='draft-imaginary-independent-submission')
        url = urlreverse('conflict_review_start',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ise-chair", url)

        # can't start conflict reviews on documents not in a stream
        r = self.client.get(url)
        self.assertEqual(r.status_code, 404)

        # can't start conflict reviews on documents in some other stream
        doc.stream=StreamName.objects.get(slug='irtf')
        doc.save()
        r = self.client.get(url)
        self.assertEqual(r.status_code, 404)

        # successful get
        doc.stream=StreamName.objects.get(slug='ise')
        doc.save()
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('form input[name=notify]')),1)
        self.assertEqual(len(q('form select[name=ad]')),0)

        # successfully starts a review, and notifies the secretariat
        messages_before = len(outbox)
        r = self.client.post(url,dict(notify='ipu@ietf.org'))
        self.assertEqual(r.status_code, 302)
        review_doc = Document.objects.get(name='conflict-review-imaginary-independent-submission')
        self.assertEqual(review_doc.get_state('conflrev').slug,'needshep')
        self.assertEqual(review_doc.rev,u'00')
        self.assertEqual(review_doc.telechat_date(),None)
        self.assertEqual(review_doc.ad.name,u'Ietf Chair')
        self.assertEqual(review_doc.notify,u'ipu@ietf.org')
        doc = Document.objects.get(name='draft-imaginary-independent-submission')
        self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])

        self.assertEqual(len(outbox), messages_before + 2)
        self.assertTrue('Conflict Review requested' in outbox[-1]['Subject'])
        self.assertTrue('drafts-eval@icann.org' in outbox[-1]['To'])
        self.assertTrue('Conflict Review requested' in outbox[-2]['Subject'])
        self.assertTrue('iesg-secretary@' in outbox[-2]['To'])

    def test_change_state(self):
        """State changes create comments; IESG Evaluation opens a ballot."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_change_state',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('form select[name=review_state]')),1)

        # faulty post
        r = self.client.post(url,dict(review_state=""))
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertTrue(len(q('form .has-error')) > 0)

        # successful change to AD Review
        adrev_pk = str(State.objects.get(used=True, slug='adrev',type__slug='conflrev').pk)
        r = self.client.post(url,dict(review_state=adrev_pk,comment='RDNK84ZD'))
        self.assertEqual(r.status_code, 302)
        review_doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(review_doc.get_state('conflrev').slug,'adrev')
        self.assertTrue(review_doc.latest_event(DocEvent,type="added_comment").desc.startswith('RDNK84ZD'))
        self.assertFalse(review_doc.active_ballot())

        # successful change to IESG Evaluation
        iesgeval_pk = str(State.objects.get(used=True, slug='iesgeval',type__slug='conflrev').pk)
        r = self.client.post(url,dict(review_state=iesgeval_pk,comment='TGmZtEjt'))
        self.assertEqual(r.status_code, 302)
        review_doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(review_doc.get_state('conflrev').slug,'iesgeval')
        self.assertTrue(review_doc.latest_event(DocEvent,type="added_comment").desc.startswith('TGmZtEjt'))
        self.assertTrue(review_doc.active_ballot())
        self.assertEqual(review_doc.latest_event(BallotPositionDocEvent, type="changed_ballot_position").pos_id,'yes')

    def test_edit_notices(self):
        """Notification list can be changed and regenerated (without saving)."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_notices',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('form input[name=notify]')),1)
        self.assertEqual(doc.notify,q('form input[name=notify]')[0].value)

        # change notice list
        newlist = '"Foo Bar" <foo@bar.baz.com>'
        r = self.client.post(url,dict(notify=newlist,save_addresses="1"))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.notify,newlist)
        self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Notification list changed'))

        # Ask the form to regenerate the list
        r = self.client.post(url,dict(regenerate_addresses="1"))
        self.assertEqual(r.status_code,200)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        # Regenerate does not save!
        self.assertEqual(doc.notify,newlist)
        q = PyQuery(r.content)
        self.assertEqual(None,q('form input[name=notify]')[0].value)

    def test_edit_ad(self):
        """Shepherding AD can be reassigned; a comment event is recorded."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_ad',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('select[name=ad]')),1)

        # change ads
        ad2 = Person.objects.get(name='Ad No2')
        r = self.client.post(url,dict(ad=str(ad2.pk)))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.ad,ad2)
        self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Shepherding AD changed'))

    def test_edit_telechat_date(self):
        """Telechat date and returning-item flag can be set, moved and cleared."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_telechat_date',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('select[name=telechat_date]')),1)

        # set a date
        self.assertFalse(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat"))
        telechat_date = TelechatDate.objects.active().order_by('date')[0].date
        r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,telechat_date)

        # move it forward a telechat (this should NOT set the returning item bit)
        telechat_date = TelechatDate.objects.active().order_by('date')[1].date
        r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertFalse(doc.returning_item())

        # set the returning item bit without changing the date
        r = self.client.post(url,dict(telechat_date=telechat_date.isoformat(),returning_item="on"))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertTrue(doc.returning_item())

        # clear the returning item bit
        r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
        self.assertEqual(r.status_code,302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertFalse(doc.returning_item())

        # Take the doc back off any telechat
        r = self.client.post(url,dict(telechat_date=""))
        self.assertEqual(r.status_code, 302)
        self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,None)

    def approve_test_helper(self,approve_type):
        """Drive the approval view for ``approve_type`` ('appr-reqnopub' or
        'appr-noprob'), then verify the resulting state, ballot and email."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_approve',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "secretary", url)

        # Some additional setup
        create_ballot_if_not_open(doc,Person.objects.get(name="Sec Retary"),"conflrev")
        doc.set_state(State.objects.get(used=True, slug=approve_type+'-pend',type='conflrev'))
        doc.save()

        # get
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertEqual(len(q('[type=submit]:contains("Send announcement")')), 1)
        if approve_type == 'appr-noprob':
            self.assertTrue( 'IESG has no problem' in ''.join(wrap(r.content,2**16)))
        else:
            self.assertTrue( 'NOT be published' in ''.join(wrap(r.content,2**16)))

        # submit
        empty_outbox()
        r = self.client.post(url,dict(announcement_text=default_approval_text(doc)))
        self.assertEqual(r.status_code, 302)

        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.get_state_slug(),approve_type+'-sent')
        self.assertFalse(doc.ballot_open("conflrev"))

        self.assertEqual(len(outbox), 1)
        self.assertTrue('Results of IETF-conflict review' in outbox[0]['Subject'])
        self.assertTrue('irtf-chair' in outbox[0]['To'])
        self.assertTrue('ietf-announce@' in outbox[0]['Cc'])
        self.assertTrue('iana@' in outbox[0]['Cc'])
        if approve_type == 'appr-noprob':
            self.assertTrue( 'IESG has no problem' in ''.join(wrap(unicode(outbox[0]),2**16)))
        else:
            self.assertTrue( 'NOT be published' in ''.join(wrap(unicode(outbox[0]),2**16)))

    def test_approve_reqnopub(self):
        """Approval with a 'request not to publish' outcome."""
        self.approve_test_helper('appr-reqnopub')

    def test_approve_noprob(self):
        """Approval with a 'no problem' outcome."""
        self.approve_test_helper('appr-noprob')

    def setUp(self):
        make_test_data()
class ConflictReviewSubmitTests(TestCase):
    """Exercise uploading and editing the conflict review text itself.

    Fix: removed redundant ``f.close()`` calls inside ``with open(...)``
    blocks — the context manager already closes the file.
    """

    def test_initial_submission(self):
        """First submission via the textarea writes revision 00 to disk."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_submit',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code,200)
        q = PyQuery(r.content)
        self.assertTrue(q('textarea[name="content"]')[0].text.strip().startswith("[Edit this page"))

        # Faulty posts using textbox
        # Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox

        # sane post using textbox
        path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
        self.assertEqual(doc.rev,u'00')
        self.assertFalse(os.path.exists(path))

        r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1"))
        self.assertEqual(r.status_code,302)

        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.rev,u'00')
        with open(path) as f:
            self.assertEqual(f.read(),"Some initial review text\n")
        self.assertTrue( "submission-00" in doc.latest_event(NewRevisionDocEvent).desc)

    def test_subsequent_submission(self):
        """A second upload bumps the revision and stores the new text."""
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        url = urlreverse('conflict_review_submit',kwargs=dict(name=doc.name))
        login_testing_unauthorized(self, "ad", url)

        # A little additional setup
        # doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp
        self.assertEqual(doc.rev,u'00')
        path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
        with open(path,'w') as f:
            f.write('This is the old proposal.')

        # normal get
        r = self.client.get(url)
        self.assertEqual(r.status_code,200)
        q = PyQuery(r.content)
        self.assertTrue(q('textarea')[0].text.strip().startswith("This is the old proposal."))

        # faulty posts trying to use file upload
        # Copied from wgtracker tests - is this really testing the server code, or is it testing
        # how client.post populates Content-Type?
        test_file = StringIO("\x10\x11\x12") # post binary file
        test_file.name = "unnamed"
        r = self.client.post(url, dict(txt=test_file,submit_response="1"))
        self.assertEqual(r.status_code, 200)
        self.assertTrue("does not appear to be a text file" in unicontent(r))

        # sane post uploading a file
        test_file = StringIO("This is a new proposal.")
        test_file.name = "unnamed"
        r = self.client.post(url,dict(txt=test_file,submit_response="1"))
        self.assertEqual(r.status_code, 302)
        doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
        self.assertEqual(doc.rev,u'01')
        path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
        with open(path) as f:
            self.assertEqual(f.read(),"This is a new proposal.")
        self.assertTrue( "submission-01" in doc.latest_event(NewRevisionDocEvent).desc)

        # verify reset text button works
        r = self.client.post(url,dict(reset_text="1"))
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        self.assertTrue(q('textarea')[0].text.strip().startswith("[Edit this page"))

    def setUp(self):
        make_test_data()
        # Redirect review-file storage to a scratch directory for the test run.
        self.test_dir = os.path.abspath("tmp-conflict-review-testdir")
        os.mkdir(self.test_dir)
        settings.CONFLICT_REVIEW_PATH = self.test_dir

    def tearDown(self):
        shutil.rmtree(self.test_dir)
| bsd-3-clause |
arifsetiawan/edx-platform | lms/djangoapps/instructor/tests/test_proctoring.py | 4 | 2726 | """
Unit tests for Edx Proctoring feature flag in new instructor dashboard.
"""
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from student.roles import CourseFinanceAdminRole
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': True})
class TestProctoringDashboardViews(ModuleStoreTestCase):
    """
    Check for Proctoring view on the new instructor dashboard.

    The three tests shared identical "toggle staff flag / GET dashboard /
    assert tab presence" logic; it is factored into private helpers.
    """
    def setUp(self):
        super(TestProctoringDashboardViews, self).setUp()
        self.course = CourseFactory.create()
        self.course.enable_proctored_exams = True

        # Create instructor account
        self.instructor = AdminFactory.create()
        self.client.login(username=self.instructor.username, password="test")
        self.course = self.update_course(self.course, self.instructor.id)

        # URL for instructor dash
        self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.proctoring_link = '<a href="" data-section="proctoring">Proctoring</a>'
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)

    def _set_staff(self, is_staff):
        """Toggle the instructor's global staff flag and persist it."""
        self.instructor.is_staff = is_staff
        self.instructor.save()

    def _assert_proctoring_tab(self, is_visible):
        """
        GET the dashboard and assert whether the proctoring tab and its
        allowance section are present (is_visible=True) or absent.
        """
        response = self.client.get(self.url)
        if is_visible:
            self.assertTrue(self.proctoring_link in response.content)
            self.assertTrue('Allowance Section' in response.content)
        else:
            self.assertFalse(self.proctoring_link in response.content)
            self.assertFalse('Allowance Section' in response.content)

    def test_pass_proctoring_tab_in_instructor_dashboard(self):
        """
        Test Pass Proctoring Tab is in the Instructor Dashboard
        """
        self._set_staff(True)
        self._assert_proctoring_tab(True)

    def test_no_tab_non_global_staff(self):
        """
        Test Pass Proctoring Tab is not in the Instructor Dashboard
        for non global staff users
        """
        self._set_staff(False)
        self._assert_proctoring_tab(False)

    @patch.dict(settings.FEATURES, {'ENABLE_PROCTORED_EXAMS': False})
    def test_no_tab_flag_unset(self):
        """
        Test Pass Proctoring Tab is not in the Instructor Dashboard
        if the feature flag 'ENABLE_PROCTORED_EXAMS' is unset.
        """
        self._set_staff(True)
        self._assert_proctoring_tab(False)
| agpl-3.0 |
meredith-digops/ansible | lib/ansible/modules/web_infrastructure/jenkins_plugin.py | 35 | 25710 | #!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible's documentation tooling:
# module maturity ('preview') and who supports it ('community').
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
required: false
default: jenkins
description:
- Name of the Jenkins group on the OS.
jenkins_home:
required: false
default: /var/lib/jenkins
description:
- Home directory of the Jenkins user.
mode:
required: false
default: '0664'
description:
- File mode applied on versioned plugins.
name:
required: true
description:
- Plugin name.
owner:
required: false
default: jenkins
description:
- Name of the Jenkins user on the OS.
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options. To
remove an option, set the value of the option to C(null).
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
description:
- Desired plugin state.
- If the C(latest) is set, the check for new version will be performed
every time. This is suitable to keep the plugin up-to-date.
timeout:
required: false
default: 30
description:
- Server connection timeout in secs.
updates_expiration:
required: false
default: 86400
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
updates_url:
required: false
default: https://updates.jenkins-ci.org
description:
- URL of the Update Centre.
- Used as the base URL to download the plugins and the
I(update-center.json) JSON file.
url:
required: false
default: http://localhost:8080
description:
- URL of the Jenkins server.
version:
required: false
default: null
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
- Quote the version to prevent the value to be interpreted as float. For
example if C(1.20) would be unquoted, it would become C(1.2).
with_dependencies:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether to install plugin dependencies.
- This option takes effect only if the I(version) is not defined.
notes:
- Plugin installation should be run under root or the same user which owns
the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires
only the Web UI credentials.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin was installed.
  - Pinning works only if the plugin is installed and Jenkins service was
successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: "1.15"
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to use the params
#
# Define a variable and specify all default parameters you want to use across
# all jenkins_plugin calls:
#
# my_jenkins_params:
# url_username: admin
# url_password: p4ssw0rd
# url: http://localhost:8888
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: "1.4.9"
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_versioned.results }}"
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_unversioned.results }}"
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native
import base64
import hashlib
import json
import os
import tempfile
import time
import urllib
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
if self._csrf_enabled():
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
return csrf_data["useCrumbs"]
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.load(r)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=e.message)
return json_data
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
**kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, **kwargs)
if info['status'] != 200:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception:
e = get_exception()
self.module.fail_json(msg=msg_exception, details=e.message)
return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pined plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
    def install(self):
        """Install or update the plugin; return True when anything changed.

        Two strategies are used:
          * Plugin absent and no version requested -> install through the
            Jenkins script console (dependencies handled server-side when
            'with_dependencies' is set).
          * Otherwise -> download the plugin file and compare checksums
            against the copy already on disk, writing only on mismatch.

        Honors Ansible check mode throughout; also syncs owner/group/mode
        of the plugin file with the module's file arguments.
        """
        changed = False
        # Target path of the versioned plugin archive on disk.
        plugin_file = (
            '%s/plugins/%s.jpi' % (
                self.params['jenkins_home'],
                self.params['name']))
        if not self.is_installed and self.params['version'] is None:
            if not self.module.check_mode:
                # Install the plugin (with dependencies)
                install_script = (
                    'd = Jenkins.instance.updateCenter.getPlugin("%s")'
                    '.deploy(); d.get();' % self.params['name'])
                if self.params['with_dependencies']:
                    install_script = (
                        'Jenkins.instance.updateCenter.getPlugin("%s")'
                        '.getNeededDependencies().each{it.deploy()}; %s' % (
                            self.params['name'], install_script))
                script_data = {
                    'script': install_script
                }
                # Include the CSRF crumb ({} when CSRF is disabled).
                script_data.update(self.crumb)
                data = urllib.urlencode(script_data)
                # Send the installation request
                r = self._get_url_data(
                    "%s/scriptText" % self.url,
                    msg_status="Cannot install plugin.",
                    msg_exception="Plugin installation has failed.",
                    data=data)
                # Remove any stale .hpi so the deployed copy is canonical.
                hpi_file = '%s/plugins/%s.hpi' % (
                    self.params['jenkins_home'],
                    self.params['name'])
                if os.path.isfile(hpi_file):
                    os.remove(hpi_file)
            changed = True
        else:
            # Check if the plugin directory exists
            if not os.path.isdir(self.params['jenkins_home']):
                self.module.fail_json(
                    msg="Jenkins home directory doesn't exist.")
            md5sum_old = None
            if os.path.isfile(plugin_file):
                # Make the checksum of the currently installed plugin
                md5sum_old = hashlib.md5(
                    open(plugin_file, 'rb').read()).hexdigest()
            if self.params['version'] in [None, 'latest']:
                # Take latest version
                plugin_url = (
                    "%s/latest/%s.hpi" % (
                        self.params['updates_url'],
                        self.params['name']))
            else:
                # Take specific version
                plugin_url = (
                    "{0}/download/plugins/"
                    "{1}/{2}/{1}.hpi".format(
                        self.params['updates_url'],
                        self.params['name'],
                        self.params['version']))
            # Direct download when caching is disabled, a specific version
            # was pinned, or there is no local copy to compare against.
            if (
                    self.params['updates_expiration'] == 0 or
                    self.params['version'] not in [None, 'latest'] or
                    md5sum_old is None):
                # Download the plugin file directly
                r = self._download_plugin(plugin_url)
                # Write downloaded plugin into file if checksums don't match
                if md5sum_old is None:
                    # No previously installed plugin
                    if not self.module.check_mode:
                        self._write_file(plugin_file, r)
                    changed = True
                else:
                    # Get data for the MD5
                    data = r.read()
                    # Make new checksum
                    md5sum_new = hashlib.md5(data).hexdigest()
                    # If the checksum is different from the currently installed
                    # plugin, store the new plugin
                    if md5sum_old != md5sum_new:
                        if not self.module.check_mode:
                            self._write_file(plugin_file, data)
                        changed = True
            else:
                # Check for update from the updates JSON file
                plugin_data = self._download_updates()
                try:
                    sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot calculate SHA1 of the old plugin.",
                        details=e.message)
                # Update Center publishes base64-encoded SHA1 digests.
                sha1sum_old = base64.b64encode(sha1_old.digest())
                # If the latest version changed, download it
                if sha1sum_old != plugin_data['sha1']:
                    if not self.module.check_mode:
                        r = self._download_plugin(plugin_url)
                        self._write_file(plugin_file, r)
                    changed = True
        # Change file attributes if needed
        if os.path.isfile(plugin_file):
            params = {
                'dest': plugin_file
            }
            params.update(self.params)
            file_args = self.module.load_file_common_arguments(params)
            if not self.module.check_mode:
                # Not sure how to run this in the check mode
                changed = self.module.set_fs_attributes_if_different(
                    file_args, changed)
            else:
                # See the comment above
                changed = True
        return changed
def _download_updates(self):
    """Fetch and cache the Jenkins update-center plugin metadata.

    Downloads ``update-center.json`` from the configured updates URL
    unless a cached copy newer than ``updates_expiration`` seconds
    exists under ``~/.ansible/tmp``.  Returns the metadata dict for the
    plugin named in the module parameters; calls ``fail_json`` on any
    download or parse problem.
    """
    updates_filename = 'jenkins-plugin-cache.json'
    updates_dir = os.path.expanduser('~/.ansible/tmp')
    updates_file = "%s/%s" % (updates_dir, updates_filename)
    download_updates = True

    # Check if we need to download new updates file
    if os.path.isfile(updates_file):
        # Get timestamp when the file was changed last time
        ts_file = os.stat(updates_file).st_mtime
        ts_now = time.time()

        if ts_now - ts_file < self.params['updates_expiration']:
            download_updates = False

    updates_file_orig = updates_file

    # Download the updates file if needed
    if download_updates:
        url = "%s/update-center.json" % self.params['updates_url']

        # Get the data
        r = self._get_url_data(
            url,
            msg_status="Remote updates not found.",
            msg_exception="Updates download failed.")

        # Write the updates file
        update_fd, updates_file = tempfile.mkstemp()
        os.write(update_fd, r.read())

        try:
            os.close(update_fd)
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot close the tmp updates file %s." % updates_file,
                details=to_native(e))

    # Open the updates file
    try:
        f = open(updates_file)
    except IOError:
        e = get_exception()
        self.module.fail_json(
            msg="Cannot open temporal updates file.",
            details=to_native(e))

    # The update-center file wraps its JSON payload in a JavaScript
    # callback; the payload itself is the whole second line.
    data = None
    i = 0
    for line in f:
        # Read only the second line
        if i == 1:
            try:
                data = json.loads(line)
            except Exception:
                e = get_exception()
                # Bug fix: Exception.message does not exist on
                # Python 3; use to_native(e) like the other handlers
                # in this module.
                self.module.fail_json(
                    msg="Cannot load JSON data from the tmp updates file.",
                    details=to_native(e))

            break

        i += 1

    f.close()

    # Guard against a truncated/empty cache file; previously this fell
    # through with 'data' unbound and raised a NameError below.
    if data is None:
        self.module.fail_json(
            msg="Cannot find JSON data in the tmp updates file.")

    # Move the updates file to the right place if we could read it
    if download_updates:
        # Make sure the destination directory exists
        if not os.path.isdir(updates_dir):
            try:
                os.makedirs(updates_dir, int('0700', 8))
            except OSError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot create temporal directory.",
                    details=to_native(e))

        self.module.atomic_move(updates_file, updates_file_orig)

    # Check if we have the plugin data available
    if 'plugins' not in data or self.params['name'] not in data['plugins']:
        self.module.fail_json(
            msg="Cannot find plugin data in the updates file.")

    return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
def _write_file(self, f, data):
    """Atomically write plugin *data* to the file path *f*.

    *data* is either a string or a file-like object with ``read()``.
    The payload is first written to a temporary file and then moved
    into place with ``atomic_move`` so a failed write never clobbers an
    existing plugin file.
    """
    # Store the plugin into a temp file and then move it
    tmp_f_fd, tmp_f = tempfile.mkstemp()

    # NOTE(review): on Python 3, os.write() requires bytes, so the
    # isinstance(data, str) branch would raise TypeError there; this
    # code path assumes Python 2 str-is-bytes semantics -- confirm the
    # supported interpreter versions before changing.
    if isinstance(data, str):
        os.write(tmp_f_fd, data)
    else:
        os.write(tmp_f_fd, data.read())

    try:
        os.close(tmp_f_fd)
    except IOError:
        e = get_exception()
        self.module.fail_json(
            msg='Cannot close the temporal plugin file %s.' % tmp_f,
            details=to_native(e))

    # Move the file onto the right place
    self.module.atomic_move(tmp_f, f)
def uninstall(self):
    """Remove the plugin via Jenkins' doUninstall action.

    Returns True when a change was (or, in check mode, would be) made,
    False when the plugin is not installed at all.
    """
    if not self.is_installed:
        return False

    # Only talk to Jenkins outside of check mode.
    if not self.module.check_mode:
        self._pm_query('doUninstall', 'Uninstallation')

    return True
def pin(self):
    """Pin the plugin to its currently installed version; returns changed flag."""
    return self._pinning('pin')

def unpin(self):
    """Unpin the plugin; returns changed flag."""
    return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
    """Enable the plugin; returns changed flag."""
    return self._enabling('enable')

def disable(self):
    """Disable the plugin; returns changed flag."""
    return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
    """Call a Jenkins pluginManager *action* for this plugin.

    *msg* is the human-readable operation name used in the error
    messages (e.g. "Uninstallation").
    """
    url = "%s/pluginManager/plugin/%s/%s" % (
        self.params['url'], self.params['name'], action)

    # POST self.crumb with the request -- presumably the Jenkins CSRF
    # crumb token; verify against where self.crumb is populated.
    # NOTE(review): urllib.urlencode exists only on Python 2; Python 3
    # would need urllib.parse.urlencode -- confirm supported versions.
    data = urllib.urlencode(self.crumb)

    # Send the request
    self._get_url_data(
        url,
        msg_status="Plugin not found. %s" % url,
        msg_exception="%s has failed." % msg,
        data=data)
def main():
    """Entry point of the jenkins_plugin Ansible module."""
    # Module arguments: start from the shared URL argument spec and add
    # the plugin-specific options.
    argument_spec = url_argument_spec()
    argument_spec.update(
        group=dict(default='jenkins'),
        jenkins_home=dict(default='/var/lib/jenkins'),
        mode=dict(default='0644', type='raw'),
        name=dict(required=True),
        owner=dict(default='jenkins'),
        params=dict(type='dict'),
        state=dict(
            choices=[
                'present',
                'absent',
                'pinned',
                'unpinned',
                'enabled',
                'disabled',
                'latest'],
            default='present'),
        timeout=dict(default=30, type="int"),
        updates_expiration=dict(default=86400, type="int"),
        updates_url=dict(default='https://updates.jenkins-ci.org'),
        url=dict(default='http://localhost:8080'),
        url_password=dict(no_log=True),
        version=dict(),
        with_dependencies=dict(default=True, type='bool'),
    )

    # Module settings
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )

    # Fold any user-supplied 'params' dict into the module parameters,
    # then drop the wrapper key itself.
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])
    module.params.pop('params', None)

    # Force basic authentication
    module.params['force_basic_auth'] = True

    # Convert timeout to float
    try:
        module.params['timeout'] = float(module.params['timeout'])
    except ValueError:
        e = get_exception()
        module.fail_json(
            msg='Cannot convert %s to float.' % module.params['timeout'],
            details=to_native(e))

    # 'latest' is shorthand for state=present with version=latest.
    if module.params['state'] == 'latest':
        module.params['state'] = 'present'
        module.params['version'] = 'latest'

    # Create some shortcuts
    name = module.params['name']
    state = module.params['state']

    # Instantiate the JenkinsPlugin object
    jp = JenkinsPlugin(module)

    # Dispatch the requested state to the matching plugin action; an
    # unknown state reports no change (same as the original chain).
    actions = {
        'present': jp.install,
        'absent': jp.uninstall,
        'pinned': jp.pin,
        'unpinned': jp.unpin,
        'enabled': jp.enable,
        'disabled': jp.disable,
    }
    action = actions.get(state)
    changed = action() if action is not None else False

    # Print status of the change
    module.exit_json(changed=changed, plugin=name, state=state)
# Run the module when executed directly (the standard Ansible entry point).
if __name__ == '__main__':
    main()
| gpl-3.0 |
GoogleCloudPlatform/declarative-resource-client-library | python/services/servicenetworking/connection.py | 1 | 4860 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.service_networking import (
connection_pb2,
)
from google3.cloud.graphite.mmv2.services.google.service_networking import (
connection_pb2_grpc,
)
from typing import List
class Connection(object):
    """Declarative client for a Service Networking connection resource.

    Wraps the generated gRPC stub and exposes apply/delete/list
    operations plus proto serialization.
    """

    def __init__(
        self,
        network: str = None,
        project: str = None,
        name: str = None,
        reserved_peering_ranges: list = None,
        service: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        self.network = network
        self.project = project
        # Bug fix: 'name' was accepted but never stored, so a
        # caller-supplied name was silently dropped until apply()
        # overwrote it from the server response.
        self.name = name
        self.reserved_peering_ranges = reserved_peering_ranges
        self.service = service
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the connection and refresh local fields
        from the server response."""
        stub = connection_pb2_grpc.ServicenetworkingConnectionServiceStub(
            channel.Channel()
        )
        request = connection_pb2.ApplyServicenetworkingConnectionRequest()
        # Only set fields that have a non-empty value.
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.reserved_peering_ranges):
            request.resource.reserved_peering_ranges.extend(
                Primitive.to_proto(self.reserved_peering_ranges)
            )
        if Primitive.to_proto(self.service):
            request.resource.service = Primitive.to_proto(self.service)

        request.service_account_file = self.service_account_file

        response = stub.ApplyServicenetworkingConnection(request)
        # Mirror the server's view of the resource back onto this object.
        self.network = Primitive.from_proto(response.network)
        self.project = Primitive.from_proto(response.project)
        self.name = Primitive.from_proto(response.name)
        self.reserved_peering_ranges = Primitive.from_proto(
            response.reserved_peering_ranges
        )
        self.service = Primitive.from_proto(response.service)

    def delete(self):
        """Delete the connection on the server."""
        stub = connection_pb2_grpc.ServicenetworkingConnectionServiceStub(
            channel.Channel()
        )
        request = connection_pb2.DeleteServicenetworkingConnectionRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.reserved_peering_ranges):
            request.resource.reserved_peering_ranges.extend(
                Primitive.to_proto(self.reserved_peering_ranges)
            )
        if Primitive.to_proto(self.service):
            request.resource.service = Primitive.to_proto(self.service)

        # The response carries no data we need; unused local removed.
        stub.DeleteServicenetworkingConnection(request)

    @classmethod
    def list(self, project, network, service, service_account_file=""):
        """Return all connections for the given project/network/service."""
        stub = connection_pb2_grpc.ServicenetworkingConnectionServiceStub(
            channel.Channel()
        )
        request = connection_pb2.ListServicenetworkingConnectionRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Network = network
        request.Service = service
        return stub.ListServicenetworkingConnection(request).items

    def to_proto(self):
        """Serialize this object into a ServicenetworkingConnection proto."""
        resource = connection_pb2.ServicenetworkingConnection()
        if Primitive.to_proto(self.network):
            resource.network = Primitive.to_proto(self.network)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.reserved_peering_ranges):
            resource.reserved_peering_ranges.extend(
                Primitive.to_proto(self.reserved_peering_ranges)
            )
        if Primitive.to_proto(self.service):
            resource.service = Primitive.to_proto(self.service)
        return resource
class Primitive(object):
    """Converts scalar values to and from their proto representation."""

    @classmethod
    def to_proto(self, s):
        """Return *s* unchanged, mapping any falsy value to ''."""
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        """Proto scalars need no conversion; return *s* as-is."""
        return s
| apache-2.0 |
muku42/bokeh | bokeh/charts/builder/tests/test_step_builder.py | 4 | 2479 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.util.testing import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
    """Checks that the Step chart builder accepts every supported input type."""

    def test_supported_input(self):
        series = OrderedDict([
            ('python', [2, 3, 7, 5, 26]),
            ('pypy', [12, 33, 47, 15, 126]),
            ('jython', [22, 43, 10, 25, 26]),
        ])
        frame = pd.DataFrame(series)

        # A step chart repeats each interior value at the step edge,
        # which is what these expected series encode.
        y_python = [2., 2., 3., 3., 7., 7., 5., 5., 26.]
        y_pypy = [12., 12., 33., 33., 47., 47., 15., 15., 126.]
        y_jython = [22., 22., 43., 43., 10., 10., 25., 25., 26.]
        x = [0, 1, 1, 2, 2, 3, 3, 4, 4]

        # Keyed inputs: OrderedDict and DataFrame keep their group names.
        for keyed in (series, frame):
            builder = create_chart(Step, keyed)._builders[0]
            self.assertEqual(sorted(builder._groups), sorted(list(series.keys())))
            assert_array_equal(builder._data['x'], x)
            assert_array_equal(builder._data['y_python'], y_python)
            assert_array_equal(builder._data['y_jython'], y_jython)
            assert_array_equal(builder._data['y_pypy'], y_pypy)

        # Positional inputs: list-of-lists and ndarray get numeric names.
        rows = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
        for positional in (rows, np.array(rows)):
            builder = create_chart(Step, positional)._builders[0]
            self.assertEqual(builder._groups, ['0', '1', '2'])
            assert_array_equal(builder._data['y_0'], y_python)
            assert_array_equal(builder._data['y_1'], y_pypy)
            assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
idncom/odoo | addons/account_check_writing/__openerp__.py | 313 | 1808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'https://www.odoo.com/page/accounting',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
'data/report_paperformat.xml',
'views/report_check.xml',
'account_check_writing_report.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bradallred/gemrb | gemrb/GUIScripts/ie_stats.py | 2 | 10616 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ie_stats.py - definitions of creature stats codes
# !!! NOTE: Keep this file synchronized with gemrb/includes/ie_stats.h !!!
# EA values -- the "enemy/ally" allegiance codes stored in a creature's
# EA field.  NOTE: keep in sync with gemrb/includes/ie_stats.h (see the
# header comment of this file).
INANIMATE = 1
PC = 2
FAMILIAR = 3
ALLY = 4
CONTROLLED = 5
#charmed is 7 inside the engine???
CHARMED = 7
GOODBUTRED = 28
GOODBUTBLUE = 29
# GOODCUTOFF/EVILCUTOFF presumably bound the value ranges the engine
# treats as friendly vs. hostile -- confirm against ie_stats.h.
GOODCUTOFF = 30
NOTGOOD = 31
ANYTHING = 126
NEUTRAL = 128
NOTEVIL = 199
EVILCUTOFF = 200
EVILBUTGREEN = 201
EVILBUTBLUE = 202
CHARMEDPC = 254
ENEMY = 255

# state bits (IE_STATE) -- bit flags; some names are composites of more
# than one engine bit (e.g. STATE_HELPLESS = 1 + 32,
# STATE_PETRIFIED = 8 + 64 + 128).
STATE_BERSERK = 2
STATE_PANIC = 4
STATE_HELPLESS = 1 + 32
STATE_PETRIFIED = 8 + 64 + 128
STATE_DEAD = 2048
STATE_POISONED = 0x4000
MC_WAS_FIGHTER = 0x0008
MC_WAS_MAGE = 0x0010
MC_WAS_CLERIC = 0x0020
MC_WAS_THIEF = 0x0040
MC_WAS_DRUID = 0x0080
MC_WAS_RANGER = 0x0100
MC_WAS_ANY_CLASS = MC_WAS_FIGHTER|MC_WAS_MAGE|MC_WAS_CLERIC|MC_WAS_THIEF|MC_WAS_DRUID|MC_WAS_RANGER
MC_FALLEN_PALADIN = 0x200
MC_FALLEN_RANGER = 0x400
MC_EXPORTABLE = 0x800
MC_PLOT_CRITICAL = 0x2000
MC_BEENINPARTY = 0x8000
MC_SEENPARTY = 0x10000
# specflag bits
SPECF_DRIVEN = 1
SPECF_CRITIMMUNITY = 2
SPECF_PALADINOFF = 4
SPECF_MONKOFF = 8
# stats
IE_HITPOINTS = 0
IE_MAXHITPOINTS = 1
IE_ARMORCLASS = 2
IE_ACCRUSHINGMOD = 3
IE_ACMISSILEMOD = 4
IE_ACPIERCINGMOD = 5
IE_ACSLASHINGMOD = 6
IE_TOHIT = 7
IE_NUMBEROFATTACKS = 8
IE_SAVEVSDEATH = 9
IE_SAVEVSWANDS = 10
IE_SAVEVSPOLY = 11
IE_SAVEVSBREATH = 12
IE_SAVEVSSPELL = 13
IE_SAVEFORTITUDE = 9
IE_SAVEREFLEX = 10
IE_SAVEWILL = 11
IE_RESISTFIRE = 14
IE_RESISTCOLD = 15
IE_RESISTELECTRICITY = 16
IE_RESISTACID = 17
IE_RESISTMAGIC = 18
IE_RESISTMAGICFIRE = 19
IE_RESISTMAGICCOLD = 20
IE_RESISTSLASHING = 21
IE_RESISTCRUSHING = 22
IE_RESISTPIERCING = 23
IE_RESISTMISSILE = 24
IE_LORE = 25
IE_LOCKPICKING = 26
IE_STEALTH = 27
IE_TRAPS = 28
IE_PICKPOCKET = 29
IE_FATIGUE = 30
IE_INTOXICATION = 31
IE_LUCK = 32
IE_TRACKING = 33
IE_LEVEL = 34
IE_LEVELFIGHTER = 34 # pst, iwd2
IE_SEX = 35
IE_STR = 36
IE_STREXTRA = 37
IE_INT = 38
IE_WIS = 39
IE_DEX = 40
IE_CON = 41
IE_CHR = 42
IE_XPVALUE = 43
IE_XP = 44
IE_GOLD = 45
IE_MORALEBREAK = 46
IE_MORALERECOVERYTIME = 47
IE_REPUTATION = 48
IE_HATEDRACE = 49
IE_DAMAGEBONUS = 50
IE_SPELLFAILUREMAGE = 51
IE_SPELLFAILUREPRIEST = 52
IE_SPELLDURATIONMODMAGE = 53
IE_SPELLDURATIONMODPRIEST = 54
IE_TURNUNDEADLEVEL = 55
IE_BACKSTABDAMAGEMULTIPLIER = 56
IE_LAYONHANDSAMOUNT = 57
IE_HELD = 58
IE_POLYMORPHED = 59
IE_TRANSLUCENT = 60
IE_IDENTIFYMODE = 61
IE_ENTANGLE = 62
IE_SANCTUARY = 63
IE_MINORGLOBE = 64
IE_SHIELDGLOBE = 65
IE_GREASE = 66
IE_WEB = 67
IE_LEVEL2 = 68
IE_LEVELMAGE = 68 # pst, iwd2
IE_LEVEL3 = 69
IE_LEVELTHIEF = 69 # pst, iwd2
IE_CASTERHOLD = 70
IE_ENCUMBRANCE = 71
IE_MISSILEHITBONUS = 72
IE_MAGICDAMAGERESISTANCE = 73
IE_RESISTPOISON = 74
IE_DONOTJUMP = 75
IE_AURACLEANSING = 76
IE_MENTALSPEED = 77
IE_PHYSICALSPEED = 78
IE_CASTINGLEVELBONUSMAGE = 79
IE_CASTINGLEVELBONUSCLERIC = 80
IE_SEEINVISIBLE = 81
IE_IGNOREDIALOGPAUSE = 82
IE_MINHITPOINTS = 83
IE_HITBONUSRIGHT = 84
IE_HITBONUSLEFT = 85
IE_DAMAGEBONUSRIGHT = 86
IE_DAMAGEBONUSLEFT = 87
IE_STONESKINS = 88
IE_FEAT_BOW = 89
IE_FEAT_CROSSBOW = 90
IE_FEAT_SLING = 91
IE_FEAT_AXE = 92
IE_FEAT_MACE = 93
IE_FEAT_FLAIL = 94
IE_FEAT_POLEARM = 95
IE_FEAT_HAMMER = 96
IE_FEAT_STAFF = 97
IE_FEAT_GREAT_SWORD = 98
IE_FEAT_LARGE_SWORD = 99
IE_FEAT_SMALL_SWORD = 100
IE_FEAT_TOUGHNESS = 101
IE_FEAT_ARMORED_ARCANA = 102
IE_FEAT_CLEAVE = 103
IE_FEAT_ARMOUR = 104
IE_FEAT_ENCHANTMENT = 105
IE_FEAT_EVOCATION = 106
IE_FEAT_NECROMANCY = 107
IE_FEAT_TRANSMUTATION = 108
IE_FEAT_SPELL_PENETRATION = 109
IE_FEAT_EXTRA_RAGE = 110
IE_FEAT_EXTRA_SHAPE = 111
IE_FEAT_EXTRA_SMITING = 112
IE_FEAT_EXTRA_TURNING = 113
IE_FEAT_BASTARDSWORD = 114
IE_ALCHEMY = 115
IE_ANIMALS = 116
IE_BLUFF = 117
IE_CONCENTRATION = 118
IE_DIPLOMACY = 119
IE_INTIMIDATE = 120
IE_SEARCH = 121
IE_SPELLCRAFT = 122
IE_MAGICDEVICE = 123
IE_SPECFLAGS = 124
IE_PROFICIENCYBASTARDSWORD = 89
IE_PROFICIENCYLONGSWORD = 90
IE_PROFICIENCYSHORTSWORD = 91
IE_PROFICIENCYAXE = 92
IE_PROFICIENCYTWOHANDEDSWORD = 93
IE_PROFICIENCYKATANA = 94
IE_PROFICIENCYSCIMITARWAKISASHININJATO = 95
IE_PROFICIENCYDAGGER = 96
IE_PROFICIENCYWARHAMMER = 97
IE_PROFICIENCYSPEAR = 98
IE_PROFICIENCYHALBERD = 99
IE_PROFICIENCYFLAILMORNINGSTAR = 100
IE_PROFICIENCYMACE = 101
IE_PROFICIENCYQUARTERSTAFF = 102
IE_PROFICIENCYCROSSBOW = 103
IE_PROFICIENCYLONGBOW = 104
IE_PROFICIENCYSHORTBOW = 105
IE_PROFICIENCYDART = 106
IE_PROFICIENCYSLING = 107
IE_PROFICIENCYBLACKJACK = 108
IE_PROFICIENCYGUN = 109
IE_PROFICIENCYMARTIALARTS = 110
IE_PROFICIENCY2HANDED = 111
IE_PROFICIENCYSWORDANDSHIELD = 112
IE_PROFICIENCYSINGLEWEAPON = 113
IE_PROFICIENCY2WEAPON = 114
IE_EXTRAPROFICIENCY1 = 115
IE_EXTRAPROFICIENCY2 = 116
IE_EXTRAPROFICIENCY3 = 117
IE_EXTRAPROFICIENCY4 = 118
IE_EXTRAPROFICIENCY5 = 119
IE_EXTRAPROFICIENCY6 = 120
IE_EXTRAPROFICIENCY7 = 121
IE_EXTRAPROFICIENCY8 = 122
IE_EXTRAPROFICIENCY9 = 123
IE_EXTRAPROFICIENCY10 = 124
IE_EXTRAPROFICIENCY11 = 125
IE_EXTRAPROFICIENCY12 = 126
IE_EXTRAPROFICIENCY13 = 127
IE_EXTRAPROFICIENCY14 = 128
IE_EXTRAPROFICIENCY15 = 129
IE_EXTRAPROFICIENCY16 = 130
IE_EXTRAPROFICIENCY17 = 131
IE_FEATS1 = 131
IE_EXTRAPROFICIENCY18 = 132
IE_FEATS2 = 132
IE_EXTRAPROFICIENCY19 = 133
IE_FEATS3 = 133
IE_EXTRAPROFICIENCY20 = 134
IE_FREESLOTS = 134 #not an error (PST)
IE_HIDEINSHADOWS = 135
IE_DETECTILLUSIONS = 136
IE_SETTRAPS = 137
IE_PUPPETMASTERID = 138
IE_PUPPETMASTERTYPE = 139
IE_PUPPETTYPE = 140
IE_PUPPETID = 141
IE_CHECKFORBERSERK = 142
IE_BERSERKSTAGE1 = 143
IE_BERSERKSTAGE2 = 144
IE_DAMAGELUCK = 145
IE_CRITICALHITBONUS = 146
IE_VISUALRANGE = 147
IE_EXPLORE = 148
IE_THRULLCHARM = 149
IE_SUMMONDISABLE = 150
IE_HITBONUS = 151
IE_KIT = 152
IE_FORCESURGE = 153
IE_SURGEMOD = 154
IE_IMPROVEDHASTE = 155
IE_SCRIPTINGSTATE1 = 156
IE_SCRIPTINGSTATE2 = 157
IE_SCRIPTINGSTATE3 = 158
IE_SCRIPTINGSTATE4 = 159
IE_SCRIPTINGSTATE5 = 160
IE_SCRIPTINGSTATE6 = 161
IE_SCRIPTINGSTATE7 = 162
IE_SCRIPTINGSTATE8 = 163
IE_SCRIPTINGSTATE9 = 164
IE_SCRIPTINGSTATE10 = 165
IE_MELEETOHIT = 166
IE_MELEEDAMAGE = 167
IE_MISSILEDAMAGE = 168
IE_NOCIRCLE = 169
IE_FISTHIT = 170
IE_FISTDAMAGE = 171
IE_TITLE1 = 172
IE_TITLE2 = 173
IE_DISABLEOVERLAY = 174
IE_DISABLEBACKSTAB = 175
#176-182 overwritten by us
IE_XP_MAGE = 176 # In PST this stores secondary level exp
IE_XP_THIEF = 177 # In PST this stores tertiary level exp
IE_DIALOGRANGE = 178 # distance for dialogue
IE_MOVEMENTRATE = 179
IE_MORALE = 180
IE_BOUNCE = 181
IE_MIRRORIMAGES = 182
#these are original
IE_ENABLEOFFSCREENAI = 183
IE_EXISTANCEDELAY = 184
IE_ATTACKNUMBERDOUBLE = 185
IE_DISABLECHUNKING = 186
IE_NOTURNABLE = 187
#188 was summondisable2 in original
IE_STONESKINSGOLEM = 199
IE_LEVELDRAIN = 200
IE_AVATARREMOVAL = 201
#202 is unused
# GemRB Specific Defines
IE_IMMUNITY = 203
IE_DISABLEDBUTTON = 204
IE_ANIMATION_ID = 205
IE_STATE_ID = 206
IE_EXTSTATE_ID = 207
IE_METAL_COLOR = 208
IE_MINOR_COLOR = 209
IE_MAJOR_COLOR = 210
IE_SKIN_COLOR = 211
IE_LEATHER_COLOR = 212
IE_ARMOR_COLOR = 213
IE_HAIR_COLOR = 214
IE_MC_FLAGS = 215
IE_CLASSLEVELSUM = 216
IE_ALIGNMENT = 217
IE_CASTING = 218
IE_ARMOR_TYPE = 219
IE_TEAM = 220
IE_FACTION = 221
IE_SUBRACE = 222
IE_UNUSED_SKILLPTS = 223
IE_SPECIES = 223 #pst specific
IE_HATEDRACE2 = 224
IE_HATEDRACE3 = 225
IE_HATEDRACE4 = 226
IE_HATEDRACE5 = 227
IE_HATEDRACE6 = 228
IE_HATEDRACE7 = 229
IE_HATEDRACE8 = 230
# These are in original PST, IWD, IWD2, but not as stats
IE_RACE = 231
IE_CLASS = 232
IE_GENERAL = 233
IE_EA = 234
IE_SPECIFIC = 235
IE_SAVEDXPOS = 236
IE_SAVEDYPOS = 237
IE_SAVEDFACE = 238
#239 user defined stat
IE_LEVELBARBARIAN = 240
IE_LEVELBARD = 241
IE_LEVELCLERIC = 242
IE_LEVELDRUID = 243
IE_LEVELMONK = 244
IE_LEVELPALADIN = 245
IE_LEVELRANGER = 246
IE_LEVELSORCERER = 247
#248 IE_LEVELCLASS12
#249 IE_LEVELCLASS13
#the remaining six stats are spell states
IE_SPLSTATE_ID1 = 250
#these stats exist only in PC's (but we access only PCs anyway)
IE_EXPERTISE = 0x1003
IE_POWERATTACK = 0x1004
IE_ARTERIAL_STRIKE = 0x1005
IE_HAMSTRING = 0x1006
IE_RAPID_SHOT = 0x1007
# End of file ie_stats.py
| gpl-2.0 |
sosterwalder/ldif3 | ldif3.py | 1 | 11669 | """ldif3 - generate and parse LDIF data (see RFC 2849)."""
from __future__ import unicode_literals
__version__ = '3.1.1'
__all__ = [
# constants
'LDIF_PATTERN',
# classes
'LDIFWriter',
'LDIFParser',
]
import base64
import re
import logging
from collections import OrderedDict
try: # pragma: nocover
from urlparse import urlparse
from urllib import urlopen
except ImportError: # pragma: nocover
from urllib.parse import urlparse
from urllib.request import urlopen
log = logging.getLogger('ldif3')
# Grammar for string-representation DNs, built up piecewise from the
# attribute-type and attribute-value fragments.
ATTRTYPE_PATTERN = r'[\w;.-]+(;[\w_-]+)*'
ATTRVALUE_PATTERN = r'(([^,]|\\,)+|".*?")'
ATTR_PATTERN = ATTRTYPE_PATTERN + r'[ ]*=[ ]*' + ATTRVALUE_PATTERN
RDN_PATTERN = ATTR_PATTERN + r'([ ]*\+[ ]*' + ATTR_PATTERN + r')*[ ]*'
DN_PATTERN = RDN_PATTERN + r'([ ]*,[ ]*' + RDN_PATTERN + r')*[ ]*'
DN_REGEX = re.compile('^%s$' % DN_PATTERN)

LDIF_PATTERN = ('^((dn(:|::) %(DN_PATTERN)s)|(%(ATTRTYPE_PATTERN)'
                's(:|::) .*)$)+' % vars())

MOD_OPS = ['add', 'delete', 'replace']
CHANGE_TYPES = ['add', 'delete', 'modify', 'modrdn']


def is_dn(s):
    """Return True when *s* is a syntactically valid LDAP DN.

    The empty string counts as a valid (root) DN.
    """
    if s == '':
        return True
    match = DN_REGEX.match(s)
    # '$' also matches just before a trailing newline, so additionally
    # require the match to cover the whole string.
    return match is not None and match.group(0) == s
UNSAFE_STRING_PATTERN = '(^[ :<]|[\000\n\r\200-\377])'
UNSAFE_STRING_RE = re.compile(UNSAFE_STRING_PATTERN)

# base64.encodestring was deprecated since Python 3.1 and removed in
# Python 3.9; prefer its replacement and fall back for Python 2.
_base64_encode = getattr(base64, 'encodebytes', None) or base64.encodestring


def lower(l):
    """Return a list with the lowercased items of l (None -> [])."""
    return [i.lower() for i in l or []]


class LDIFWriter(object):
    """Write LDIF entry or change records to file object.

    :type output_file: file-like object in binary mode
    :param output_file: File for output

    :type base64_attrs: List[string]
    :param base64_attrs: List of attribute types to be base64-encoded in any
        case

    :type cols: int
    :param cols: Specifies how many columns a line may have before it is
        folded into many lines

    :type line_sep: bytearray
    :param line_sep: line separator
    """

    def __init__(
            self, output_file, base64_attrs=None, cols=76, line_sep=b'\n'):
        self._output_file = output_file
        # Default changed from [] to None: a mutable default list is
        # shared between all instances.  lower() maps None to [], so
        # the behavior for callers is unchanged.
        self._base64_attrs = lower(base64_attrs)
        self._cols = cols
        self._line_sep = line_sep
        self.records_written = 0  #: number of records that have been written

    def _fold_line(self, line):
        """Write string line as one or more folded lines."""
        if len(line) <= self._cols:
            self._output_file.write(line)
            self._output_file.write(self._line_sep)
        else:
            pos = self._cols
            self._output_file.write(line[0:self._cols])
            self._output_file.write(self._line_sep)
            while pos < len(line):
                # Continuation lines start with a space, so they carry
                # one character less than the first line.
                self._output_file.write(b' ')
                end = min(len(line), pos + self._cols - 1)
                self._output_file.write(line[pos:end])
                self._output_file.write(self._line_sep)
                pos = end

    def _needs_base64_encoding(self, attr_type, attr_value):
        """Return True if attr_value has to be base-64 encoded.

        This is the case because of special chars or because attr_type is
        in self._base64_attrs
        """
        return attr_type.lower() in self._base64_attrs or \
            UNSAFE_STRING_RE.search(attr_value) is not None

    def _unparse_attr(self, attr_type, attr_value):
        """Write a single attribute type/value pair."""
        if self._needs_base64_encoding(attr_type, attr_value):
            # Bug fix: base64.encodestring no longer exists on
            # Python >= 3.9; use the compat alias defined above.
            encoded = _base64_encode(attr_value.encode('utf8'))\
                .replace(b'\n', b'')\
                .decode('utf8')
            line = ':: '.join([attr_type, encoded])
        else:
            line = ': '.join([attr_type, attr_value])
        self._fold_line(line.encode('utf8'))

    def _unparse_entry_record(self, entry):
        """
        :type entry: Dict[string, List[string]]
        :param entry: Dictionary holding an entry
        """
        # Sort attribute types for deterministic output.
        for attr_type in sorted(entry.keys()):
            for attr_value in entry[attr_type]:
                self._unparse_attr(attr_type, attr_value)

    def _unparse_changetype(self, mod_len):
        """Detect and write the changetype from the modlist item length."""
        if mod_len == 2:
            changetype = 'add'
        elif mod_len == 3:
            changetype = 'modify'
        else:
            raise ValueError("modlist item of wrong length")

        self._unparse_attr('changetype', changetype)

    def _unparse_change_record(self, modlist):
        """
        :type modlist: List[Tuple]
        :param modlist: List of additions (2-tuple) or modifications
            (3-tuple)
        """
        mod_len = len(modlist[0])
        self._unparse_changetype(mod_len)

        for mod in modlist:
            if len(mod) != mod_len:
                raise ValueError("Subsequent modlist item of wrong length")
            if mod_len == 2:
                mod_type, mod_vals = mod
            elif mod_len == 3:
                mod_op, mod_type, mod_vals = mod
                self._unparse_attr(MOD_OPS[mod_op], mod_type)

            for mod_val in mod_vals:
                self._unparse_attr(mod_type, mod_val)

            if mod_len == 3:
                # RFC 2849 terminates each modify spec with '-'.
                self._output_file.write(b'-' + self._line_sep)

    def unparse(self, dn, record):
        """Write an entry or change record to the output file.

        :type dn: string
        :param dn: distinguished name

        :type record: Union[Dict[string, List[string]], List[Tuple]]
        :param record: Either a dictionary holding an entry or a list of
            additions (2-tuple) or modifications (3-tuple).
        """
        self._unparse_attr('dn', dn)
        if isinstance(record, dict):
            self._unparse_entry_record(record)
        elif isinstance(record, list):
            self._unparse_change_record(record)
        else:
            raise ValueError("Argument record must be dictionary or list")
        self._output_file.write(self._line_sep)
        self.records_written += 1
# base64.decodestring was deprecated since Python 3.1 and removed in
# Python 3.9; prefer its replacement and fall back for Python 2.
_base64_decode = getattr(base64, 'decodebytes', None) or base64.decodestring


class LDIFParser(object):
    """Read LDIF entry or change records from file object.

    :type input_file: file-like object in binary mode
    :param input_file: file to read the LDIF input from

    :type ignored_attr_types: List[string]
    :param ignored_attr_types: List of attribute types that will be ignored

    :type process_url_schemes: List[bytearray]
    :param process_url_schemes: List of URL schemes to process with urllib.
        An empty list turns off all URL processing and the attribute is
        ignored completely.

    :type line_sep: bytearray
    :param line_sep: line separator

    :type strict: boolean
    :param strict: If set to ``False``, recoverable parse errors will
        produce log warnings rather than exceptions.
    """

    def _strip_line_sep(self, s):
        """Strip trailing line separators from s, but no other whitespaces."""
        if s[-2:] == b'\r\n':
            return s[:-2]
        elif s[-1:] == b'\n':
            return s[:-1]
        else:
            return s

    def __init__(
            self,
            input_file,
            ignored_attr_types=None,
            process_url_schemes=None,
            line_sep=b'\n',
            strict=True):
        self._input_file = input_file
        # Defaults changed from [] to None: mutable default lists are
        # shared between all instances.  lower() maps None to [], so
        # the behavior for callers is unchanged.
        self._process_url_schemes = lower(process_url_schemes)
        self._ignored_attr_types = lower(ignored_attr_types)
        self._line_sep = line_sep
        self._strict = strict

        self.line_counter = 0  #: number of lines that have been read
        self.byte_counter = 0  #: number of bytes that have been read
        self.records_read = 0  #: number of records that have been read

    def _iter_unfolded_lines(self):
        """Iter input unfoled lines. Skip comments."""
        line = self._input_file.readline()
        while line:
            self.line_counter += 1
            self.byte_counter += len(line)

            line = self._strip_line_sep(line)

            # Physical lines starting with a space continue the
            # previous logical line (RFC 2849 folding).
            nextline = self._input_file.readline()
            while nextline and nextline[:1] == b' ':
                line += self._strip_line_sep(nextline)[1:]
                nextline = self._input_file.readline()

            if not line.startswith(b'#'):
                yield line
            line = nextline

    def _iter_blocks(self):
        """Iter input lines in blocks separated by blank lines."""
        lines = []
        for line in self._iter_unfolded_lines():
            if line:
                lines.append(line)
            else:
                self.records_read += 1
                yield lines
                lines = []
        # Emit a trailing record that is not followed by a blank line.
        if lines:
            self.records_read += 1
            yield lines

    def _parse_attr(self, line):
        """Parse a single attribute type/value pair."""
        colon_pos = line.index(b':')
        attr_type = line[0:colon_pos]

        value_spec = line[colon_pos:colon_pos + 2]
        if value_spec == b': ':
            attr_value = line[colon_pos + 2:].lstrip()
        elif value_spec == b'::':
            # Bug fix: base64.decodestring no longer exists on
            # Python >= 3.9; use the compat alias defined above.
            attr_value = _base64_decode(line[colon_pos + 2:])
        elif value_spec == b':<':
            url = line[colon_pos + 2:].strip()
            attr_value = b''
            if self._process_url_schemes:
                u = urlparse(url)
                if u[0] in self._process_url_schemes:
                    attr_value = urlopen(url.decode('ascii')).read()
        elif value_spec == b':\r\n' or value_spec == b'\n':
            # NOTE(review): value_spec is always a 2-byte slice, so the
            # b'\n' comparison can never match and b':\r\n' cannot
            # either; kept for bug-compatibility -- confirm intent.
            attr_value = b''
        else:
            attr_value = b''

        return attr_type.decode('utf8'), attr_value.decode('utf8')

    def _error(self, msg):
        """Raise in strict mode, otherwise only log a warning."""
        if self._strict:
            raise ValueError(msg)
        else:
            log.warning(msg)

    def _check_dn(self, dn, attr_value):
        """Check dn attribute for issues."""
        if dn is not None:
            self._error('Two lines starting with dn: in one record.')
        if not is_dn(attr_value):
            self._error('No valid string-representation of '
                        'distinguished name %s.' % attr_value)

    def _check_changetype(self, dn, changetype, attr_value):
        """Check changetype attribute for issues."""
        if dn is None:
            self._error('Read changetype: before getting valid dn: line.')
        if changetype is not None:
            self._error('Two lines starting with changetype: in one record.')
        if attr_value not in CHANGE_TYPES:
            self._error('changetype value %s is invalid.' % attr_value)

    def _parse_entry_record(self, lines):
        """Parse a single entry record from a list of lines."""
        dn = None
        entry = OrderedDict()

        for line in lines:
            attr_type, attr_value = self._parse_attr(line)

            if attr_type == 'dn':
                self._check_dn(dn, attr_value)
                dn = attr_value
            elif attr_type == 'version' and dn is None:
                pass  # version = 1
            else:
                if dn is None:
                    self._error('First line of record does not start '
                                'with "dn:": %s' % attr_type)
                if attr_value is not None and \
                        attr_type.lower() not in self._ignored_attr_types:
                    if attr_type in entry:
                        entry[attr_type].append(attr_value)
                    else:
                        entry[attr_type] = [attr_value]

        return dn, entry

    def parse(self):
        """Iterate LDIF entry records.

        :rtype: Iterator[Tuple[string, Dict]]
        :return: (dn, entry)
        """
        for block in self._iter_blocks():
            yield self._parse_entry_record(block)
| bsd-2-clause |
twobob/buildroot-kindle | output/build/host-python-2.7.2/Lib/plat-mac/lib-scriptpackages/StdSuites/Type_Names_Suite.py | 82 | 9491 | """Suite Type Names Suite: Terminology for Registry data types
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'tpnm'
class Type_Names_Suite_Events:
    # The Type Names suite defines no events; the class exists only so the
    # generated suite package exposes a uniform "<Suite>_Events" entry point.
    pass
class PostScript_picture(aetools.ComponentItem):
"""PostScript picture - """
want = 'EPS '
class point(aetools.ComponentItem):
"""point - point coordinates """
want = 'QDpt'
class string(aetools.ComponentItem):
    """string - a string of characters """
    want = 'TEXT'

# 'plain text' is a terminology synonym for 'string'; the code generator
# emitted the alias twice, so the redundant duplicate binding is dropped.
plain_text = string
class TIFF_picture(aetools.ComponentItem):
"""TIFF picture - """
want = 'TIFF'
class application_dictionary(aetools.ComponentItem):
"""application dictionary - """
want = 'aete'
class system_dictionary(aetools.ComponentItem):
"""system dictionary - """
want = 'aeut'
class color_table(aetools.ComponentItem):
"""color table - """
want = 'clrt'
class menu_item(aetools.ComponentItem):
"""menu item - """
want = 'cmen'
class menu(aetools.ComponentItem):
"""menu - """
want = 'cmnu'
class double_integer(aetools.ComponentItem):
"""double integer - """
want = 'comp'
class type_element_info(aetools.ComponentItem):
"""type element info - """
want = 'elin'
class type_event_info(aetools.ComponentItem):
"""type event info - information about an event """
want = 'evin'
class extended_real(aetools.ComponentItem):
"""extended real - """
want = 'exte'
class fixed(aetools.ComponentItem):
"""fixed - a real number """
want = 'fixd'
class fixed_point(aetools.ComponentItem):
"""fixed point - """
want = 'fpnt'
class fixed_rectangle(aetools.ComponentItem):
"""fixed rectangle - """
want = 'frct'
class type_class_info(aetools.ComponentItem):
"""type class info - information about properties and elements of a class """
want = 'gcli'
class location_reference(aetools.ComponentItem):
"""location reference - """
want = 'insl'
class long_fixed_point(aetools.ComponentItem):
"""long fixed point - """
want = 'lfpt'
class long_fixed_rectangle(aetools.ComponentItem):
"""long fixed rectangle - """
want = 'lfrc'
class long_fixed(aetools.ComponentItem):
"""long fixed - """
want = 'lfxd'
class long_point(aetools.ComponentItem):
"""long point - """
want = 'lpnt'
class long_rectangle(aetools.ComponentItem):
"""long rectangle - """
want = 'lrct'
class machine_location(aetools.ComponentItem):
"""machine location - """
want = 'mLoc'
class unsigned_integer(aetools.ComponentItem):
"""unsigned integer - """
want = 'magn'
class null(aetools.ComponentItem):
"""null - """
want = 'null'
class type_property_info(aetools.ComponentItem):
"""type property info - """
want = 'pinf'
class type_parameter_info(aetools.ComponentItem):
"""type parameter info - """
want = 'pmin'
class bounding_rectangle(aetools.ComponentItem):
"""bounding rectangle - bounding rectangle """
want = 'qdrt'
class small_integer(aetools.ComponentItem):
"""small integer - """
want = 'shor'
class small_real(aetools.ComponentItem):
"""small real - """
want = 'sing'
class scrap_styles(aetools.ComponentItem):
"""scrap styles - """
want = 'styl'
class type_suite_info(aetools.ComponentItem):
"""type suite info - """
want = 'suin'
class target_id(aetools.ComponentItem):
"""target id - """
want = 'targ'
class dash_style(aetools.ComponentItem):
"""dash style - """
want = 'tdas'
class pixel_map_record(aetools.ComponentItem):
"""pixel map record - """
want = 'tpmm'
class RGB16_color(aetools.ComponentItem):
"""RGB16 color - """
want = 'tr16'
class RGB96_color(aetools.ComponentItem):
"""RGB96 color - """
want = 'tr96'
class rotation(aetools.ComponentItem):
"""rotation - """
want = 'trot'
class version(aetools.ComponentItem):
"""version - """
want = 'vers'
PostScript_picture._superclassnames = []
PostScript_picture._privpropdict = {
}
PostScript_picture._privelemdict = {
}
point._superclassnames = []
point._privpropdict = {
}
point._privelemdict = {
}
string._superclassnames = []
string._privpropdict = {
}
string._privelemdict = {
}
TIFF_picture._superclassnames = []
TIFF_picture._privpropdict = {
}
TIFF_picture._privelemdict = {
}
application_dictionary._superclassnames = []
application_dictionary._privpropdict = {
}
application_dictionary._privelemdict = {
}
system_dictionary._superclassnames = []
system_dictionary._privpropdict = {
}
system_dictionary._privelemdict = {
}
color_table._superclassnames = []
color_table._privpropdict = {
}
color_table._privelemdict = {
}
menu_item._superclassnames = []
menu_item._privpropdict = {
}
menu_item._privelemdict = {
}
menu._superclassnames = []
menu._privpropdict = {
}
menu._privelemdict = {
}
double_integer._superclassnames = []
double_integer._privpropdict = {
}
double_integer._privelemdict = {
}
type_element_info._superclassnames = []
type_element_info._privpropdict = {
}
type_element_info._privelemdict = {
}
type_event_info._superclassnames = []
type_event_info._privpropdict = {
}
type_event_info._privelemdict = {
}
extended_real._superclassnames = []
extended_real._privpropdict = {
}
extended_real._privelemdict = {
}
fixed._superclassnames = []
fixed._privpropdict = {
}
fixed._privelemdict = {
}
fixed_point._superclassnames = []
fixed_point._privpropdict = {
}
fixed_point._privelemdict = {
}
fixed_rectangle._superclassnames = []
fixed_rectangle._privpropdict = {
}
fixed_rectangle._privelemdict = {
}
type_class_info._superclassnames = []
type_class_info._privpropdict = {
}
type_class_info._privelemdict = {
}
location_reference._superclassnames = []
location_reference._privpropdict = {
}
location_reference._privelemdict = {
}
long_fixed_point._superclassnames = []
long_fixed_point._privpropdict = {
}
long_fixed_point._privelemdict = {
}
long_fixed_rectangle._superclassnames = []
long_fixed_rectangle._privpropdict = {
}
long_fixed_rectangle._privelemdict = {
}
long_fixed._superclassnames = []
long_fixed._privpropdict = {
}
long_fixed._privelemdict = {
}
long_point._superclassnames = []
long_point._privpropdict = {
}
long_point._privelemdict = {
}
long_rectangle._superclassnames = []
long_rectangle._privpropdict = {
}
long_rectangle._privelemdict = {
}
machine_location._superclassnames = []
machine_location._privpropdict = {
}
machine_location._privelemdict = {
}
unsigned_integer._superclassnames = []
unsigned_integer._privpropdict = {
}
unsigned_integer._privelemdict = {
}
null._superclassnames = []
null._privpropdict = {
}
null._privelemdict = {
}
type_property_info._superclassnames = []
type_property_info._privpropdict = {
}
type_property_info._privelemdict = {
}
type_parameter_info._superclassnames = []
type_parameter_info._privpropdict = {
}
type_parameter_info._privelemdict = {
}
bounding_rectangle._superclassnames = []
bounding_rectangle._privpropdict = {
}
bounding_rectangle._privelemdict = {
}
small_integer._superclassnames = []
small_integer._privpropdict = {
}
small_integer._privelemdict = {
}
small_real._superclassnames = []
small_real._privpropdict = {
}
small_real._privelemdict = {
}
scrap_styles._superclassnames = []
scrap_styles._privpropdict = {
}
scrap_styles._privelemdict = {
}
type_suite_info._superclassnames = []
type_suite_info._privpropdict = {
}
type_suite_info._privelemdict = {
}
target_id._superclassnames = []
target_id._privpropdict = {
}
target_id._privelemdict = {
}
dash_style._superclassnames = []
dash_style._privpropdict = {
}
dash_style._privelemdict = {
}
pixel_map_record._superclassnames = []
pixel_map_record._privpropdict = {
}
pixel_map_record._privelemdict = {
}
RGB16_color._superclassnames = []
RGB16_color._privpropdict = {
}
RGB16_color._privelemdict = {
}
RGB96_color._superclassnames = []
RGB96_color._privpropdict = {
}
RGB96_color._privelemdict = {
}
rotation._superclassnames = []
rotation._privpropdict = {
}
rotation._privelemdict = {
}
version._superclassnames = []
version._privpropdict = {
}
version._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'EPS ' : PostScript_picture,
'QDpt' : point,
'TEXT' : string,
'TIFF' : TIFF_picture,
'aete' : application_dictionary,
'aeut' : system_dictionary,
'clrt' : color_table,
'cmen' : menu_item,
'cmnu' : menu,
'comp' : double_integer,
'elin' : type_element_info,
'evin' : type_event_info,
'exte' : extended_real,
'fixd' : fixed,
'fpnt' : fixed_point,
'frct' : fixed_rectangle,
'gcli' : type_class_info,
'insl' : location_reference,
'lfpt' : long_fixed_point,
'lfrc' : long_fixed_rectangle,
'lfxd' : long_fixed,
'lpnt' : long_point,
'lrct' : long_rectangle,
'mLoc' : machine_location,
'magn' : unsigned_integer,
'null' : null,
'pinf' : type_property_info,
'pmin' : type_parameter_info,
'qdrt' : bounding_rectangle,
'shor' : small_integer,
'sing' : small_real,
'styl' : scrap_styles,
'suin' : type_suite_info,
'targ' : target_id,
'tdas' : dash_style,
'tpmm' : pixel_map_record,
'tr16' : RGB16_color,
'tr96' : RGB96_color,
'trot' : rotation,
'vers' : version,
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
| gpl-2.0 |
emidln/django_roa | env/lib/python2.7/site-packages/django/views/decorators/http.py | 151 | 7044 | """
Decorators for views based on HTTP headers.
"""
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from calendar import timegm
from datetime import timedelta
from django.utils.decorators import decorator_from_middleware, available_attrs
from django.utils.http import http_date, parse_http_date_safe, parse_etags, quote_etag
from django.utils.log import getLogger
from django.middleware.http import ConditionalGetMiddleware
from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
logger = getLogger('django.request')
def require_http_methods(request_method_list):
    """
    Decorator to make a view only accept particular request methods. Usage::

        @require_http_methods(["GET", "POST"])
        def my_view(request):
            # I can assume now that only GET or POST requests make it this far
            # ...

    Note that request methods should be in uppercase.
    """
    def decorator(func):
        def inner(request, *args, **kwargs):
            # Allowed method: hand straight off to the wrapped view.
            if request.method in request_method_list:
                return func(request, *args, **kwargs)
            # Anything else gets a 405 plus a trace in the request log.
            logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path),
                extra={
                    'status_code': 405,
                    'request': request
                }
            )
            return HttpResponseNotAllowed(request_method_list)
        return wraps(func, assigned=available_attrs(func))(inner)
    return decorator
# Ready-made decorators for the two most common method restrictions.
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accept the GET method."

require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accept the POST method."
def condition(etag_func=None, last_modified_func=None):
    """
    Decorator to support conditional retrieval (or change) for a view
    function.
    The parameters are callables to compute the ETag and last modified time for
    the requested resource, respectively. The callables are passed the same
    parameters as the view itself. The Etag function should return a string (or
    None if the resource doesn't exist), whilst the last_modified function
    should return a datetime object (or None if the resource doesn't exist).
    If both parameters are provided, all the preconditions must be met before
    the view is processed.
    This decorator will either pass control to the wrapped view function or
    return an HTTP 304 response (unmodified) or 412 response (preconditions
    failed), depending upon the request method.
    Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
    plus If-modified-since headers) will result in the view function being
    called.
    """
    def decorator(func):
        def inner(request, *args, **kwargs):
            # Get HTTP request headers
            if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
            if if_modified_since:
                # Parsed to a Unix timestamp; None if the date is malformed.
                if_modified_since = parse_http_date_safe(if_modified_since)
            if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
            if_match = request.META.get("HTTP_IF_MATCH")
            if if_none_match or if_match:
                # There can be more than one ETag in the request, so we
                # consider the list of values.
                try:
                    etags = parse_etags(if_none_match or if_match)
                except ValueError:
                    # In case of invalid etag ignore all ETag headers.
                    # Apparently Opera sends invalidly quoted headers at times
                    # (we should be returning a 400 response, but that's a
                    # little extreme) -- this is Django bug #10681.
                    if_none_match = None
                    if_match = None
            # Compute values (if any) for the requested resource.
            if etag_func:
                res_etag = etag_func(request, *args, **kwargs)
            else:
                res_etag = None
            if last_modified_func:
                dt = last_modified_func(request, *args, **kwargs)
                if dt:
                    # Compare as a Unix timestamp, like if_modified_since.
                    res_last_modified = timegm(dt.utctimetuple())
                else:
                    res_last_modified = None
            else:
                res_last_modified = None
            # response stays None unless one of the precondition branches
            # below decides to short-circuit the view.
            response = None
            if not ((if_match and (if_modified_since or if_none_match)) or
                    (if_match and if_none_match)):
                # We only get here if no undefined combinations of headers are
                # specified.
                if ((if_none_match and (res_etag in etags or
                        "*" in etags and res_etag)) and
                        (not if_modified_since or
                            (res_last_modified and if_modified_since and
                            res_last_modified <= if_modified_since))):
                    # If-None-Match matched: 304 for safe methods, 412 for
                    # state-changing ones.
                    if request.method in ("GET", "HEAD"):
                        response = HttpResponseNotModified()
                    else:
                        logger.warning('Precondition Failed: %s' % request.path,
                            extra={
                                'status_code': 412,
                                'request': request
                            }
                        )
                        response = HttpResponse(status=412)
                elif if_match and ((not res_etag and "*" in etags) or
                        (res_etag and res_etag not in etags)):
                    # If-Match failed to match the current ETag.
                    logger.warning('Precondition Failed: %s' % request.path,
                        extra={
                            'status_code': 412,
                            'request': request
                        }
                    )
                    response = HttpResponse(status=412)
                elif (not if_none_match and request.method == "GET" and
                        res_last_modified and if_modified_since and
                        res_last_modified <= if_modified_since):
                    # Not modified since the client's cached copy.
                    response = HttpResponseNotModified()
            if response is None:
                response = func(request, *args, **kwargs)
            # Set relevant headers on the response if they don't already exist.
            if res_last_modified and not response.has_header('Last-Modified'):
                response['Last-Modified'] = http_date(res_last_modified)
            if res_etag and not response.has_header('ETag'):
                response['ETag'] = quote_etag(res_etag)
            return response
        return inner
    return decorator
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
    """Shortcut for ``condition()`` that checks only the ETag."""
    return condition(etag_func=etag_func)
def last_modified(last_modified_func):
    """Shortcut for ``condition()`` that checks only Last-Modified."""
    return condition(last_modified_func=last_modified_func)
| bsd-3-clause |
gmacchi93/serverInfoParaguay | apps/venv/lib/python2.7/site-packages/django/contrib/gis/forms/widgets.py | 422 | 3659 | from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.gis import gdal
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Widget
from django.template import loader
from django.utils import six, translation
logger = logging.getLogger('django.contrib.gis')
class BaseGeometryWidget(Widget):
    """
    The base class for rich geometry widgets.
    Renders a map using the WKT of the geometry.
    """
    geom_type = 'GEOMETRY'
    map_srid = 4326
    map_width = 600
    map_height = 400
    display_raw = False
    supports_3d = False
    template_name = '' # set on subclasses

    def __init__(self, attrs=None):
        # Seed the widget attrs from the class-level map settings, then let
        # any explicitly passed attrs override them.
        self.attrs = {}
        for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'):
            self.attrs[key] = getattr(self, key)
        if attrs:
            self.attrs.update(attrs)

    def serialize(self, value):
        """Return the WKT representation of *value* ('' for a falsy value)."""
        return value.wkt if value else ''

    def deserialize(self, value):
        """Build a GEOSGeometry from *value*; log and return None on failure."""
        try:
            return GEOSGeometry(value, self.map_srid)
        except (GEOSException, ValueError) as err:
            logger.error(
                "Error creating geometry from value '%s' (%s)" % (
                value, err)
            )
            return None

    def render(self, name, value, attrs=None):
        """Render the map template with the serialized geometry in context."""
        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if isinstance(value, six.string_types):
            value = self.deserialize(value)
        if value:
            # Check that srid of value and map match
            if value.srid != self.map_srid:
                try:
                    # Transform via GDAL/OGR; on failure the untransformed
                    # value is rendered as-is (only an error is logged).
                    ogr = value.ogr
                    ogr.transform(self.map_srid)
                    value = ogr
                except gdal.GDALException as err:
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
                        value.srid, self.map_srid, err)
                    )
        context = self.build_attrs(
            attrs,
            name=name,
            module='geodjango_%s' % name.replace('-', '_'),  # JS-safe
            serialized=self.serialize(value),
            geom_type=gdal.OGRGeomType(self.attrs['geom_type']),
            STATIC_URL=settings.STATIC_URL,
            LANGUAGE_BIDI=translation.get_language_bidi(),
        )
        return loader.render_to_string(self.template_name, context)
class OpenLayersWidget(BaseGeometryWidget):
    """Geometry widget rendered with a plain OpenLayers map."""
    template_name = 'gis/openlayers.html'

    class Media:
        js = (
            'http://openlayers.org/api/2.13/OpenLayers.js',
            'gis/js/OLMapWidget.js',
        )
class OSMWidget(BaseGeometryWidget):
    """
    An OpenLayers/OpenStreetMap-based widget.
    """
    template_name = 'gis/openlayers-osm.html'
    # default_lon/default_lat are copied into self.attrs below; presumably
    # they set the initial map center in degrees -- confirm in OLMapWidget.js.
    default_lon = 5
    default_lat = 47

    class Media:
        js = (
            'http://openlayers.org/api/2.13/OpenLayers.js',
            'http://www.openstreetmap.org/openlayers/OpenStreetMap.js',
            'gis/js/OLMapWidget.js',
        )

    def __init__(self, attrs=None):
        # Let the base class fill in the standard map attrs first, then add
        # the OSM-specific defaults and finally any explicit overrides.
        super(OSMWidget, self).__init__()
        for key in ('default_lon', 'default_lat'):
            self.attrs[key] = getattr(self, key)
        if attrs:
            self.attrs.update(attrs)

    @property
    def map_srid(self):
        # Use the official spherical mercator projection SRID when GDAL is
        # available; otherwise, fallback to 900913.
        if gdal.HAS_GDAL:
            return 3857
        else:
            return 900913
| apache-2.0 |
rahuldan/sympy | sympy/polys/tests/test_sqfreetools.py | 123 | 4389 | """Tests for square-free decomposition algorithms and related tools. """
from sympy.polys.rings import ring
from sympy.polys.domains import FF, ZZ, QQ
from sympy.polys.specialpolys import f_polys
from sympy.utilities.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_sqf():
    """Square-free part/predicate/decomposition of univariate polynomials."""
    R, x = ring("x", ZZ)

    # Zero and constants are square-free by convention.
    assert R.dup_sqf_part(0) == 0
    assert R.dup_sqf_p(0) is True
    assert R.dup_sqf_part(7) == 1
    assert R.dup_sqf_p(7) is True
    assert R.dup_sqf_part(2*x + 2) == x + 1
    assert R.dup_sqf_p(2*x + 2) is True
    assert R.dup_sqf_part(x**3 + x + 1) == x**3 + x + 1
    assert R.dup_sqf_p(x**3 + x + 1) is True
    # The square-free part is normalized to a positive leading coefficient.
    assert R.dup_sqf_part(-x**3 + x + 1) == x**3 - x - 1
    assert R.dup_sqf_p(-x**3 + x + 1) is True
    assert R.dup_sqf_part(2*x**3 + 3*x**2) == 2*x**2 + 3*x
    assert R.dup_sqf_p(2*x**3 + 3*x**2) is False
    assert R.dup_sqf_part(-2*x**3 + 3*x**2) == 2*x**2 - 3*x
    assert R.dup_sqf_p(-2*x**3 + 3*x**2) is False

    # Full decomposition: (content, [(factor, multiplicity), ...]).
    assert R.dup_sqf_list(0) == (0, [])
    assert R.dup_sqf_list(1) == (1, [])
    assert R.dup_sqf_list(x) == (1, [(x, 1)])
    assert R.dup_sqf_list(2*x**2) == (2, [(x, 2)])
    assert R.dup_sqf_list(3*x**3) == (3, [(x, 3)])
    assert R.dup_sqf_list(-x**5 + x**4 + x - 1) == \
        (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
    assert R.dup_sqf_list(x**8 + 6*x**6 + 12*x**4 + 8*x**2) == \
        ( 1, [(x, 2), (x**2 + 2, 3)])
    assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
    R, x = ring("x", QQ)
    assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])

    # Over finite fields the decomposition differs (Frobenius).
    R, x = ring("x", FF(2))
    assert R.dup_sqf_list(x**2 + 1) == (1, [(x + 1, 2)])
    R, x = ring("x", FF(3))
    assert R.dup_sqf_list(x**10 + 2*x**7 + 2*x**4 + x) == \
        (1, [(x, 1),
             (x + 1, 3),
             (x + 2, 6)])

    # Same polynomial, different ground domains: x**3 + 1 is square-free
    # over ZZ but not over GF(3), where it equals (x + 1)**3.
    R1, x = ring("x", ZZ)
    R2, y = ring("y", FF(3))
    f = x**3 + 1
    g = y**3 + 1
    assert R1.dup_sqf_part(f) == f
    assert R2.dup_sqf_part(g) == y + 1
    assert R1.dup_sqf_p(f) is True
    assert R2.dup_sqf_p(g) is False

    # Decomposing a resultant with respect to the remaining variable.
    R, x, y = ring("x,y", ZZ)
    A = x**4 - 3*x**2 + 6
    D = x**6 - 5*x**4 + 5*x**2 + 4
    f, g = D, R.dmp_sub(A, R.dmp_mul(R.dmp_diff(D, 1), y))
    res = R.dmp_resultant(f, g)
    h = (4*y**2 + 1).drop(x)
    assert R.drop(x).dup_sqf_list(res) == (45796, [(h, 3)])

    # Polynomial ground domain: the content may itself be a polynomial.
    Rt, t = ring("t", ZZ)
    R, x = ring("x", Rt)
    assert R.dup_sqf_list_include(t**3*x**2) == [(t**3, 1), (x, 2)]
def test_dmp_sqf():
    """Square-free routines for multivariate (dmp) polynomials."""
    R, x, y = ring("x,y", ZZ)

    # Trivial inputs behave like the univariate case.
    assert R.dmp_sqf_part(0) == 0
    assert R.dmp_sqf_p(0) is True
    assert R.dmp_sqf_part(7) == 1
    assert R.dmp_sqf_p(7) is True
    assert R.dmp_sqf_list(3) == (3, [])
    assert R.dmp_sqf_list_include(3) == [(3, 1)]

    # Squares of the specialpolys fixtures are never square-free;
    # f_5 = (x + y - z)**3 * w is not square-free to begin with.
    R, x, y, z = ring("x,y,z", ZZ)
    assert R.dmp_sqf_p(f_0) is True
    assert R.dmp_sqf_p(f_0**2) is False
    assert R.dmp_sqf_p(f_1) is True
    assert R.dmp_sqf_p(f_1**2) is False
    assert R.dmp_sqf_p(f_2) is True
    assert R.dmp_sqf_p(f_2**2) is False
    assert R.dmp_sqf_p(f_3) is True
    assert R.dmp_sqf_p(f_3**2) is False
    assert R.dmp_sqf_p(f_5) is False
    assert R.dmp_sqf_p(f_5**2) is False
    assert R.dmp_sqf_p(f_4) is True
    assert R.dmp_sqf_part(f_4) == -f_4
    assert R.dmp_sqf_part(f_5) == x + y - z
    R, x, y, z, t = ring("x,y,z,t", ZZ)
    assert R.dmp_sqf_p(f_6) is True
    assert R.dmp_sqf_part(f_6) == f_6

    # dmp_* on a single variable must agree with the dup_* results.
    R, x = ring("x", ZZ)
    f = -x**5 + x**4 + x - 1
    assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
    assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
    R, x, y = ring("x,y", ZZ)
    f = -x**5 + x**4 + x - 1
    assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
    assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
    f = -x**2 + 2*x - 1
    assert R.dmp_sqf_list_include(f) == [(-1, 1), (x - 1, 2)]

    # Multivariate decomposition over finite fields is not implemented.
    R, x, y = ring("x,y", FF(2))
    raises(NotImplementedError, lambda: R.dmp_sqf_list(y**2 + 1))
def test_dup_gff_list():
    """Greatest factorial factorization of univariate polynomials over ZZ."""
    R, x = ring("x", ZZ)
    f = x**5 + 2*x**4 - x**3 - 2*x**2
    assert R.dup_gff_list(f) == [(x, 1), (x + 2, 4)]
    g = x**9 - 20*x**8 + 166*x**7 - 744*x**6 + 1965*x**5 - 3132*x**4 + 2948*x**3 - 1504*x**2 + 320*x
    assert R.dup_gff_list(g) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
    # The zero polynomial has no GFF.
    raises(ValueError, lambda: R.dup_gff_list(0))
| bsd-3-clause |
wskplho/sl4a | python/src/Demo/tkinter/matt/canvas-gridding.py | 47 | 2271 | from Tkinter import *
# this is the same as simple-demo-1.py, but uses
# subclassing.
# note that there is no explicit call to start Tk.
# Tkinter is smart enough to start the system if it's not already going.
class Test(Frame):
    """Demo frame: drag with the left mouse button to draw a rubber-band
    rectangle whose corners snap to a 50-pixel grid."""

    def printit(self):
        # Unused debug helper left over from the original demo.
        print "hi"

    def createWidgets(self):
        """Build the QUIT button and the 5x5 inch drawing canvas."""
        self.QUIT = Button(self, text='QUIT',
                                  background='red',
                                  foreground='white',
                                  height=3,
                                  command=self.quit)
        self.QUIT.pack(side=BOTTOM, fill=BOTH)
        self.canvasObject = Canvas(self, width="5i", height="5i")
        self.canvasObject.pack(side=LEFT)

    def mouseDown(self, event):
        """Record the (gridded) canvas position where the drag started."""
        # canvas x and y take the screen coords from the event and translate
        # them into the coordinate system of the canvas object
        self.startx = self.canvasObject.canvasx(event.x, self.griddingSize)
        self.starty = self.canvasObject.canvasy(event.y, self.griddingSize)

    def mouseMotion(self, event):
        """Redraw the rubber-band rectangle as the mouse moves."""
        # canvas x and y take the screen coords from the event and translate
        # them into the coordinate system of the canvas object
        x = self.canvasObject.canvasx(event.x, self.griddingSize)
        y = self.canvasObject.canvasy(event.y, self.griddingSize)
        if (self.startx != event.x)  and (self.starty != event.y) :
            # Replace the previous rectangle rather than accumulating them.
            self.canvasObject.delete(self.rubberbandBox)
            self.rubberbandBox = self.canvasObject.create_rectangle(
                self.startx, self.starty, x, y)
            # this flushes the output, making sure that
            # the rectangle makes it to the screen
            # before the next event is handled
            self.update_idletasks()

    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
        # this is a "tagOrId" for the rectangle we draw on the canvas
        self.rubberbandBox = None
        # this is the size of the gridding squares
        self.griddingSize = 50
        Widget.bind(self.canvasObject, "<Button-1>", self.mouseDown)
        Widget.bind(self.canvasObject, "<Button1-Motion>", self.mouseMotion)
# Instantiate the demo frame and hand control to the Tk event loop.
test = Test()
test.mainloop()
| apache-2.0 |
team-xue/xue | xue/cms/tests/apphooks.py | 2 | 5527 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.apphook_pool import apphook_pool
from cms.appresolver import applications_page_check, clear_app_resolvers
from cms.models.titlemodels import Title
from cms.test.testcases import CMSTestCase
from cms.test.util.context_managers import SettingsOverride
from django.contrib.auth.models import User
from django.core.urlresolvers import clear_url_caches, reverse
import sys
APP_NAME = 'SampleApp'
APP_MODULE = "testapp.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
    def setUp(self):
        # Apphook URL resolvers and Django's URLconf cache are module-global,
        # so each test starts from a clean slate.
        clear_app_resolvers()
        clear_url_caches()

    def test_01_explicit_apphooks(self):
        """
        Test explicit apphook loading with the CMS_APPHOOKS setting.
        """
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        apphooks = (
            '%s.%s' % (APP_MODULE, APP_NAME),
        )
        with SettingsOverride(CMS_APPHOOKS=apphooks):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()

    def test_02_implicit_apphooks(self):
        """
        Test implicit apphook loading with INSTALLED_APPS + cms_app.py
        """
        # Force a re-import so discovery actually runs under the overridden
        # settings.
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        apps = ['testapp.sampleapp']
        with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='testapp.urls_for_apphook_tests'):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()

    def test_03_apphook_on_root(self):
        """An apphook attached to the root page serves the app at '/'."""
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        with SettingsOverride(ROOT_URLCONF='testapp.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = self.create_page(user=superuser, published=True)
            english_title = page.title_set.all()[0]
            self.assertEquals(english_title.language, 'en')
            # Add a second (German) title so the page exists in both languages.
            Title.objects.create(
                language='de',
                title='%s DE' % english_title.title,
                slug=english_title.slug,
                path=english_title.path,
                page=page,
            )
            page.title_set.all().update(application_urls='SampleApp')
            self.assertTrue(page.publish())
            response = self.client.get(self.get_pages_root())
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            apphook_pool.clear()
def test_04_get_page_for_apphook(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
with SettingsOverride(ROOT_URLCONF='testapp.second_urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
page = self.create_page(user=superuser, published=True)
self.create_title(page.get_title(), page.get_slug(), 'de', page)
child_page = self.create_page(page, user=superuser, published=True)
self.create_title(child_page.get_title(), child_page.get_slug(), 'de', child_page)
child_child_page = self.create_page(child_page, user=superuser, published=True)
self.create_title(child_child_page.get_title(), child_child_page.get_slug(), 'de', child_child_page)
child_child_page.title_set.all().update(application_urls='SampleApp')
child_child_page.publish()
# publisher_public is set to draft on publish, issue with onetoone reverse
child_child_page = self.reload(child_child_page)
en_title = child_child_page.publisher_public.get_title_obj('en')
path = reverse('en:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, en_title.title)
de_title = child_child_page.publisher_public.get_title_obj('de')
path = reverse('de:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'de'
attached_to_page = applications_page_check(request, path=path[4:]) # strip leading slash and language prefix
self.assertEquals(attached_to_page.pk, de_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, de_title.title)
apphook_pool.clear() | bsd-3-clause |
thaim/ansible | lib/ansible/modules/network/system/_net_user.py | 20 | 4718 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on network device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
deprecated:
removed_in: "2.13"
alternative: Use platform-specific "[netos]_user" module
why: Updated modules released with more functionality
extends_documentation_fragment: network_agnostic
options:
aggregate:
description:
- The set of username objects to be configured on the remote
network device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
name:
description:
- The username to be configured on the remote network device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the remote network device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
role:
description:
- Configures the role for the username in the
device running configuration. The argument accepts a string value
defining the role name. This argument does not check if the role
has been configured on the device.
sshkey:
description:
- Specifies the SSH public key to configure
for the given username. This argument accepts a valid SSH key value.
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
net_user:
name: ansible
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: remove all users except admin
net_user:
purge: yes
- name: set multiple users to privilege level 15
net_user:
aggregate:
- { name: netop }
- { name: netend }
privilege: 15
state: present
- name: Change Password for User netop
net_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
| mit |
angstwad/ansible | contrib/inventory/lxc_inventory.py | 89 | 2556 | #!/usr/bin/env python
#
# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
# <florian@hastexo.com>
# Based in part on:
# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import print_function
import sys
import lxc
import json
def build_dict():
    """Returns a dictionary keyed to the defined LXC groups. All
    containers, including the ones not in any group, are included in the
    "all" group."""
    # Map every container name to its group list; each container is an
    # implicit member of 'all'.
    containers = {}
    for name in lxc.list_containers():
        extra_groups = lxc.Container(name).get_config_item('lxc.group') or []
        containers[name] = ['all'] + extra_groups
    # Union of every group name seen across all containers.
    group_names = set()
    for membership in containers.values():
        group_names.update(membership)
    # One inventory entry per group, in the structure Ansible expects.
    return dict((group,
                 {'hosts': [name for name, membership in containers.items()
                            if group in membership],
                  'vars': {'ansible_connection': 'lxc'}})
                for group in group_names)
def main(argv):
    """Returns a JSON dictionary as expected by Ansible.

    Supports the standard dynamic-inventory CLI:
      --list         dump the whole group/host structure
      --host <host>  dump host variables (static: ansible_connection=lxc)
    Exits non-zero on a usage error so callers can detect failure.
    """
    result = build_dict()
    if len(argv) == 2 and argv[1] == '--list':
        json.dump(result, sys.stdout)
    elif len(argv) == 3 and argv[1] == '--host':
        # Host vars are identical for every container.
        json.dump({'ansible_connection': 'lxc'}, sys.stdout)
    else:
        print("Need an argument, either --list or --host <host>", file=sys.stderr)
        # Bug fix: previously exited 0 on a usage error, which Ansible
        # would interpret as a successful (empty) inventory run.
        sys.exit(1)
| gpl-3.0 |
CM-zenfone2/android_kernel_asus_moorefield | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
	"""Return the interval from src to dst, converted from nsec to msec."""
	delta_nsec = dst - src
	return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
	"""Print one line of the tx chart for a completed transmit hunk.

	Columns: device, packet length, queue timestamp, Qdisc latency
	(queue -> xmit) and device latency (xmit -> free), both in msec.
	"""
	# Honor the "dev=" option: skip hunks for other devices.
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
	"""Print the rx chart for one NET_RX softirq hunk.

	Shows each hardirq that led to this softirq (with any netif_rx events
	it raised), the softirq entry, and then every napi_poll /
	netif_receive_skb event, annotated with the eventual consumer
	(skb_copy_datagram_iovec) or free (kfree_skb / consume_skb).
	All times are offsets (msec) from the first irq entry.
	"""
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	# Base timestamp: entry of the first irq in the hunk.
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed (honor the "dev=" option)
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	# One section per hardirq, including its netif_rx events.
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	# Events collected while the NET_RX softirq was running.
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			# 'comm' means a process copied the skb to userspace;
			# 'handle' means the skb was freed (kfree/consume).
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
				print PF_JOINT
def trace_begin():
	"""Parse script options from sys.argv into the global flags.

	Recognized options: 'tx', 'rx', 'dev=<name>', 'debug'.
	If neither tx nor rx is requested, both charts are shown.
	"""
	global show_tx
	global show_rx
	global dev
	global debug
	# sys.argv[0] is the script name; everything after it is an option.
	for arg in sys.argv[1:]:
		if arg == 'tx':
			show_tx = 1
		elif arg == 'rx':
			show_rx = 1
		elif arg.find('dev=', 0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# Default: neither chart selected means both are shown.
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
	"""Sort all buffered tracepoint events chronologically, dispatch each
	to its handler to build the rx/tx hunks, then print the requested
	charts (and buffer statistics in debug mode)."""
	# order all events in time (Python 2 cmp-style sort)
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events: dispatch on the tracepoint name
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)
	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])
	# display transmit hunks
	if show_tx:
		print "   dev    len      Qdisc        " \
			"       netdevice             free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])
	# with the 'debug' option, report how full the matching buffers got
	if debug:
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
# Each callback below merely timestamps the event (nsecs) and buffers it in
# all_event_list; the real processing happens in trace_end() once all events
# can be sorted chronologically.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
	# Only NET_RX softirqs are of interest to this script.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
	# NOTE(review): the symbol table name "irq__softirq_entry" is reused
	# here (and in softirq_raise); presumably the vec symbol mapping is
	# shared across these tracepoints — confirm against perf's Util.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			irq, irq_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			irq, irq_name)
	all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
	all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			napi, dev_name)
	all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, rc, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc ,dev_name)
	all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, protocol, location):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, protocol, location)
	all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr)
	all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
	skbaddr, skblen):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen)
	all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
	"""Open a hardirq record and push it on this CPU's irq stack."""
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	stack = irq_dic.setdefault(cpu, [])
	stack.append({'irq': irq, 'name': irq_name,
		      'cpu': cpu, 'irq_ent_t': time})
def handle_irq_handler_exit(event_info):
	"""Close the topmost irq record on this CPU; keep it only if it
	accumulated events (i.e. it raised NET_RX work)."""
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic:
		return
	record = irq_dic[cpu].pop()
	# Mismatched exit: the popped record is discarded.
	if irq != record['irq']:
		return
	record['irq_ext_t'] = time
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in record:
		irq_dic[cpu].append(record)
def handle_irq_softirq_raise(event_info):
	"""Attach a NET_RX 'sirq_raise' event to the current irq record."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
		return
	record = irq_dic[cpu].pop()
	events = record.get('event_list', [])
	events.append({'time': time, 'event': 'sirq_raise'})
	record['event_list'] = events
	irq_dic[cpu].append(record)
def handle_irq_softirq_entry(event_info):
	"""Begin a NET_RX softirq window on this CPU."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	net_rx_dic[cpu] = {'event_list': [], 'sirq_ent_t': time}
def handle_irq_softirq_exit(event_info):
	"""Close a NET_RX softirq: merge its irqs and softirq events into one
	receive hunk, then reset the per-CPU state."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = []
	event_list = 0
	if cpu in irq_dic:
		irq_list = irq_dic.pop(cpu)
	if cpu in net_rx_dic:
		window = net_rx_dic.pop(cpu)
		sirq_ent_t = window['sirq_ent_t']
		event_list = window['event_list']
	# Without both an irq stack and a softirq window there is nothing
	# to report for this CPU.
	if irq_list == [] or event_list == 0:
		return
	# merge information related to a NET_RX softirq
	receive_hunk_list.append({'sirq_ent_t': sirq_ent_t,
				  'sirq_ext_t': time,
				  'irq_list': irq_list,
				  'event_list': event_list})
def handle_napi_poll(event_info):
	"""Append a napi_poll event to the in-flight NET_RX softirq, if any."""
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	if cpu not in net_rx_dic:
		return
	net_rx_dic[cpu]['event_list'].append({'event_name': 'napi_poll',
					      'dev': dev_name,
					      'event_t': time})
def handle_netif_rx(event_info):
	"""Attach a netif_rx event to the current irq record on this CPU."""
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
		return
	record = irq_dic[cpu].pop()
	events = record.get('event_list', [])
	events.append({'time': time, 'event': 'netif_rx',
		'skbaddr': skbaddr, 'skblen': skblen, 'dev_name': dev_name})
	record['event_list'] = events
	irq_dic[cpu].append(record)
def handle_netif_receive_skb(event_info):
	"""Record a received skb in the softirq window and remember it for
	later matching against datagram-copy / free events."""
	global of_count_rx_skb_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in net_rx_dic:
		return
	rec_data = {'event_name': 'netif_receive_skb',
		    'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
	net_rx_dic[cpu]['event_list'].append(rec_data)
	# Newest first; the matching buffer is bounded and overflows counted.
	rx_skb_list.insert(0, rec_data)
	if len(rx_skb_list) > buffer_budget:
		rx_skb_list.pop()
		of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
	"""Record a packet entering the Qdisc (dev_queue_xmit)."""
	global of_count_tx_queue_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	entry = {'dev': dev_name, 'skbaddr': skbaddr,
		 'len': skblen, 'queue_t': time}
	# Newest first; drop the oldest when the budget is exceeded.
	tx_queue_list.insert(0, entry)
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
	"""On a successful xmit, move the matching queued skb into the
	(bounded) xmit list and timestamp it."""
	global of_count_tx_xmit_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
	if rc != 0:  # anything other than NETDEV_TX_OK is ignored
		return
	for idx, skb in enumerate(tx_queue_list):
		if skb['skbaddr'] == skbaddr:
			skb['xmit_t'] = time
			tx_xmit_list.insert(0, skb)
			del tx_queue_list[idx]
			if len(tx_xmit_list) > buffer_budget:
				tx_xmit_list.pop()
				of_count_tx_xmit_list += 1
			return
def handle_kfree_skb(event_info):
	"""Match a freed skb against the tx-queue, tx-xmit and rx buffers."""
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
	# Freed straight out of the Qdisc: the packet never hit the device.
	for idx, skb in enumerate(tx_queue_list):
		if skb['skbaddr'] == skbaddr:
			del tx_queue_list[idx]
			return
	# Freed after transmission: complete the tx record.
	for idx, skb in enumerate(tx_xmit_list):
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[idx]
			return
	# Freed on the receive side: annotate the matching rx record.
	for idx, rec_data in enumerate(rx_skb_list):
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle': "kfree_skb",
					 'comm': comm, 'pid': pid,
					 'comm_t': time})
			del rx_skb_list[idx]
			return
def handle_consume_skb(event_info):
	"""A transmitted skb was consumed: close out its tx record."""
	(name, context, cpu, time, pid, comm, skbaddr) = event_info
	for idx, skb in enumerate(tx_xmit_list):
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[idx]
			return
def handle_skb_copy_datagram_iovec(event_info):
	"""A process copied a received skb to userspace: annotate the rx
	record with the consumer and retire it from the matching buffer."""
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for idx, rec_data in enumerate(rx_skb_list):
		if skbaddr == rec_data['skbaddr']:
			rec_data.update({'handle': "skb_copy_datagram_iovec",
					 'comm': comm, 'pid': pid,
					 'comm_t': time})
			del rx_skb_list[idx]
			return
| gpl-2.0 |
mishravikas/geonode-permissions | geonode/base/templatetags/base_tags.py | 2 | 2227 | from django import template
from agon_ratings.models import Rating
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from guardian.shortcuts import get_objects_for_user
from geonode import settings
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
register = template.Library()
@register.assignment_tag
def num_ratings(obj):
    """Return the number of Rating rows attached to *obj*."""
    ct = ContentType.objects.get_for_model(obj)
    # Bug fix: len(queryset) fetched and materialized every Rating row just
    # to count them; .count() issues a single COUNT(*) query instead.
    return Rating.objects.filter(object_id=obj.pk, content_type=ct).count()
@register.assignment_tag(takes_context=True)
def facets(context):
    """Assignment tag returning a dict of facet counts (raster/vector/remote
    layers, plus maps/documents and, for the home page, users/layers),
    honoring per-user permissions unless settings.SKIP_PERMS_FILTER is set.
    """
    request = context['request']
    facets = {
        'raster': 0,
        'vector': 0,
    }
    # Layer ids partitioned by backing store type.
    vectors = Layer.objects.filter(storeType='dataStore').values_list('id', flat=True)
    rasters = Layer.objects.filter(storeType='coverageStore').values_list('id', flat=True)
    remote = Layer.objects.filter(storeType='remoteStore').values_list('id', flat=True)
    if settings.SKIP_PERMS_FILTER:
        # No permission filtering: count everything.
        facets['raster'] = rasters.count()
        facets['vector'] = vectors.count()
        facets['remote'] = remote.count()
    else:
        # Restrict counts to resources the current user may view.
        resources = get_objects_for_user(request.user, 'base.view_resourcebase')
        facets['raster'] = resources.filter(id__in=rasters).count()
        facets['vector'] = resources.filter(id__in=vectors).count()
        facets['remote'] = resources.filter(id__in=remote).count()
    facet_type = context['facet_type'] if 'facet_type' in context else 'all'
    # Break early if only_layers is set.
    if facet_type == 'layers':
        return facets
    # NOTE: 'resources' is only bound in the non-SKIP_PERMS_FILTER branch
    # above; the uses below are guarded by the same flag, so that is safe.
    if settings.SKIP_PERMS_FILTER:
        facets['map'] = Map.objects.all().count()
        facets['document'] = Document.objects.all().count()
    else:
        facets['map'] = resources.filter(id__in=Map.objects.values_list('id', flat=True)).count()
        facets['document'] = resources.filter(id__in=Document.objects.values_list('id', flat=True)).count()
    if facet_type == 'home':
        facets['user'] = get_user_model().objects.exclude(username='AnonymousUser').count()
        facets['layer'] = facets['raster'] + facets['vector'] + facets['remote']
    return facets
| gpl-3.0 |
w1kke/pylearn2 | pylearn2/models/tests/test_mlp.py | 2 | 31654 | from __future__ import print_function
from itertools import product
import numpy as np
from theano.compat import six
from theano.compat.six.moves import reduce, xrange
import theano
from theano import tensor, config
from nose.tools import assert_raises
from pylearn2.datasets.vector_spaces_dataset import VectorSpacesDataset
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.termination_criteria import EpochCounter
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.train import Train
from pylearn2.models.mlp import (FlattenerLayer, MLP, Linear, Softmax, Sigmoid,
exhaustive_dropout_average,
sampled_dropout_average, CompositeLayer,
mean_pool)
from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace
from pylearn2.utils import is_iterable, sharedX
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
class IdentityLayer(Linear):
    """A Linear layer whose fprop is the identity; used by the dropout
    tests below to observe masking effects directly."""

    # Masked inputs are replaced with -inf rather than 0
    # (exercised by test_dropout_input_mask_value).
    dropout_input_mask_value = -np.inf

    def fprop(self, state_below):
        """Pass the input through unchanged (weights/biases unused)."""
        return state_below
def test_masked_fprop():
    """Check MLP.masked_fprop: summing the outputs over all 16 dropout
    masks of a 2-2 identity-weight network gives known totals, and
    invalid masks / layer names raise ValueError."""
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0),
                              Linear(2, 'h1', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_biases(np.arange(3, 5, dtype=mlp.get_weights().dtype))
    # Verify that get_total_input_dimension works.
    np.testing.assert_equal(mlp.get_total_input_dimension(['h0', 'h1']), 4)
    inp = theano.tensor.matrix()
    # Accumulate the sum of output of all masked networks.
    l = []
    for mask in xrange(16):
        l.append(mlp.masked_fprop(inp, mask))
    outsum = reduce(lambda x, y: x + y, l)
    f = theano.function([inp], outsum, allow_input_downcast=True)
    np.testing.assert_equal(f([[5, 3]]), [[144., 144.]])
    np.testing.assert_equal(f([[2, 7]]), [[96., 208.]])
    # Out-of-range mask and unknown layer names must be rejected.
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 22)
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             ['h3'])
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             None, 2., {'h3': 4})
def test_sampled_dropout_average():
    """Smoke test for sampled_dropout_average: verifies that it compiles
    and runs, not any particular value."""
    inp = theano.tensor.matrix()
    mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0.8),
                              Linear(2, 'h1', irange=0.8),
                              Softmax(3, 'out', irange=0.8)])
    # Average the predictions of 5 sampled dropout sub-networks.
    out = sampled_dropout_average(mlp, inp, 5)
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])
def test_exhaustive_dropout_average():
    """Smoke test for exhaustive_dropout_average: verifies that it compiles
    and runs (with default args, input scales and masked layer subsets),
    and that unknown layer names raise ValueError."""
    inp = theano.tensor.matrix()
    mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0.8),
                              Linear(2, 'h1', irange=0.8),
                              Softmax(3, 'out', irange=0.8)])
    out = exhaustive_dropout_average(mlp, inp)
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])
    # With a per-layer input scale override.
    out = exhaustive_dropout_average(mlp, inp, input_scales={'h0': 3})
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])
    # With masking restricted to a subset of layers.
    out = exhaustive_dropout_average(mlp, inp, masked_input_layers=['h1'])
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])
    np.testing.assert_raises(ValueError, exhaustive_dropout_average, mlp,
                             inp, ['h5'])
    np.testing.assert_raises(ValueError, exhaustive_dropout_average, mlp,
                             inp, ['h0'], 2., {'h5': 3.})
def test_dropout_input_mask_value():
    """Check that a layer's dropout_input_mask_value (-inf here) is used
    for masked inputs instead of the default 0."""
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = -np.inf
    inp = theano.tensor.matrix()
    # Disable finite-value checking so the -inf output is not rejected.
    mode = theano.compile.mode.get_default_mode()
    mode.check_isfinite = False
    f = theano.function([inp], mlp.masked_fprop(inp, 1, default_input_scale=1),
                        allow_input_downcast=True, mode=mode)
    np.testing.assert_equal(f([[4., 3.]]), [[4., -np.inf]])
def test_sigmoid_layer_misclass_reporting():
    """Check that a Sigmoid layer in bit_vector_class monitor style reports
    a 'misclass' channel matching the empirical 0/1 error rate."""
    mlp = MLP(nvis=3, layers=[Sigmoid(layer_name='h0', dim=1, irange=0.005,
                                      monitor_style='bit_vector_class')])
    target = theano.tensor.matrix(dtype=theano.config.floatX)
    batch = theano.tensor.matrix(dtype=theano.config.floatX)
    rval = mlp.layers[0].get_layer_monitoring_channels(state_below=batch,
                                                       state=mlp.fprop(batch),
                                                       targets=target)
    # Compute both the thresholded predictions and the monitored misclass.
    f = theano.function([batch, target], [tensor.gt(mlp.fprop(batch), 0.5),
                                          rval['misclass']],
                        allow_input_downcast=True)
    rng = np.random.RandomState(0)

    for _ in range(10):  # repeat a few times for statistical strength
        targets = (rng.uniform(size=(30, 1)) > 0.5).astype('uint8')
        out, misclass = f(rng.normal(size=(30, 3)), targets)
        # Monitored misclass must equal the directly computed error rate.
        np.testing.assert_allclose((targets != out).mean(), misclass)
def test_batchwise_dropout():
    """Check dropout_fprop's per_example flag: with per_example=False the
    same mask applies to every row of a batch; with per_example=True the
    rows (very likely) get different masks."""
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = 0
    inp = theano.tensor.matrix()
    # Batch-wise: all three identical rows must stay identical.
    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=False),
                        allow_input_downcast=True)
    for _ in range(10):
        d = f([[3.0, 4.5]] * 3)
        np.testing.assert_equal(d[0], d[1])
        np.testing.assert_equal(d[0], d[2])
    # Per-example: at least one row should differ.
    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=True),
                        allow_input_downcast=True)
    d = f([[3.0, 4.5]] * 3)
    print(d)
    np.testing.assert_(np.any(d[0] != d[1]) or np.any(d[0] != d[2]))
def test_str():
    """
    Make sure the __str__ method returns a string
    """
    model = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0),
                                Linear(2, 'h1', irange=0)])
    rendered = str(model)
    assert isinstance(rendered, six.string_types)
def test_sigmoid_detection_cost():
    """Smoke test for the Sigmoid layer's cost in 'detection' monitor
    style: verifies that it compiles and runs, not any particular value."""
    rng = np.random.RandomState(0)
    # Random binary targets and a small random input batch.
    y = (rng.uniform(size=(4, 3)) > 0.5).astype('uint8')
    X = theano.shared(rng.uniform(size=(4, 2)))
    model = MLP(nvis=2, layers=[Sigmoid(monitor_style='detection', dim=3,
                                layer_name='y', irange=0.8)])
    y_hat = model.fprop(X)
    # Evaluating the cost is the whole test: it must not raise.
    model.cost(y, y_hat).eval()
def test_weight_decay_0():
    """Check that a zero weight-decay coefficient on a nested MLP yields a
    floatX constant, for both scalar-per-layer and nested-list forms."""
    nested_mlp = MLP(layer_name='nested_mlp',
                     layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp = MLP(nvis=2, layers=[nested_mlp])
    # Flat coefficient list.
    weight_decay = mlp.get_weight_decay([0])
    assert isinstance(weight_decay, theano.tensor.TensorConstant)
    assert weight_decay.dtype == theano.config.floatX
    # Nested coefficient list (one sub-list per nested MLP).
    weight_decay = mlp.get_weight_decay([[0]])
    assert isinstance(weight_decay, theano.tensor.TensorConstant)
    assert weight_decay.dtype == theano.config.floatX
    # Mixed zero / non-zero coefficients after adding a second layer.
    nested_mlp.add_layers([IdentityLayer(2, 'h1', irange=0)])
    weight_decay = mlp.get_weight_decay([[0, 0.1]])
    assert weight_decay.dtype == theano.config.floatX
if __name__ == "__main__":
    # NOTE(review): this entry point sits in the middle of the module, so
    # it only invokes the tests defined above this point; the tests defined
    # later in the file (test_composite_layer onwards) are not run here —
    # confirm whether that is intentional (e.g. they are only meant to run
    # under a test runner) or an oversight.
    test_masked_fprop()
    test_sampled_dropout_average()
    test_exhaustive_dropout_average()
    test_dropout_input_mask_value()
    test_sigmoid_layer_misclass_reporting()
    test_batchwise_dropout()
    test_sigmoid_detection_cost()
    test_weight_decay_0()
def test_composite_layer():
    """
    Test the routing functionality of the CompositeLayer: identity
    sub-layers must forward exactly the inputs routed to them, and the
    composite weight-decay expressions must equal the sum over sub-layers.
    """
    # Without routing: three identity Linear layers share one input.
    composite_layer = CompositeLayer('composite_layer',
                                     [Linear(2, 'h0', irange=0),
                                      Linear(2, 'h1', irange=0),
                                      Linear(2, 'h2', irange=0)])
    mlp = MLP(nvis=2, layers=[composite_layer])
    for i in range(3):
        composite_layer.layers[i].set_weights(
            np.eye(2, dtype=theano.config.floatX)
        )
        composite_layer.layers[i].set_biases(
            np.zeros(2, dtype=theano.config.floatX)
        )
    X = tensor.matrix()
    y = mlp.fprop(X)
    funs = [theano.function([X], y_elem) for y_elem in y]
    x_numeric = np.random.rand(2, 2).astype('float32')
    y_numeric = [f(x_numeric) for f in funs]
    # Identity weights, zero biases: every output equals the input.
    assert np.all(x_numeric == y_numeric)
    # With routing: each mapping sends input i to the listed sub-layers.
    for inputs_to_layers in [{0: [1], 1: [2], 2: [0]},
                             {0: [1], 1: [0, 2], 2: []},
                             {0: [], 1: []}]:
        composite_layer = CompositeLayer('composite_layer',
                                         [Linear(2, 'h0', irange=0),
                                          Linear(2, 'h1', irange=0),
                                          Linear(2, 'h2', irange=0)],
                                         inputs_to_layers)
        input_space = CompositeSpace([VectorSpace(dim=2),
                                      VectorSpace(dim=2),
                                      VectorSpace(dim=2)])
        input_source = ('features0', 'features1', 'features2')
        mlp = MLP(input_space=input_space, input_source=input_source,
                  layers=[composite_layer])
        for i in range(3):
            composite_layer.layers[i].set_weights(
                np.eye(2, dtype=theano.config.floatX)
            )
            composite_layer.layers[i].set_biases(
                np.zeros(2, dtype=theano.config.floatX)
            )
        X = [tensor.matrix() for _ in range(3)]
        y = mlp.fprop(X)
        funs = [theano.function(X, y_elem, on_unused_input='ignore')
                for y_elem in y]
        x_numeric = [np.random.rand(2, 2).astype(theano.config.floatX)
                     for _ in range(3)]
        y_numeric = [f(*x_numeric) for f in funs]
        # Every routed destination must reproduce its source input.
        assert all([all([np.all(x_numeric[i] == y_numeric[j])
                         for j in inputs_to_layers[i]])
                    for i in inputs_to_layers])
    # Get the weight decay expressions from a composite layer
    composite_layer = CompositeLayer('composite_layer',
                                     [Linear(2, 'h0', irange=0.1),
                                      Linear(2, 'h1', irange=0.1)])
    input_space = VectorSpace(dim=10)
    mlp = MLP(input_space=input_space, layers=[composite_layer])
    # Both L2 and L1 decay, with per-layer and shared coefficients.
    for attr, coeff in product(['get_weight_decay', 'get_l1_weight_decay'],
                               [[0.7, 0.3], 0.5]):
        f = theano.function([], getattr(composite_layer, attr)(coeff))
        if is_iterable(coeff):
            g = theano.function(
                [], tensor.sum([getattr(layer, attr)(c) for c, layer
                                in zip(coeff, composite_layer.layers)])
            )
            assert np.allclose(f(), g())
        else:
            g = theano.function(
                [], tensor.sum([getattr(layer, attr)(coeff) for layer
                                in composite_layer.layers])
            )
            assert np.allclose(f(), g())
def test_multiple_inputs():
    """
    Create a VectorSpacesDataset with two inputs (features0 and features1)
    and train an MLP which takes both inputs for 1 epoch.
    """
    # Crossed routing: features0 -> h1, features1 -> h0.
    mlp = MLP(
        layers=[
            FlattenerLayer(
                CompositeLayer(
                    'composite',
                    [Linear(10, 'h0', 0.1),
                     Linear(10, 'h1', 0.1)],
                    {
                        0: [1],
                        1: [0]
                    }
                )
            ),
            Softmax(5, 'softmax', 0.1)
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1')
    )
    # Note the dataset orders sources (features1, features0, targets),
    # exercising source-name-based alignment rather than positional.
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace([
            VectorSpace(20),
            VectorSpace(15),
            VectorSpace(5)]),
         ('features1', 'features0', 'targets')))
    # One SGD epoch is enough: the test is that training runs at all.
    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
def test_input_and_target_source():
    """
    Create a MLP and test input_source and target_source
    for default and non-default options.
    """
    # Explicit multi-source configuration.
    mlp = MLP(
        layers=[CompositeLayer(
            'composite',
            [Linear(10, 'h0', 0.1),
             Linear(10, 'h1', 0.1)],
            {
                0: [1],
                1: [0]
            }
        )
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1'),
        target_source=('targets0', 'targets1')
    )
    np.testing.assert_equal(mlp.get_input_source(), ('features0', 'features1'))
    np.testing.assert_equal(mlp.get_target_source(), ('targets0', 'targets1'))
    # Defaults: single 'features'/'targets' sources.
    mlp = MLP(
        layers=[Linear(10, 'h0', 0.1)],
        input_space=VectorSpace(15)
    )
    np.testing.assert_equal(mlp.get_input_source(), 'features')
    np.testing.assert_equal(mlp.get_target_source(), 'targets')
def test_get_layer_monitor_channels():
    """
    Create a MLP with multiple layer types
    and get layer monitoring channels for MLP.
    Smoke test: the call must not raise.
    """
    mlp = MLP(
        layers=[
            FlattenerLayer(
                CompositeLayer(
                    'composite',
                    [Linear(10, 'h0', 0.1),
                     Linear(10, 'h1', 0.1)],
                    {
                        0: [1],
                        1: [0]
                    }
                )
            ),
            Softmax(5, 'softmax', 0.1)
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1')
    )
    # Dataset is constructed for parity with the training tests; it is not
    # otherwise consumed below.
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace([
            VectorSpace(20),
            VectorSpace(15),
            VectorSpace(5)]),
         ('features1', 'features0', 'targets'))
    )
    state_below = mlp.get_input_space().make_theano_batch()
    targets = mlp.get_target_space().make_theano_batch()
    mlp.get_layer_monitoring_channels(state_below=state_below,
                                      state=None, targets=targets)
def test_flattener_layer():
    """Verify FlattenerLayer over a CompositeLayer of two parallel Linear
    layers is equivalent to two independent single-Linear MLPs: same
    learned weights after one identical SGD epoch from zero init, same
    fprop outputs, and consistent input space / params on the wrapper."""
    # To test the FlattenerLayer we create a very simple feed-forward neural
    # network with two parallel linear layers. We then create two separate
    # feed-forward neural networks with single linear layers. In principle,
    # these two models should be identical if we start from the same
    # parameters. This makes it easy to test that the composite layer works
    # as expected.
    # Create network with composite layers.
    mlp_composite = MLP(
        layers=[
            FlattenerLayer(
                CompositeLayer(
                    'composite',
                    [Linear(2, 'h0', 0.1),
                     Linear(2, 'h1', 0.1)],
                    {
                        0: [0],
                        1: [1]
                    }
                )
            )
        ],
        input_space=CompositeSpace([VectorSpace(5), VectorSpace(10)]),
        input_source=('features0', 'features1')
    )
    # Create network with single linear layer, corresponding to first
    # layer in the composite network.
    mlp_first_part = MLP(
        layers=[
            Linear(2, 'h0', 0.1)
        ],
        input_space=VectorSpace(5),
        input_source=('features0')
    )
    # Create network with single linear layer, corresponding to second
    # layer in the composite network.
    mlp_second_part = MLP(
        layers=[
            Linear(2, 'h1', 0.1)
        ],
        input_space=VectorSpace(10),
        input_source=('features1')
    )
    # Create dataset which we will test our networks against.
    # Columns 0:5 and 5:15 are the two inputs; 15:19 are the targets.
    shared_dataset = np.random.rand(20, 19).astype(theano.config.floatX)
    # Make dataset for composite network.
    dataset_composite = VectorSpacesDataset(
        (shared_dataset[:, 0:5],
         shared_dataset[:, 5:15],
         shared_dataset[:, 15:19]),
        (CompositeSpace([
            VectorSpace(5),
            VectorSpace(10),
            VectorSpace(4)]),
         ('features0', 'features1', 'targets'))
    )
    # Make dataset for first single linear layer network.
    dataset_first_part = VectorSpacesDataset(
        (shared_dataset[:, 0:5],
         shared_dataset[:, 15:17]),
        (CompositeSpace([
            VectorSpace(5),
            VectorSpace(2)]),
         ('features0', 'targets'))
    )
    # Make dataset for second single linear layer network.
    dataset_second_part = VectorSpacesDataset(
        (shared_dataset[:, 5:15],
         shared_dataset[:, 17:19]),
        (CompositeSpace([
            VectorSpace(10),
            VectorSpace(2)]),
         ('features1', 'targets'))
    )
    # Initialize all MLPs to start from zero weights.
    mlp_composite.layers[0].raw_layer.layers[0].set_weights(
        mlp_composite.layers[0].raw_layer.layers[0].get_weights() * 0.0)
    mlp_composite.layers[0].raw_layer.layers[1].set_weights(
        mlp_composite.layers[0].raw_layer.layers[1].get_weights() * 0.0)
    mlp_first_part.layers[0].set_weights(
        mlp_first_part.layers[0].get_weights() * 0.0)
    mlp_second_part.layers[0].set_weights(
        mlp_second_part.layers[0].get_weights() * 0.0)
    # Train all models with their respective datasets.
    train_composite = Train(dataset_composite, mlp_composite,
                            SGD(0.0001, batch_size=20))
    train_composite.algorithm.termination_criterion = EpochCounter(1)
    train_composite.main_loop()
    train_first_part = Train(dataset_first_part, mlp_first_part,
                             SGD(0.0001, batch_size=20))
    train_first_part.algorithm.termination_criterion = EpochCounter(1)
    train_first_part.main_loop()
    train_second_part = Train(dataset_second_part, mlp_second_part,
                              SGD(0.0001, batch_size=20))
    train_second_part.algorithm.termination_criterion = EpochCounter(1)
    train_second_part.main_loop()
    # Check that the composite feed-forward neural network has learned
    # same parameters as each individual feed-forward neural network.
    np.testing.assert_allclose(
        mlp_composite.layers[0].raw_layer.layers[0].get_weights(),
        mlp_first_part.layers[0].get_weights())
    np.testing.assert_allclose(
        mlp_composite.layers[0].raw_layer.layers[1].get_weights(),
        mlp_second_part.layers[0].get_weights())
    # Check that we get same output given the same input on a randomly
    # generated dataset.
    X_composite = mlp_composite.get_input_space().make_theano_batch()
    X_first_part = mlp_first_part.get_input_space().make_theano_batch()
    X_second_part = mlp_second_part.get_input_space().make_theano_batch()
    fprop_composite = theano.function(X_composite,
                                      mlp_composite.fprop(X_composite))
    fprop_first_part = theano.function([X_first_part],
                                       mlp_first_part.fprop(X_first_part))
    fprop_second_part = theano.function([X_second_part],
                                        mlp_second_part.fprop(X_second_part))
    X_data = np.random.random(size=(10, 15)).astype(theano.config.floatX)
    y_data = np.random.randint(low=0, high=10, size=(10, 4))
    # Composite output columns 0:2 come from h0, 2:4 from h1.
    np.testing.assert_allclose(fprop_composite(X_data[:, 0:5],
                                               X_data[:, 5:15])[:, 0:2],
                               fprop_first_part(X_data[:, 0:5]))
    np.testing.assert_allclose(fprop_composite(X_data[:, 0:5],
                                               X_data[:, 5:15])[:, 2:4],
                               fprop_second_part(X_data[:, 5:15]))
    # Finally check that calling the internal FlattenerLayer behaves
    # as we would expect. First, retrieve the FlattenerLayer.
    fl = mlp_composite.layers[0]
    # Check that it agrees on the input space.
    assert mlp_composite.get_input_space() == fl.get_input_space()
    # Check that it agrees on the parameters.
    for i in range(0, 4):
        np.testing.assert_allclose(fl.get_params()[i].eval(),
                                   mlp_composite.get_params()[i].eval())
def test_flattener_layer_state_separation_for_softmax():
    """
    Train an MLP whose only layer is a FlattenerLayer wrapping a
    CompositeLayer of two Softmax layers, and make sure the flattened
    state gets correctly picked apart during training/monitoring.
    """
    composite = CompositeLayer(
        'composite',
        [Softmax(5, 'sf1', 0.1),
         Softmax(5, 'sf2', 0.1)]
    )
    mlp = MLP(layers=[FlattenerLayer(composite)], nvis=2)
    dataset = DenseDesignMatrix(
        X=np.random.rand(20, 2).astype(theano.config.floatX),
        y=np.random.rand(20, 10).astype(theano.config.floatX))
    trainer = Train(dataset, mlp,
                    SGD(0.1, batch_size=5, monitoring_dataset=dataset))
    # One epoch is enough: the test only checks that training runs.
    trainer.algorithm.termination_criterion = EpochCounter(1)
    trainer.main_loop()
def test_nested_mlp():
    """
    Build an MLP nested inside a CompositeLayer of an outer MLP and
    check that a forward pass (fprop) runs end to end.
    """
    inner = MLP(layers=[Linear(10, 'h0', 0.1), Linear(10, 'h1', 0.1)],
                layer_name='inner_mlp')
    outer = MLP(layers=[CompositeLayer(layer_name='composite',
                                       layers=[inner,
                                               Linear(10, 'h2', 0.1)])],
                nvis=10)
    inputs = outer.get_input_space().make_theano_batch()
    fprop_fn = theano.function([inputs], outer.fprop(inputs))
    # Smoke test only: no assertion, success == no exception.
    fprop_fn(np.random.rand(5, 10).astype(theano.config.floatX))
def test_softmax_binary_targets():
    """
    A Softmax layer trained against integer ("binary") targets must
    compute the same cost as one trained against the equivalent one-hot
    vector targets.
    """
    num_classes = 10
    batch_size = 20
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1,
                        binary_target_dim=1)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )
    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()
    cost_bin = theano.function([X, y_bin],
                               mlp_bin.cost(y_bin, mlp_bin.fprop(X)),
                               allow_input_downcast=True)
    cost_vec = theano.function([X, y_vec],
                               mlp_vec.cost(y_vec, mlp_vec.fprop(X)),
                               allow_input_downcast=True)
    X_data = np.random.random(size=(batch_size, 100))
    y_bin_data = np.random.randint(low=0, high=10, size=(batch_size, 1))
    # One-hot encoding of the integer targets.
    y_vec_data = np.zeros((batch_size, num_classes))
    y_vec_data[np.arange(batch_size), y_bin_data.flatten()] = 1
    np.testing.assert_allclose(cost_bin(X_data, y_bin_data),
                               cost_vec(X_data, y_vec_data))
def test_softmax_two_binary_targets():
    """
    A Softmax layer with two integer targets per example must compute
    the same cost as one given the equivalent two-hot vector targets.
    """
    num_classes = 10
    batch_size = 20
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1,
                        binary_target_dim=2)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )
    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()
    cost_bin = theano.function([X, y_bin],
                               mlp_bin.cost(y_bin, mlp_bin.fprop(X)),
                               allow_input_downcast=True)
    cost_vec = theano.function([X, y_vec],
                               mlp_vec.cost(y_vec, mlp_vec.fprop(X)),
                               allow_input_downcast=True)
    X_data = np.random.random(size=(batch_size, 100))
    # The two costs can only match when each example's two binary
    # targets are mutually exclusive, so draw them without replacement.
    y_bin_data = np.concatenate(
        [np.random.permutation(10)[:2].reshape((1, 2))
         for _ in range(batch_size)])
    y_vec_data = np.zeros((batch_size, num_classes))
    y_vec_data[np.arange(batch_size), y_bin_data[:, 0].flatten()] = 1
    y_vec_data[np.arange(batch_size), y_bin_data[:, 1].flatten()] = 1
    np.testing.assert_allclose(cost_bin(X_data, y_bin_data),
                               cost_vec(X_data, y_vec_data))
def test_softmax_weight_init():
    """
    Softmax must accept each supported weight-initialization scheme:
    uniform (irange), Gaussian (istdev), and sparse (sparse_init).
    """
    nvis = 5
    num_classes = 10
    # Construction alone is the test; failures would raise.
    MLP(layers=[Softmax(num_classes, 's', irange=0.1)], nvis=nvis)
    MLP(layers=[Softmax(num_classes, 's', istdev=0.1)], nvis=nvis)
    MLP(layers=[Softmax(num_classes, 's', sparse_init=2)], nvis=nvis)
def test_softmax_bin_targets_channels(seed=0):
    """
    Softmax layers with integer (binary) targets and with one-hot vector
    targets must report the same 'misclass' and 'nll' monitor channels.

    Parameters
    ----------
    seed : int, optional
        Seed for numpy's RNG so the test is deterministic.
    """
    np.random.seed(seed)
    num_classes = 2
    batch_size = 5
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1,
                        binary_target_dim=1)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )
    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()
    X_data = np.random.random(size=(batch_size, 100))
    X_data = X_data.astype(theano.config.floatX)
    y_bin_data = np.random.randint(low=0, high=num_classes,
                                   size=(batch_size, 1))
    # One-hot encoding of the integer targets.
    y_vec_data = np.zeros((batch_size, num_classes),
                          dtype=theano.config.floatX)
    y_vec_data[np.arange(batch_size), y_bin_data.flatten()] = 1

    def channel_value(channel_name, model, y, y_data):
        # Compile and evaluate one monitoring channel of `model`.
        channels = model.get_monitoring_channels((X, y))
        channel_fn = theano.function([X, y],
                                     channels['s1_' + channel_name])
        return channel_fn(X_data, y_data)

    for channel_name in ['misclass', 'nll']:
        vec_val = channel_value(channel_name, mlp_vec, y_vec, y_vec_data)
        bin_val = channel_value(channel_name, mlp_bin, y_bin, y_bin_data)
        print(channel_name, vec_val, bin_val)
        np.testing.assert_allclose(vec_val, bin_val)
def test_set_get_weights_Softmax():
    """
    Setting and getting Softmax weights must round-trip, both for a
    VectorSpace input space and for a Conv2DSpace input space.
    """
    num_classes = 2
    dim = 3
    conv_dim = [3, 4, 5]

    # VectorSpace input space. Building the MLP wires up the layer's
    # input space and allocates its parameters.
    layer = Softmax(num_classes, 's', irange=.1)
    MLP(layers=[layer], input_space=VectorSpace(dim=dim))
    vec_weights = np.random.randn(dim, num_classes).astype(config.floatX)
    layer.set_weights(vec_weights)
    assert np.allclose(layer.W.get_value(), vec_weights)
    layer.W.set_value(vec_weights)
    assert np.allclose(layer.get_weights(), vec_weights)

    # Conv2DSpace input space: weights are stored flattened.
    layer = Softmax(num_classes, 's', irange=.1)
    MLP(layers=[layer],
        input_space=Conv2DSpace(shape=(conv_dim[0], conv_dim[1]),
                                num_channels=conv_dim[2]))
    conv_weights = np.random.randn(conv_dim[0], conv_dim[1], conv_dim[2],
                                   num_classes).astype(config.floatX)
    flat_weights = conv_weights.reshape(np.prod(conv_dim), num_classes)
    layer.set_weights(flat_weights)
    assert np.allclose(layer.W.get_value(), flat_weights)
    layer.W.set_value(flat_weights)
    # Topological view puts the class axis first.
    assert np.allclose(layer.get_weights_topo(),
                       np.transpose(conv_weights, axes=(3, 0, 1, 2)))
def test_init_bias_target_marginals():
    """
    `Softmax` built with `init_bias_target_marginals` must initialize
    its bias from the dataset's target marginals, and must reject a
    dataset with several integer targets per example.
    """
    batch_size = 5
    n_features = 5
    n_classes = 3
    n_targets = 3
    irange = 0.1
    X_data = np.random.random(size=(batch_size, n_features))

    def softmax_layer(dataset):
        # Helper: Softmax whose bias comes from `dataset`'s marginals.
        return Softmax(n_classes, 'h0', irange=irange,
                       init_bias_target_marginals=dataset)

    # Categorical (integer-label) targets: bias must equal the
    # pseudo-inverse softmax of the class frequencies.
    Y_categorical = np.asarray([[0], [1], [1], [2], [2]])
    class_frequencies = np.asarray([.2, .4, .4])
    categorical_dataset = DenseDesignMatrix(X_data,
                                            y=Y_categorical,
                                            y_labels=n_classes)
    categorical_mlp = MLP(layers=[softmax_layer(categorical_dataset)],
                          nvis=n_features)
    assert np.allclose(categorical_mlp.layers[0].b.get_value(),
                       pseudoinverse_softmax_numpy(class_frequencies))

    # Continuous targets: bias must equal the pseudo-inverse softmax of
    # the per-column means.
    Y_continuous = np.random.random(size=(batch_size, n_targets))
    continuous_dataset = DenseDesignMatrix(X_data, y=Y_continuous)
    continuous_mlp = MLP(layers=[softmax_layer(continuous_dataset)],
                         nvis=n_features)
    assert np.allclose(continuous_mlp.layers[0].b.get_value(),
                       pseudoinverse_softmax_numpy(
                           np.mean(Y_continuous, axis=0)))

    # Multiclass datasets (several integer targets per row) are not
    # supported and must raise AssertionError at construction time.
    Y_multiclass = np.random.randint(n_classes,
                                     size=(batch_size, n_targets))
    multiclass_dataset = DenseDesignMatrix(X_data,
                                           y=Y_multiclass,
                                           y_labels=n_classes)

    def invalid_multiclass_mlp():
        return MLP(layers=[softmax_layer(multiclass_dataset)],
                   nvis=n_features)

    assert_raises(AssertionError, invalid_multiclass_mlp)
def test_mean_pool():
    """
    mean_pool with non-overlapping 2x2 windows over an image whose 2x2
    blocks are constant must return exactly those block values.
    """
    X_sym = tensor.tensor4('X')
    pooled = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
                       image_shape=(6, 4))
    pool_fn = theano.function(inputs=[X_sym], outputs=pooled)
    # Each 2x2 block is constant, so its mean equals that constant.
    tile = np.array([[1, 1, 3, 3],
                     [1, 1, 3, 3],
                     [5, 5, 7, 7],
                     [5, 5, 7, 7],
                     [9, 9, 11, 11],
                     [9, 9, 11, 11]], dtype=theano.config.floatX)
    # Replicate across 3 channels and add a leading batch axis.
    batch = np.zeros((3, tile.shape[0], tile.shape[1]),
                     dtype=theano.config.floatX)
    batch[:] = tile
    batch = batch[np.newaxis]
    expected = np.array([[1, 3],
                         [5, 7],
                         [9, 11]], dtype=theano.config.floatX)
    assert np.allclose(expected, pool_fn(batch))
| bsd-3-clause |
appsembler/edx-platform | common/djangoapps/util/tests/test_memcache.py | 24 | 3676 | """
Tests for memcache in util app
"""
from django.core.cache import caches
from django.test import TestCase
from util.memcache import safe_key
class MemcacheTest(TestCase):
    """
    Test memcache key cleanup
    """
    # Characters to exercise: whitespace/control codes (0-29), DEL
    # (127), and a sampling of non-ASCII code points up to the UTF-16
    # range. NOTE: this is Python 2 code (list + range, unichr).
    UNICODE_CHAR_CODES = (range(30) + [127] +
                          [129, 500, 2 ** 8 - 1, 2 ** 8 + 1, 2 ** 16 - 1])

    def setUp(self):
        super(MemcacheTest, self).setUp()
        self.cache = caches['default']

    def test_safe_key(self):
        # Plain ASCII inputs pass through as "prefix:version:key".
        self.assertEqual(safe_key('test', 'prefix', 'version'),
                         'prefix:version:test')

    def test_numeric_inputs(self):
        # Numeric key
        self.assertEqual(safe_key(1, 'prefix', 'version'),
                         'prefix:version:1')
        # Numeric prefix
        self.assertEqual(safe_key('test', 5, 'version'),
                         '5:version:test')
        # Numeric version
        self.assertEqual(safe_key('test', 'prefix', 5),
                         'prefix:5:test')

    def test_safe_key_long(self):
        # Lengths straddling memcached's 250-character cutoff.
        for length in [248, 249, 250, 251, 252]:
            raw = 'a' * length
            cleaned = safe_key(raw, '', '')
            self.assertTrue(self._is_valid_key(cleaned),
                            msg="Failed for key length {0}".format(length))

    def test_long_key_prefix_version(self):
        # Long key
        self.assertTrue(self._is_valid_key(
            safe_key('a' * 300, 'prefix', 'version')))
        # Long prefix
        self.assertTrue(self._is_valid_key(
            safe_key('key', 'a' * 300, 'version')))
        # Long version
        self.assertTrue(self._is_valid_key(
            safe_key('key', 'prefix', 'a' * 300)))

    def test_safe_key_unicode(self):
        for unicode_char in self.UNICODE_CHAR_CODES:
            cleaned = safe_key(unichr(unicode_char), '', '')
            self.assertTrue(
                self._is_valid_key(cleaned),
                msg="Failed for unicode character {0}".format(unicode_char))

    def test_safe_key_prefix_unicode(self):
        for unicode_char in self.UNICODE_CHAR_CODES:
            cleaned = safe_key('test', unichr(unicode_char), '')
            self.assertTrue(
                self._is_valid_key(cleaned),
                msg="Failed for unicode character {0}".format(unicode_char))

    def test_safe_key_version_unicode(self):
        for unicode_char in self.UNICODE_CHAR_CODES:
            cleaned = safe_key('test', '', unichr(unicode_char))
            self.assertTrue(
                self._is_valid_key(cleaned),
                msg="Failed for unicode character {0}".format(unicode_char))

    def _is_valid_key(self, key):
        """
        Test that a key is memcache-compatible.
        Based on Django's validator in core.cache.backends.base
        """
        # Memcached rejects keys longer than 250 characters.
        if len(key) > 250:
            return False
        # It also rejects spaces and control characters (< 33, or DEL).
        for char in key:
            if ord(char) < 33 or ord(char) == 127:
                return False
        return True
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.