| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars; ⌀ = null allowed) |
|---|---|---|---|---|
kaiyuanheshang/zulip
|
refs/heads/master
|
zilencer/management/commands/print_initial_password.py
|
116
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.initial_password import initial_password
from zerver.models import get_user_profile_by_email
class Command(BaseCommand):
help = "Print the initial password and API key for accounts as created by populate_db"
fmt = '%-30s %-16s %-32s'
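# Left-aligned columns: 30-char email, 16-char password, 32-char API key.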
def add_arguments(self, parser):
parser.add_argument('emails', metavar='<email>', type=str, nargs='*',
help="email of user to show password and API key for")
def handle(self, *args, **options):
print self.fmt % ('email', 'password', 'API key')
for email in options['emails']:
if '@' not in email:
print 'ERROR: %s does not look like an email address' % (email,)
continue
print self.fmt % (email, initial_password(email), get_user_profile_by_email(email).api_key)
|
darolt/ndnSIMQoS
|
refs/heads/master
|
src/create-module.py
|
17
|
#! /usr/bin/env python
import sys
from optparse import OptionParser
import os
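# Each template below is filled in with %-interpolation: %(MODULE)s inserts
# the module name as-is, %(MODULE)r as a quoted Python string literal.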
WSCRIPT_TEMPLATE = '''# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# def options(opt):
# pass
# def configure(conf):
# conf.check_nonfatal(header_name='stdint.h', define_name='HAVE_STDINT_H')
def build(bld):
module = bld.create_ns3_module(%(MODULE)r, ['core'])
module.source = [
'model/%(MODULE)s.cc',
'helper/%(MODULE)s-helper.cc',
]
module_test = bld.create_ns3_module_test_library('%(MODULE)s')
module_test.source = [
'test/%(MODULE)s-test-suite.cc',
]
headers = bld(features='ns3header')
headers.module = %(MODULE)r
headers.source = [
'model/%(MODULE)s.h',
'helper/%(MODULE)s-helper.h',
]
if bld.env.ENABLE_EXAMPLES:
bld.recurse('examples')
# bld.ns3_python_bindings()
'''
MODEL_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "%(MODULE)s.h"
namespace ns3 {
/* ... */
}
'''
MODEL_H_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#ifndef %(INCLUDE_GUARD)s
#define %(INCLUDE_GUARD)s
namespace ns3 {
/* ... */
}
#endif /* %(INCLUDE_GUARD)s */
'''
HELPER_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "%(MODULE)s-helper.h"
namespace ns3 {
/* ... */
}
'''
HELPER_H_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#ifndef %(INCLUDE_GUARD)s
#define %(INCLUDE_GUARD)s
#include "ns3/%(MODULE)s.h"
namespace ns3 {
/* ... */
}
#endif /* %(INCLUDE_GUARD)s */
'''
EXAMPLES_WSCRIPT_TEMPLATE = '''# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
obj = bld.create_ns3_program('%(MODULE)s-example', [%(MODULE)r])
obj.source = '%(MODULE)s-example.cc'
'''
EXAMPLE_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "ns3/core-module.h"
#include "ns3/%(MODULE)s-helper.h"
using namespace ns3;
int
main (int argc, char *argv[])
{
bool verbose = true;
CommandLine cmd;
cmd.AddValue ("verbose", "Tell application to log if true", verbose);
cmd.Parse (argc,argv);
/* ... */
Simulator::Run ();
Simulator::Destroy ();
return 0;
}
'''
TEST_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
// Include a header file from your module to test.
#include "ns3/%(MODULE)s.h"
// An essential include is test.h
#include "ns3/test.h"
// Do not put your test classes in namespace ns3. You may find it useful
// to use the using directive to access the ns3 namespace directly
using namespace ns3;
// This is an example TestCase.
class %(CAPITALIZED)sTestCase1 : public TestCase
{
public:
%(CAPITALIZED)sTestCase1 ();
virtual ~%(CAPITALIZED)sTestCase1 ();
private:
virtual void DoRun (void);
};
// Add some help text to this case to describe what it is intended to test
%(CAPITALIZED)sTestCase1::%(CAPITALIZED)sTestCase1 ()
: TestCase ("%(CAPITALIZED)s test case (does nothing)")
{
}
// This destructor does nothing but we include it as a reminder that
// the test case should clean up after itself
%(CAPITALIZED)sTestCase1::~%(CAPITALIZED)sTestCase1 ()
{
}
//
// This method is the pure virtual method from class TestCase that every
// TestCase must implement
//
void
%(CAPITALIZED)sTestCase1::DoRun (void)
{
// A wide variety of test macros are available in src/core/test.h
NS_TEST_ASSERT_MSG_EQ (true, true, "true doesn't equal true for some reason");
// Use this one for floating point comparisons
NS_TEST_ASSERT_MSG_EQ_TOL (0.01, 0.01, 0.001, "Numbers are not equal within tolerance");
}
// The TestSuite class names the TestSuite, identifies what type of TestSuite,
// and enables the TestCases to be run. Typically, only the constructor for
// this class must be defined
//
class %(CAPITALIZED)sTestSuite : public TestSuite
{
public:
%(CAPITALIZED)sTestSuite ();
};
%(CAPITALIZED)sTestSuite::%(CAPITALIZED)sTestSuite ()
: TestSuite ("%(MODULE)s", UNIT)
{
// TestDuration for TestCase can be QUICK, EXTENSIVE or TAKES_FOREVER
AddTestCase (new %(CAPITALIZED)sTestCase1, TestCase::QUICK);
}
// Do not forget to allocate an instance of this TestSuite
static %(CAPITALIZED)sTestSuite %(COMPOUND)sTestSuite;
'''
DOC_RST_TEMPLATE = '''Example Module Documentation
----------------------------
.. include:: replace.txt
.. heading hierarchy:
------------- Chapter
************* Section (#.#)
============= Subsection (#.#.#)
############# Paragraph (no number)
This is a suggested outline for adding new module documentation to |ns3|.
See ``src/click/doc/click.rst`` for an example.
The introductory paragraph is for describing what this code is trying to
model.
For consistency (italicized formatting), please use |ns3| to refer to
ns-3 in the documentation (and likewise, |ns2| for ns-2). These macros
are defined in the file ``replace.txt``.
Model Description
*****************
The source code for the new module lives in the directory ``src/%(MODULE)s``.
Add here a basic description of what is being modeled.
Design
======
Briefly describe the software design of the model and how it fits into
the existing ns-3 architecture.
Scope and Limitations
=====================
What can the model do? What can it not do? Please use this section to
describe the scope and limitations of the model.
References
==========
Add academic citations here, such as if you published a paper on this
model, or if readers should read a particular specification or other work.
Usage
*****
This section is principally concerned with the usage of your model, using
the public API. Focus first on most common usage patterns, then go
into more advanced topics.
Building New Module
===================
Include this subsection only if there are special build instructions or
platform limitations.
Helpers
=======
What helper API will users typically use? Describe it here.
Attributes
==========
What classes hold attributes, and what are the key ones worth mentioning?
Output
======
What kind of data does the model generate? What are the key trace
sources? What kind of logging output can be enabled?
Advanced Usage
==============
Go into further details (such as using the API outside of the helpers)
in additional sections, as needed.
Examples
========
What examples using this new code are available? Describe them here.
Troubleshooting
===============
Add any tips for avoiding pitfalls, etc.
Validation
**********
Describe how the model has been tested/validated. What tests run in the
test suite? How much API and code is covered by the tests? Again,
references to outside published work may help here.
'''
def main(argv):
parser = OptionParser(usage=("Usage: %prog [options] modulename\n"
"Utility script to create a basic template for a new ns-3 module"))
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
return 1
modname = args[0].lower()
if False in [word.isalnum() for word in modname.split("-")]:
print >> sys.stderr, "Module name should only contain alphanumeric characters and dashes"
return 2
assert os.path.sep not in modname
moduledir = os.path.join(os.path.dirname(__file__), modname)
if os.path.exists(moduledir):
print >> sys.stderr, "Module %r already exists" % (modname,)
return 2
print "Creating module %r" % (modname,)
os.mkdir(moduledir)
wscript = file(os.path.join(moduledir, "wscript"), "wt")
wscript.write(WSCRIPT_TEMPLATE % dict(MODULE=modname))
wscript.close()
#
# model
#
modeldir = os.path.join(moduledir, "model")
os.mkdir(modeldir)
model_cc = file(os.path.join(moduledir, "model", "%s.cc" % modname), "wt")
model_cc.write(MODEL_CC_TEMPLATE % dict(MODULE=modname))
model_cc.close()
model_h = file(os.path.join(moduledir, "model", "%s.h" % modname), "wt")
model_h.write(MODEL_H_TEMPLATE % dict(MODULE=modname, INCLUDE_GUARD="%s_H" % (modname.replace("-", "_").upper()),))
model_h.close()
#
# test
#
testdir = os.path.join(moduledir, "test")
os.mkdir(testdir)
test_cc = file(os.path.join(moduledir, "test", "%s-test-suite.cc" % modname), "wt")
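# CAPITALIZED turns e.g. "my-module" into "MyModule"; COMPOUND into "myModule".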
test_cc.write(TEST_CC_TEMPLATE % dict(MODULE=modname,
CAPITALIZED=''.join([word.capitalize() for word in modname.split('-')]),
COMPOUND=''.join([modname.split('-')[0]] + [word.capitalize() for word in modname.split('-')[1:]]),
))
test_cc.close()
#
# helper
#
helperdir = os.path.join(moduledir, "helper")
os.mkdir(helperdir)
helper_cc = file(os.path.join(moduledir, "helper", "%s-helper.cc" % modname), "wt")
helper_cc.write(HELPER_CC_TEMPLATE % dict(MODULE=modname))
helper_cc.close()
helper_h = file(os.path.join(moduledir, "helper", "%s-helper.h" % modname), "wt")
helper_h.write(HELPER_H_TEMPLATE % dict(MODULE=modname, INCLUDE_GUARD="%s_HELPER_H" % (modname.replace("-", "_").upper()),))
helper_h.close()
#
# examples
#
examplesdir = os.path.join(moduledir, "examples")
os.mkdir(examplesdir)
examples_wscript = file(os.path.join(examplesdir, "wscript"), "wt")
examples_wscript.write(EXAMPLES_WSCRIPT_TEMPLATE % dict(MODULE=modname))
examples_wscript.close()
example_cc = file(os.path.join(moduledir, "examples", "%s-example.cc" % modname), "wt")
example_cc.write(EXAMPLE_CC_TEMPLATE % dict(MODULE=modname))
example_cc.close()
#
# doc
#
docdir = os.path.join(moduledir, "doc")
os.mkdir(docdir)
doc_rst = file(os.path.join(moduledir, "doc", "%s.rst" % modname), "wt")
doc_rst.write(DOC_RST_TEMPLATE % dict(MODULE=modname))
doc_rst.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
40223137/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_sysconfigdata.py
|
731
|
build_time_vars={'HAVE_SYS_WAIT_H': 1, 'HAVE_UTIL_H': 0, 'HAVE_SYMLINKAT': 1, 'HAVE_LIBSENDFILE': 0, 'SRCDIRS': 'Parser Grammar Objects Python Modules Mac', 'SIZEOF_OFF_T': 8, 'BASECFLAGS': '-Wno-unused-result', 'HAVE_UTIME_H': 1, 'EXTRAMACHDEPPATH': '', 'HAVE_SYS_TIME_H': 1, 'CFLAGSFORSHARED': '-fPIC', 'HAVE_HYPOT': 1, 'PGSRCS': '\\', 'HAVE_LIBUTIL_H': 0, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_REALPATH': 1, 'HAVE_LINUX_TIPC_H': 1, 'MULTIARCH': 'i386-linux-gnu', 'HAVE_GETWD': 1, 'HAVE_GCC_ASM_FOR_X64': 0, 'HAVE_INET_PTON': 1, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'SIZEOF__BOOL': 1, 'HAVE_ZLIB_COPY': 1, 'ASDLGEN': 'python3.3 ../Parser/asdl_c.py', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HOST_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_SCHED_RR_GET_INTERVAL': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_MKFIFO': 1, 'TIMEMODULE_LIB': 0, 'LIBM': '-lm', 'PGENOBJS': '\\ \\', 'PYTHONFRAMEWORK': '', 'GETPGRP_HAVE_ARG': 0, 'HAVE_MMAP': 1, 'SHLIB_SUFFIX': '.so', 'SIZEOF_FLOAT': 4, 'HAVE_RENAMEAT': 1, 'HAVE_LANGINFO_H': 1, 'HAVE_STDLIB_H': 1, 'PY_CORE_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -I. -IInclude -I../Include -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE', 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_CONFSTR': 1, 'HAVE_SIGTIMEDWAIT': 1, 'HAVE_FTELLO': 1, 'READELF': 'readelf', 'HAVE_SIGALTSTACK': 1, 'TESTTIMEOUT': 3600, 'PYTHONPATH': ':plat-i386-linux-gnu', 'SIZEOF_WCHAR_T': 4, 'LIBOBJS': '', 'HAVE_SYSCONF': 1, 'MAKESETUP': '../Modules/makesetup', 'HAVE_UTIMENSAT': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_WORKING_TZSET': 1, 'HAVE_FINITE': 1, 'HAVE_ASINH': 1, 'HAVE_SETEUID': 1, 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'HAVE_SETGROUPS': 1, 'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o', 'HAVE_MBRTOWC': 1, 'SIZEOF_INT': 4, 'HAVE_STDARG_PROTOTYPES': 1, 'TM_IN_SYS_TIME': 0, 'HAVE_SYS_TIMES_H': 1, 'HAVE_LCHOWN': 1, 'HAVE_SSIZE_T': 1, 'HAVE_PAUSE': 1, 'SYSLIBS': '-lm', 'POSIX_SEMAPHORES_NOT_ENABLED': 0, 'HAVE_DEVICE_MACROS': 1, 'BLDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\', 'HAVE_SYS_UN_H': 1, 'HAVE_SYS_STAT_H': 1, 'VPATH': '..', 'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.3m /usr/include/python3.3m', 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_TIMEGM': 1, 'PACKAGE_VERSION': 0, 'MAJOR_IN_SYSMACROS': 0, 'HAVE_ATANH': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_SYS_POLL_H': 1, 'SIZEOF_PTHREAD_T': 4, 'SIZEOF_FPOS_T': 16, 'HAVE_CTERMID': 1, 'HAVE_TMPFILE': 1, 'HAVE_SETUID': 1, 'CXX': 'i686-linux-gnu-g++ -pthread', 'srcdir': '..', 'HAVE_UINT32_T': 1, 'HAVE_ADDRINFO': 1, 'HAVE_GETSPENT': 1, 'SIZEOF_DOUBLE': 8, 'HAVE_INT32_T': 1, 'LIBRARY_OBJS_OMIT_FROZEN': '\\', 'HAVE_FUTIMES': 1, 'CONFINCLUDEPY': '/usr/include/python3.3m', 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, 'LIBFFI_INCLUDEDIR': '', 'HAVE_SETGID': 1, 'HAVE_UINT64_T': 1, 'EXEMODE': 755, 'UNIVERSALSDK': '', 'HAVE_LIBDL': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_STDINT_H': 1, 'COREPYTHONPATH': ':plat-i386-linux-gnu', 'HAVE_SOCKADDR_STORAGE': 1, 'HAVE_WAITID': 1, 'EXTRAPLATDIR': '@EXTRAPLATDIR@', 'HAVE_ACCEPT4': 1, 'RUNSHARED': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared:', 'EXE': '', 
'HAVE_SIGACTION': 1, 'HAVE_CHOWN': 1, 'HAVE_GETLOGIN': 1, 'HAVE_TZNAME': 0, 'PACKAGE_NAME': 0, 'HAVE_GETPGID': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'BUILD_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_LINUX_CAN_H': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'HAVE_PWRITE': 1, 'BUILDEXE': '', 'HAVE_OPENPTY': 1, 'HAVE_LOCKF': 1, 'HAVE_COPYSIGN': 1, 'HAVE_PREAD': 1, 'HAVE_DLOPEN': 1, 'HAVE_SYS_KERN_CONTROL_H': 0, 'PY_FORMAT_LONG_LONG': '"ll"', 'HAVE_TCSETPGRP': 1, 'HAVE_SETSID': 1, 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, 'HAVE_STRING_H': 1, 'LDLIBRARY': 'libpython3.3m.so', 'INSTALL_SCRIPT': '/usr/bin/install -c', 'HAVE_SYS_XATTR_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_TMPNAM_R': 1, 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", 'WANT_SIGFPE_HANDLER': 1, 'HAVE_INT64_T': 1, 'HAVE_STAT_TV_NSEC': 1, 'HAVE_SYS_MKDEV_H': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_GETPWENT': 1, 'PSRCS': '\\', 'RANLIB': 'ranlib', 'HAVE_WCSCOLL': 1, 'WITH_NEXT_FRAMEWORK': 0, 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'HAVE_RL_PRE_INPUT_HOOK': 1, 'PACKAGE_URL': 0, 'SHLIB_EXT': 0, 'HAVE_SYS_LOADAVG_H': 0, 'HAVE_LIBIEEE': 0, 'HAVE_SEM_OPEN': 1, 'HAVE_TERM_H': 1, 'IO_OBJS': '\\', 'IO_H': 'Modules/_io/_iomodule.h', 'HAVE_STATVFS': 1, 'VERSION': '3.3', 'HAVE_GETC_UNLOCKED': 1, 'MACHDEPS': 'plat-i386-linux-gnu @EXTRAPLATDIR@', 'SUBDIRSTOO': 'Include Lib Misc', 'HAVE_SETREUID': 1, 'HAVE_ERFC': 1, 'HAVE_SETRESUID': 1, 'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions', 'HAVE_SYS_TYPES_H': 1, 'HAVE_GETPAGESIZE': 1, 'HAVE_SETEGID': 1, 'HAVE_PTY_H': 1, 'HAVE_STRUCT_STAT_ST_FLAGS': 0, 'HAVE_WCHAR_H': 1, 'HAVE_FSEEKO': 1, 'Py_ENABLE_SHARED': 1, 'HAVE_SIGRELSE': 1, 'HAVE_PTHREAD_INIT': 0, 'FILEMODE': 644, 'HAVE_SYS_RESOURCE_H': 1, 'HAVE_READLINKAT': 1, 'PYLONG_BITS_IN_DIGIT': 0, 'LINKCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SETLOCALE': 1, 'HAVE_CHROOT': 1, 'HAVE_OPENAT': 1, 'HAVE_FEXECVE': 1, 'LDCXXSHARED': 'i686-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions', 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Ext-dummy', 'HAVE_MKNOD': 1, 'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_BROKEN_MBSTOWCS': 0, 'LIBRARY_OBJS': '\\', 'HAVE_LOG1P': 1, 'SIZEOF_VOID_P': 4, 'HAVE_FCHOWN': 1, 'PYTHONFRAMEWORKPREFIX': '', 'HAVE_LIBDLD': 0, 'HAVE_TGAMMA': 1, 'HAVE_ERRNO_H': 1, 'HAVE_IO_H': 0, 'OTHER_LIBTOOL_OPT': '', 'HAVE_POLL_H': 1, 'PY_CPPFLAGS': '-I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2', 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', 'GRAMMAR_H': 'Include/graminit.h', 'TANH_PRESERVES_ZERO_SIGN': 1, 'HAVE_GETLOADAVG': 1, 'UNICODE_DEPS': '\\ \\', 'HAVE_GETCWD': 1, 'MANDIR': '/usr/share/man', 'MACHDESTLIB': '/usr/lib/python3.3', 'GRAMMAR_C': 'Python/graminit.c', 'PGOBJS': '\\', 'HAVE_DEV_PTMX': 1, 'HAVE_UINTPTR_T': 1, 'HAVE_SCHED_SETAFFINITY': 1, 'PURIFY': '', 'HAVE_DECL_ISINF': 1, 'HAVE_RL_CALLBACK': 1, 'HAVE_WRITEV': 1, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_SYS_AUDIOIO_H': 0, 'EXT_SUFFIX': '.cpython-33m.so', 'SIZEOF_LONG_LONG': 8, 'DLINCLDIR': '.', 'HAVE_PATHCONF': 1, 'HAVE_UNLINKAT': 1, 'MKDIR_P': '/bin/mkdir -p', 'HAVE_ALTZONE': 0, 'SCRIPTDIR': '/usr/lib', 'OPCODETARGETGEN_FILES': '\\', 'HAVE_GETSPNAM': 1, 'HAVE_SYS_TERMIO_H': 0, 'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0, 'HAVE_PTHREAD_H': 1, 'Py_DEBUG': 0, 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, 'X87_DOUBLE_ROUNDING': 1, 'SIZEOF_TIME_T': 4, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_DIRECT_H': 0, 'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py', 'HAVE_GETADDRINFO': 1, 'HAVE_BROKEN_NICE': 0, 'HAVE_DIRENT_H': 1, 'HAVE_WCSXFRM': 1, 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, 'HAVE_FSTATVFS': 1, 'PYTHON': 'python', 'HAVE_OSX105_SDK': 0, 'BINDIR': '/usr/bin', 'TESTPYTHON': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python', 'ARFLAGS': 'rc', 'PLATDIR': 'plat-i386-linux-gnu', 'HAVE_ASM_TYPES_H': 1, 'PY3LIBRARY': 'libpython3.so', 'HAVE_PLOCK': 0, 'FLOCK_NEEDS_LIBBSD': 0, 'WITH_TSC': 0, 'HAVE_LIBREADLINE': 1, 'MACHDEP': 'linux', 'HAVE_SELECT': 1, 'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_HSTRERROR': 1, 'SOABI': 'cpython-33m', 'HAVE_GETTIMEOFDAY': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_UNSETENV': 1, 'HAVE_TM_ZONE': 1, 'HAVE_GETPGRP': 1, 'HAVE_FLOCK': 1, 'HAVE_SYS_BSDTTY_H': 0, 'SUBDIRS': '', 'PYTHONFRAMEWORKINSTALLDIR': '', 'PACKAGE_BUGREPORT': 0, 'HAVE_CLOCK': 1, 'HAVE_GETPEERNAME': 1, 'SIZEOF_PID_T': 4, 'HAVE_CONIO_H': 0, 'HAVE_FSTATAT': 1, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_WAIT3': 1, 'DESTPATH': '', 'HAVE_STAT_TV_NSEC2': 0, 'HAVE_GETRESGID': 1, 'HAVE_UCS4_TCL': 0, 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, 'HAVE_TIMES': 1, 'HAVE_UNAME': 1, 'HAVE_ERF': 1, 'SIZEOF_SHORT': 2, 'HAVE_NCURSES_H': 1, 'HAVE_SYS_SENDFILE_H': 1, 'HAVE_CTERMID_R': 0, 'HAVE_TMPNAM': 1, 'prefix': '/usr', 'HAVE_NICE': 1, 'WITH_THREAD': 1, 'LN': 'ln', 'TESTRUNNER': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python ../Tools/scripts/run_tests.py', 'HAVE_SIGINTERRUPT': 1, 'HAVE_SETPGID': 1, 'RETSIGTYPE': 'void', 'HAVE_SCHED_GET_PRIORITY_MAX': 1, 'HAVE_SYS_SYS_DOMAIN_H': 0, 'HAVE_SYS_DIR_H': 0, 'HAVE__GETPTY': 0, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_POLL': 1, 'PYTHON_OBJS': '\\', 'HAVE_WAITPID': 1, 'USE_INLINE': 1, 'HAVE_FUTIMENS': 1, 'USE_COMPUTED_GOTOS': 1, 'MAINCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SOCKETPAIR': 1, 'HAVE_PROCESS_H': 0, 'HAVE_SETVBUF': 1, 'HAVE_FDOPENDIR': 1, 'CONFINCLUDEDIR': '/usr/include', 'BINLIBDEST': '/usr/lib/python3.3', 'HAVE_SYS_IOCTL_H': 1, 'HAVE_SYSEXITS_H': 1, 'LDLAST': '', 'HAVE_SYS_FILE_H': 1, 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, 'HAVE_RL_COMPLETION_MATCHES': 1, 'HAVE_TCGETPGRP': 1, 'SIZEOF_SIZE_T': 4, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_SYS_SELECT_H': 1, 'HAVE_CLOCK_GETTIME': 1, 'CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HAVE_SNPRINTF': 1, 'BLDLIBRARY': '-lpython3.3m', 'PARSER_HEADERS': '\\', 
'SO': '.so', 'LIBRARY': 'libpython3.3m.a', 'HAVE_FPATHCONF': 1, 'HAVE_TERMIOS_H': 1, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'AST_H': 'Include/Python-ast.h', 'HAVE_GCC_UINT128_T': 0, 'HAVE_ACOSH': 1, 'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/timemodule.o Modules/_randommodule.o Modules/atexitmodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/operator.o Modules/_collectionsmodule.o Modules/itertoolsmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o Modules/symtablemodule.o Modules/xxsubtype.o', 'AST_C': 'Python/Python-ast.c', 'HAVE_SYS_NDIR_H': 0, 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.3 /usr/lib/python3.3/lib-dynload', 'HAVE_SIGNAL_H': 1, 'PACKAGE_TARNAME': 0, 'HAVE_GETPRIORITY': 1, 'INCLUDEDIR': '/usr/include', 'HAVE_INTTYPES_H': 1, 'SIGNAL_OBJS': '', 'HAVE_READV': 1, 'HAVE_SETHOSTNAME': 1, 'MODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'CC': 'i686-linux-gnu-gcc -pthread', 'HAVE_LCHMOD': 0, 'SIZEOF_UINTPTR_T': 4, 'LIBPC': '/usr/lib/i386-linux-gnu/pkgconfig', 'BYTESTR_DEPS': '\\', 'HAVE_MKDIRAT': 1, 'LIBPL': '/usr/lib/python3.3/config-3.3m-i386-linux-gnu', 'HAVE_SHADOW_H': 1, 'HAVE_SYS_EVENT_H': 0, 'INSTALL': '/usr/bin/install -c', 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_BROKEN_UNSETENV': 0, 'BASECPPFLAGS': '', 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'HAVE_STRUCT_STAT_ST_RDEV': 1, 'HAVE_SEM_UNLINK': 1, 'BUILDPYTHON': 'python', 'HAVE_RL_CATCH_SIGNAL': 1, 'HAVE_DECL_TZNAME': 0, 'RESSRCDIR': 'Mac/Resources/framework', 'HAVE_PTHREAD_SIGMASK': 1, 'HAVE_UTIMES': 1, 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'HAVE_FDATASYNC': 1, 'HAVE_USABLE_WCHAR_T': 0, 'PY_FORMAT_SIZE_T': '"z"', 'HAVE_SCHED_SETSCHEDULER': 1, 'VA_LIST_IS_ARRAY': 0, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_SETREGID': 1, 'HAVE_STROPTS_H': 1, 'LDVERSION': '3.3m', 'abs_builddir': '/build/buildd/python3.3-3.3.1/build-shared', 'SITEPATH': '', 'HAVE_GETHOSTBYNAME': 0, 'HAVE_SIGPENDING': 1, 'HAVE_KQUEUE': 0, 'HAVE_SYNC': 1, 'HAVE_GETSID': 1, 'HAVE_ROUND': 1, 'HAVE_STRFTIME': 1, 'AST_H_DIR': 'Include', 'HAVE_PIPE2': 1, 'AST_C_DIR': 'Python', 'TESTPYTHONOPTS': '', 'HAVE_DEV_PTC': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'HAVE_NET_IF_H': 1, 'HAVE_SENDFILE': 1, 'HAVE_SETPGRP': 1, 'HAVE_SEM_GETVALUE': 1, 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'DLLLIBRARY': '', 'PYTHON_FOR_BUILD': './python -E', 'SETPGRP_HAVE_ARG': 0, 'HAVE_INET_ATON': 1, 'INSTALL_SHARED': '/usr/bin/install -c -m 555', 'WITH_DOC_STRINGS': 1, 'OPCODETARGETS_H': '\\', 'HAVE_INITGROUPS': 1, 'HAVE_LINKAT': 1, 'BASEMODLIBS': '', 'SGI_ABI': '', 'HAVE_SCHED_SETPARAM': 1, 'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes', 'HAVE_POSIX_FADVISE': 1, 'datarootdir': '/usr/share', 'HAVE_MEMRCHR': 1, 'HGTAG': '', 'HAVE_MEMMOVE': 1, 'HAVE_GETRESUID': 1, 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 
'HAVE_LSTAT': 1, 'AR': 'ar', 'HAVE_WAIT4': 1, 'HAVE_SYS_MODEM_H': 0, 'INSTSONAME': 'libpython3.3m.so.1.0', 'HAVE_SYS_STATVFS_H': 1, 'HAVE_LGAMMA': 1, 'HAVE_PROTOTYPES': 1, 'HAVE_SYS_UIO_H': 1, 'MAJOR_IN_MKDEV': 0, 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', 'HAVE_SYS_DEVPOLL_H': 0, 'HAVE_CHFLAGS': 0, 'HAVE_FSYNC': 1, 'HAVE_FCHMOD': 1, 'INCLUDEPY': '/usr/include/python3.3m', 'HAVE_SEM_TIMEDWAIT': 1, 'LDLIBRARYDIR': '', 'HAVE_STRUCT_TM_TM_ZONE': 1, 'HAVE_CURSES_H': 1, 'TIME_WITH_SYS_TIME': 1, 'HAVE_DUP2': 1, 'ENABLE_IPV6': 1, 'WITH_VALGRIND': 0, 'HAVE_SETITIMER': 1, 'THREADOBJ': 'Python/thread.o', 'LOCALMODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'HAVE_MEMORY_H': 1, 'HAVE_GETITIMER': 1, 'HAVE_C99_BOOL': 1, 'INSTALL_DATA': '/usr/bin/install -c -m 644', 'PGEN': 'Parser/pgen', 'HAVE_GRP_H': 1, 'HAVE_WCSFTIME': 1, 'AIX_GENUINE_CPLUSPLUS': 0, 'HAVE_LIBINTL_H': 1, 'SHELL': '/bin/sh', 'HAVE_UNISTD_H': 1, 'EXTRATESTOPTS': '', 'HAVE_EXECV': 1, 'HAVE_FSEEK64': 0, 'MVWDELCH_IS_EXPRESSION': 1, 'DESTSHARED': '/usr/lib/python3.3/lib-dynload', 'OPCODETARGETGEN': '\\', 'LIBDEST': '/usr/lib/python3.3', 'CCSHARED': '-fPIC', 'HAVE_EXPM1': 1, 'HAVE_DLFCN_H': 1, 'exec_prefix': '/usr', 'HAVE_READLINK': 1, 'WINDOW_HAS_FLAGS': 1, 'HAVE_FTELL64': 0, 'HAVE_STRLCPY': 0, 'MACOSX_DEPLOYMENT_TARGET': '', 'HAVE_SYS_SYSCALL_H': 1, 'DESTLIB': '/usr/lib/python3.3', 'LDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HGVERSION': '', 'PYTHON_HEADERS': '\\', 'HAVE_STRINGS_H': 1, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'HAVE_POSIX_FALLOCATE': 1, 'HAVE_DIRFD': 1, 'HAVE_LOG2': 1, 'HAVE_GETPID': 1, 'HAVE_ALARM': 1, 'MACHDEP_OBJS': '', 'HAVE_SPAWN_H': 1, 'HAVE_FORK': 1, 'HAVE_SETRESGID': 1, 'HAVE_FCHMODAT': 1, 'HAVE_CLOCK_GETRES': 1, 'MACHDEPPATH': ':plat-i386-linux-gnu', 'STDC_HEADERS': 1, 'HAVE_SETPRIORITY': 1, 'LIBC': '', 'HAVE_SYS_EPOLL_H': 1, 'HAVE_SYS_UTSNAME_H': 1, 'HAVE_PUTENV': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_FUTIMESAT': 1, 'WITH_DYLD': 0, 'INSTALL_PROGRAM': '/usr/bin/install -c', 'LIBS': '-lpthread -ldl -lutil', 'HAVE_TRUNCATE': 1, 'TESTOPTS': '', 'PROFILE_TASK': '../Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck', 'HAVE_CURSES_RESIZETERM': 1, 'ABIFLAGS': 'm', 'HAVE_GETGROUPLIST': 1, 'OBJECT_OBJS': '\\', 'HAVE_MKNODAT': 1, 'HAVE_ST_BLOCKS': 1, 'HAVE_STRUCT_STAT_ST_GEN': 0, 'SYS_SELECT_WITH_SYS_TIME': 1, 'SHLIBS': '-lpthread -ldl -lutil', 'HAVE_GETGROUPS': 1, 'MODULE_OBJS': '\\', 'PYTHONFRAMEWORKDIR': 'no-framework', 'HAVE_FCNTL_H': 1, 'HAVE_LINK': 1, 'HAVE_SIGWAIT': 1, 'HAVE_GAMMA': 1, 'HAVE_SYS_LOCK_H': 0, 'HAVE_FORKPTY': 1, 'HAVE_SOCKADDR_SA_LEN': 0, 'HAVE_TEMPNAM': 1, 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_SIGWAITINFO': 1, 'HAVE_FTIME': 1, 'HAVE_EPOLL': 1, 'HAVE_SYS_SOCKET_H': 1, 'HAVE_LARGEFILE_SUPPORT': 1, 'CONFIGURE_CFLAGS': '-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security', 'HAVE_PTHREAD_DESTRUCTOR': 0, 'CONFIGURE_CPPFLAGS': '-D_FORTIFY_SOURCE=2', 'HAVE_SYMLINK': 1, 'HAVE_LONG_LONG': 1, 'HAVE_IEEEFP_H': 0, 'LIBDIR': '/usr/lib', 'HAVE_PTHREAD_KILL': 1, 'TESTPATH': '', 'HAVE_STRDUP': 1, 'POBJS': '\\', 'NO_AS_NEEDED': '-Wl,--no-as-needed', 'HAVE_LONG_DOUBLE': 1, 'HGBRANCH': '', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, 
'HAVE_FACCESSAT': 1, 'AST_ASDL': '../Parser/Python.asdl', 'CPPFLAGS': '-I. -IInclude -I../Include -D_FORTIFY_SOURCE=2', 'HAVE_MKTIME': 1, 'HAVE_NDIR_H': 0, 'PY_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBOBJDIR': 'Python/', 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'PACKAGE_STRING': 0, 'GNULD': 'yes', 'LOG1P_DROPS_ZERO_SIGN': 0, 'HAVE_FTRUNCATE': 1, 'WITH_LIBINTL': 0, 'HAVE_MREMAP': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_KILLPG': 1, 'SIZEOF_LONG': 4, 'HAVE_DECL_ISFINITE': 1, 'HAVE_IPA_PURE_CONST_BUG': 0, 'WITH_PYMALLOC': 1, 'abs_srcdir': '/build/buildd/python3.3-3.3.1/build-shared/..', 'HAVE_FCHDIR': 1, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'AC_APPLE_UNIVERSAL_BUILD': 0, 'PGENSRCS': '\\ \\', 'DIRMODE': 755, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_LCHFLAGS': 0, 'HAVE_SYS_PARAM_H': 1, 'SIZEOF_LONG_DOUBLE': 12, 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--with-system-expat' '--with-system-ffi' '--with-fpectl' 'CC=i686-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' 'CPPFLAGS=-D_FORTIFY_SOURCE=2'", 'HAVE_SCHED_H': 1, 'HAVE_KILL': 1}
|
sclabs/sitestatus-nonrel
|
refs/heads/master
|
django/contrib/sitemaps/models.py
|
914
|
# This file intentionally left blank
|
jgcaaprom/android_external_chromium_org
|
refs/heads/cm-12.1
|
chrome/browser/PRESUBMIT.py
|
36
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser code.
This script currently only checks HTML/CSS/JS files in resources/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
checked for here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
path = input_api.os_path
cwd = input_api.PresubmitLocalPath()
resources = path.join(cwd, 'resources')
webui = path.join(cwd, 'ui', 'webui')
affected_files = (f.AbsoluteLocalPath() for f in input_api.AffectedFiles())
would_affect_tests = (
path.join(cwd, 'PRESUBMIT.py'),
path.join(cwd, 'test_presubmit.py'),
path.join(cwd, 'web_dev_style', 'css_checker.py'),
path.join(cwd, 'web_dev_style', 'html_checker.py'),
path.join(cwd, 'web_dev_style', 'js_checker.py'),
)
if any(f for f in affected_files if f in would_affect_tests):
tests = [path.join(cwd, 'test_presubmit.py')]
results.extend(
input_api.canned_checks.RunUnitTests(input_api, output_api, tests))
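# Temporarily prepend this directory to sys.path so the web_dev_style
# checkers can be imported; the original path is restored in the finally.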
import sys
old_path = sys.path
try:
sys.path = [cwd] + old_path
from web_dev_style import (resource_checker, css_checker, html_checker,
js_checker)
search_dirs = (resources, webui)
def _html_css_js_resource(p):
return p.endswith(('.html', '.css', '.js')) and p.startswith(search_dirs)
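# str.startswith accepts a tuple of prefixes, so this matches files under
# either search directory.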
BLACKLIST = ['chrome/browser/resources/pdf/index.html',
'chrome/browser/resources/pdf/index.js']
def is_resource(maybe_resource):
return (maybe_resource.LocalPath() not in BLACKLIST and
_html_css_js_resource(maybe_resource.AbsoluteLocalPath()))
results.extend(resource_checker.ResourceChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(css_checker.CSSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(html_checker.HtmlChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(js_checker.JSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
finally:
sys.path = old_path
return results
|
yawnosnorous/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/tkinter/_fix.py
|
51
|
import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without importing Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
if isinstance(s, bytes):
s = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(s, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer("", 32768)
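# A 32768-character buffer covers the maximum extended-length ("\\?\")
# path that Windows allows.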
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res]
# Ignore leading \\?\
if s.startswith("\\\\?\\"):
s = s[4:]
if s.startswith("UNC"):
s = "\\" + s[3:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if "TCL_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if "TK_LIBRARY" not in os.environ:
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if "TIX_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
|
cristiana214/cristianachavez214-cristianachavez
|
refs/heads/master
|
python/src/Lib/test/test_strptime.py
|
55
|
"""PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import test_support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.failUnlessEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime.
All values are lower-cased when stored in LocaleTime, so make sure to
compare values after running ``lower`` on them.
"""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple).lower()
comparison = testing[self.time_tuple[tuple_position]]
self.failUnless(strftime_output in testing, "%s: not found in tuple" %
error_msg)
self.failUnless(comparison == strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple).lower()
self.failUnless(strftime_output in self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.failUnless(strftime_output == self.LT_ins.am_pm[position],
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple).lower()
if timezone:
self.failUnless(timezone in self.LT_ins.timezone[0] or \
timezone in self.LT_ins.timezone[1],
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
# Check that LC_date_time, LC_date, and LC_time are correct
# The magic date is used so as to not have issues with %c when day of
# the month is a single digit and has a leading space. This is not an
# issue since strptime still parses it correctly. The problem is
# testing these directives for correctness by comparing strftime
# output.
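# (1999, 3, 17, 22, 44, 55, 2, 76, 0) is Wednesday 1999-03-17 22:44:55,
# day 76 of the year.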
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date_time,
magic_date),
"LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date,
magic_date),
"LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_time,
magic_date),
"LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.failUnless(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.failUnlessEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.failUnless(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
# Make sure any characters in the format string that might be taken as
# regex syntax are escaped.
pattern_string = self.time_re.pattern("\d+")
self.failUnless(r"\\d\+" in pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.failUnless(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.failUnless(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.failUnless(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.failUnless(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
# Make sure that when a tuple (here the timezone sets) has no values, no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.failUnless(_strptime.TimeRE(test_locale).pattern("%Z") == '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
compiled_re = self.time_re.compile("\w+ %m")
found = compiled_re.match("\w+ 10")
self.failUnless(found, "Escaping failed of format '\w+ 10'")
def test_locale_data_w_regex_metacharacters(self):
# Check that if locale data contains regex metacharacters they are
# escaped properly.
# Discovered by bug #1039270 .
locale_time = _strptime.LocaleTime()
locale_time.timezone = (frozenset(("utc", "gmt",
"Tokyo (standard time)")),
frozenset("Tokyo (daylight time)"))
time_re = _strptime.TimeRE(locale_time)
self.failUnless(time_re.compile("%Z").match("Tokyo (standard time)"),
"locale data that contains regex metacharacters is not"
" properly escaped")
def test_whitespace_substitution(self):
# When pattern contains whitespace, make sure it is taken into account
# so as to not allow subpatterns to end up next to each other and
# "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
self.failUnless(not re.match(pattern, "180"))
self.failUnless(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_ValueError(self):
# Make sure ValueError is raised when match fails or format is bad
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%e"):
try:
_strptime._strptime_time("2005", bad_format)
except ValueError:
continue
except Exception, err:
self.fail("'%s' raised %s, not ValueError" %
(bad_format, err.__class__.__name__))
else:
self.fail("'%s' did not raise ValueError" % bad_format)
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%" + directive)
self.failUnless(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime._strptime_time(bound, '%y')
expected_result = century + int(bound)
self.failUnless(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%I %p")
self.failUnless(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_fraction(self):
import datetime
now = datetime.datetime.now()
tup, frac = _strptime._strptime(str(now), format="%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(frac, now.microsecond)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
# When gmtime() is used with %Z, entire result of strftime() is empty.
# Check for equal timezone names deals with bad locale info when this
# occurs; first found in FreeBSD 4.4.
strp_output = _strptime._strptime_time("UTC", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
strp_output = _strptime._strptime_time("GMT", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
if sys.platform == "mac":
# Timezones don't really work on MacOS9
return
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime._strptime_time(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.failUnless(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.failUnless(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
if sys.platform == "mac":
return #MacOS9 has severely broken timezone support.
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
return
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
self.failUnlessEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
self.failUnless(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.failUnless(_strptime._strptime_time(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.failUnless(_strptime._strptime_time(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.failUnless(_strptime._strptime_time(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
# Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, -1)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime._strptime_time('1', '%m')
self.failUnless(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.failUnless(_strptime._strptime_time(need_escaping, need_escaping))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12)
eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
# use 2004, since it is a leap year and therefore has 366 days
eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
# Make sure that when Julian is missing that it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
# Should be able to infer date if given year, week of year (%U or %W)
# and day of the week
def test_helper(ymd_tuple, test_reason):
for directive in ('W', 'U'):
format_string = "%%Y %%%s %%w" % directive
dt_date = datetime_date(*ymd_tuple)
strp_input = dt_date.strftime(format_string)
strp_output = _strptime._strptime_time(strp_input, format_string)
self.failUnless(strp_output[:3] == ymd_tuple,
"%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
(test_reason, directive, strp_input,
strp_output[:3], ymd_tuple,
strp_output[7], dt_date.timetuple()[7]))
test_helper((1901, 1, 3), "week 0")
test_helper((1901, 1, 8), "common case")
test_helper((1901, 1, 13), "day on Sunday")
test_helper((1901, 1, 14), "day on Monday")
test_helper((1905, 1, 1), "Jan 1 on Sunday")
test_helper((1906, 1, 1), "Jan 1 on Monday")
test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
test_helper((1905, 12, 31), "Dec 31 on Sunday")
test_helper((1906, 12, 31), "Dec 31 on Monday")
test_helper((2008, 12, 29), "Monday in the last week of the year")
test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
"year")
test_helper((1978, 10, 23), "randomly chosen date")
test_helper((2004, 12, 18), "randomly chosen date")
test_helper((1978, 10, 23), "year starting and ending on Monday while "
"date not on Sunday or Monday")
test_helper((1917, 12, 17), "year starting and ending on Monday with "
"a Monday not at the beginning or end "
"of the year")
test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
"ending on Monday")
test_helper((2007, 01, 07), "First Sunday of 2007")
test_helper((2007, 01, 14), "Second Sunday of 2007")
test_helper((2006, 12, 31), "Last Sunday of 2006")
test_helper((2006, 12, 24), "Second to last Sunday of 2006")
class CacheTests(unittest.TestCase):
"""Test that caching works properly."""
def test_time_re_recreation(self):
# Make sure the cache is recreated when the current locale does not
# match the one the cached object was created with.
_strptime._strptime_time("10", "%d")
_strptime._strptime_time("2005", "%Y")
_strptime._TimeRE_cache.locale_time.lang = "Ni"
original_time_re = id(_strptime._TimeRE_cache)
_strptime._strptime_time("10", "%d")
self.failIfEqual(original_time_re, id(_strptime._TimeRE_cache))
self.failUnlessEqual(len(_strptime._regex_cache), 1)
def test_regex_cleanup(self):
# Make sure cached regexes are discarded when cache becomes "full".
try:
del _strptime._regex_cache['%d']
except KeyError:
pass
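# Flood the cache past _CACHE_MAX_SIZE with bogus keys so the next
# _strptime_time() call must discard them and rebuild the cache.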
bogus_key = 0
while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
_strptime._regex_cache[bogus_key] = None
bogus_key += 1
_strptime._strptime_time("10", "%d")
self.failUnlessEqual(len(_strptime._regex_cache), 1)
def test_new_localetime(self):
# A new LocaleTime instance should be created when a new TimeRE object
# is created.
locale_time_id = id(_strptime._TimeRE_cache.locale_time)
_strptime._TimeRE_cache.locale_time.lang = "Ni"
_strptime._strptime_time("10", "%d")
self.failIfEqual(locale_time_id,
id(_strptime._TimeRE_cache.locale_time))
def test_TimeRE_recreation(self):
# The TimeRE instance should be recreated upon changing the locale.
locale_info = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
except locale.Error:
return
try:
_strptime._strptime_time('10', '%d')
# Get id of current cache object.
first_time_re_id = id(_strptime._TimeRE_cache)
try:
# Change the locale and force a recreation of the cache.
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
_strptime._strptime_time('10', '%d')
# Get the new cache object's id.
second_time_re_id = id(_strptime._TimeRE_cache)
# They should not be equal.
self.failIfEqual(first_time_re_id, second_time_re_id)
# It is possible that the test locale is not supported while the initial
# locale is. If that is the case, just suppress the exception and fall
# through to resetting the original locale.
except locale.Error:
pass
# Make sure we don't trample on the locale setting once we leave the
# test.
finally:
locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
test_support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests
)
if __name__ == '__main__':
test_main()
|
essamjoubori/girder
|
refs/heads/master
|
girder/models/setting.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from collections import OrderedDict
import cherrypy
import six
from ..constants import SettingDefault
from .model_base import Model, ValidationException
from girder.utility import camelcase, plugin_utilities
from bson.objectid import ObjectId
class Setting(Model):
"""
This model represents server-wide configuration settings as key/value pairs.
"""
def initialize(self):
self.name = 'setting'
self.ensureIndices(['key'])
def validate(self, doc):
"""
This method is in charge of validating that the setting key is a valid
key, and that for that key, the provided value is valid. It first
allows plugins to validate the setting, but if none of them can, it
assumes it is a core setting and does the validation here.
"""
key = doc['key']
funcName = 'validate'+camelcase(key)
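# e.g. a key like "core.smtp_host" dispatches to validateCoreSmtpHost below.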
if callable(getattr(self, funcName, None)):
getattr(self, funcName)(doc)
else:
raise ValidationException(
'Invalid setting key "%s".' % key, 'key')
return doc
def validateCorePluginsEnabled(self, doc):
"""
Ensures that the set of plugins passed in is a list of valid plugin
names. Removes any invalid plugin names, removes duplicates, and adds
all transitive dependencies to the enabled list.
"""
if not isinstance(doc['value'], list):
raise ValidationException(
'Plugins enabled setting must be a list.', 'value')
# Add all transitive dependencies and store in toposorted order
doc['value'] = list(plugin_utilities.getToposortedPlugins(doc['value']))
def validateCoreAddToGroupPolicy(self, doc):
doc['value'] = doc['value'].lower()
if doc['value'] not in ('never', 'noadmin', 'nomod', 'yesadmin',
'yesmod', ''):
raise ValidationException(
'Add to group policy must be one of "never", "noadmin", '
'"nomod", "yesadmin", or "yesmod".', 'value')
def validateCoreCollectionCreatePolicy(self, doc):
value = doc['value']
if not isinstance(value, dict):
raise ValidationException('Collection creation policy must be a '
'JSON object.')
for i, groupId in enumerate(value.get('groups', ())):
self.model('group').load(groupId, force=True, exc=True)
value['groups'][i] = ObjectId(value['groups'][i])
for i, userId in enumerate(value.get('users', ())):
self.model('user').load(userId, force=True, exc=True)
value['users'][i] = ObjectId(value['users'][i])
value['open'] = value.get('open', False)
def validateCoreCookieLifetime(self, doc):
try:
doc['value'] = int(doc['value'])
if doc['value'] > 0:
return
except ValueError:
pass # We want to raise the ValidationException
raise ValidationException(
'Cookie lifetime must be an integer > 0.', 'value')
def validateCoreCorsAllowMethods(self, doc):
if isinstance(doc['value'], six.string_types):
methods = doc['value'].replace(",", " ").strip().upper().split()
# remove duplicates
methods = list(OrderedDict.fromkeys(methods))
doc['value'] = ", ".join(methods)
return
raise ValidationException(
'Allowed methods must be a comma-separated list or an empty '
'string.', 'value')
def validateCoreCorsAllowHeaders(self, doc):
if isinstance(doc['value'], six.string_types):
headers = doc['value'].replace(",", " ").strip().split()
# remove duplicates
headers = list(OrderedDict.fromkeys(headers))
doc['value'] = ", ".join(headers)
return
raise ValidationException(
'Allowed headers must be a comma-separated list or an empty '
'string.', 'value')
def validateCoreCorsAllowOrigin(self, doc):
if isinstance(doc['value'], six.string_types):
origins = doc['value'].replace(",", " ").strip().split()
origins = [origin.rstrip('/') for origin in origins]
# remove duplicates
origins = list(OrderedDict.fromkeys(origins))
doc['value'] = ", ".join(origins)
return
raise ValidationException(
'Allowed origin must be a comma-separated list of base urls or * '
'or an empty string.', 'value')
def validateCoreEmailFromAddress(self, doc):
if not doc['value']:
raise ValidationException(
'Email from address must not be blank.', 'value')
def validateCoreEmailHost(self, doc):
if isinstance(doc['value'], six.string_types):
doc['value'] = doc['value'].strip()
return
raise ValidationException(
'Email host must be a string.', 'value')
def defaultCoreEmailHost(self):
if (cherrypy.request and cherrypy.request.local and
cherrypy.request.local.name):
host = '://'.join((cherrypy.request.scheme,
cherrypy.request.local.name))
if cherrypy.request.local.port != 80:
host += ':%d' % cherrypy.request.local.port
return host
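    # Example (illustrative): per the code above, a live cherrypy request
    # with scheme 'https', local name 'girder.example.com' and port 8080
    # would produce 'https://girder.example.com:8080'; on port 80 the port
    # suffix is omitted.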
def validateCoreRegistrationPolicy(self, doc):
doc['value'] = doc['value'].lower()
if doc['value'] not in ('open', 'closed'):
raise ValidationException(
'Registration policy must be either "open" or "closed".',
'value')
def validateCoreSmtpHost(self, doc):
if not doc['value']:
raise ValidationException(
'SMTP host must not be blank.', 'value')
def validateCoreSmtpPort(self, doc):
try:
doc['value'] = int(doc['value'])
if doc['value'] > 0:
return
except ValueError:
pass # We want to raise the ValidationException
raise ValidationException('SMTP port must be an integer > 0.', 'value')
def validateCoreSmtpEncryption(self, doc):
if not doc['value'] in ['none', 'starttls', 'ssl']:
raise ValidationException(
'SMTP encryption must be one of "none", "starttls", or "ssl".',
'value')
def validateCoreSmtpUsername(self, doc):
# any string is acceptable
pass
def validateCoreSmtpPassword(self, doc):
# any string is acceptable
pass
def validateCoreUploadMinimumChunkSize(self, doc):
try:
doc['value'] = int(doc['value'])
if doc['value'] >= 0:
return
except ValueError:
pass # We want to raise the ValidationException
raise ValidationException(
'Upload minimum chunk size must be an integer >= 0.',
'value')
def validateCoreUserDefaultFolders(self, doc):
if doc['value'] not in ('public_private', 'none'):
raise ValidationException(
'User default folders must be either "public_private" or '
'"none".', 'value')
def get(self, key, default='__default__'):
"""
Retrieve a setting by its key.
:param key: The key identifying the setting.
:type key: str
:param default: If no such setting exists, returns this value instead.
:returns: The value, or the default value if the key is not found.
"""
setting = self.findOne({'key': key})
if setting is None:
            if default == '__default__':
default = self.getDefault(key)
return default
else:
return setting['value']
def set(self, key, value):
"""
Save a setting. If a setting for this key already exists, this will
replace the existing value.
:param key: The key identifying the setting.
:type key: str
:param value: The object to store for this setting.
:returns: The document representing the saved Setting.
"""
setting = self.findOne({'key': key})
if setting is None:
setting = {
'key': key,
'value': value
}
else:
setting['value'] = value
return self.save(setting)
def unset(self, key):
"""
Remove the setting for this key. If no such setting exists, this is
a no-op.
:param key: The key identifying the setting to be removed.
:type key: str
"""
for setting in self.find({'key': key}):
self.remove(setting)
def getDefault(self, key):
"""
Retrieve the system default for a value.
:param key: The key identifying the setting.
:type key: str
        :returns: The default value if the key is present in
                  SettingDefault.defaults or a corresponding default method
                  exists on this model; otherwise None.
"""
default = None
if key in SettingDefault.defaults:
default = SettingDefault.defaults[key]
else:
funcName = 'default'+camelcase(key)
if callable(getattr(self, funcName, None)):
default = getattr(self, funcName)()
return default
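# Minimal usage sketch (illustrative only; assumes a configured Girder
# application so the model can reach its database):
#
#     settings = Setting()
#     settings.set('core.registration_policy', 'closed')
#     assert settings.get('core.registration_policy') == 'closed'
#     settings.unset('core.registration_policy')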
|
yl565/statsmodels
|
refs/heads/master
|
statsmodels/nonparametric/tests/test_kde.py
|
29
|
import os
import numpy.testing as npt
from nose import SkipTest
from nose.tools import raises
import numpy as np
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.kde import KDEUnivariate as KDE
import statsmodels.sandbox.nonparametric.kernels as kernels
from scipy import stats
# get results from Stata
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir,'results','results_kde.csv')
#print rfname
KDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
rfname = os.path.join(curdir,'results','results_kde_univ_weights.csv')
KDEWResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# get results from R
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir,'results','results_kcde.csv')
#print rfname
KCDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# setup test data
np.random.seed(12345)
Xi = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
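# Illustrative sketch of the basic workflow the tests below exercise (using
# the KDEUnivariate API imported above as KDE):
#
#     kde = KDE(Xi)
#     kde.fit(kernel="gau", bw="silverman", fft=False)
#     density_at_zero = kde.evaluate(0.0)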
class TestKDEExceptions(object):
@classmethod
def setupClass(cls):
cls.kde = KDE(Xi)
cls.weights_200 = np.linspace(1, 100, 200)
cls.weights_100 = np.linspace(1, 100, 100)
@raises(ValueError)
def test_check_is_fit_exception(self):
self.kde.evaluate(0)
@raises(NotImplementedError)
def test_non_weighted_fft_exception(self):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_200, fft=True,
bw="silverman")
@raises(ValueError)
def test_wrong_weight_length_exception(self):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_100, fft=False,
bw="silverman")
@raises(NotImplementedError)
def test_non_gaussian_fft_exception(self):
self.kde.fit(kernel="epa", gridsize=50, fft=True,
bw="silverman")
class CheckKDE(object):
decimal_density = 7
def test_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
# disable test
# fails for Epan, Triangular and Biweight, only Gaussian is correct
# added it as test method to TestKDEGauss below
# inDomain is not vectorized
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGauss(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["gau_d"]
def test_evaluate(self):
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
# The following tests are regression tests
# Values have been checked to be very close to R 'ks' package (Dec 2013)
def test_support_gridded(self):
kde = self.res1
support = KCDEResults['gau_support']
npt.assert_allclose(support, kde.support)
def test_cdf_gridded(self):
kde = self.res1
cdf = KCDEResults['gau_cdf']
npt.assert_allclose(cdf, kde.cdf)
def test_sf_gridded(self):
kde = self.res1
sf = KCDEResults['gau_sf']
npt.assert_allclose(sf, kde.sf)
def test_icdf_gridded(self):
kde = self.res1
icdf = KCDEResults['gau_icdf']
npt.assert_allclose(icdf, kde.icdf)
class TestKDEEpanechnikov(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="epa", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["epa2_d"]
class TestKDETriangular(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="tri", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["tri_d"]
class TestKDEBiweight(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
res1.fit(kernel="biw", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["biw_d"]
#NOTE: This is a known failure due to a definitional difference of the Cosine kernel
#class TestKDECosine(CheckKDE):
# @classmethod
# def setupClass(cls):
# res1 = KDE(Xi)
# res1.fit(kernel="cos", fft=False, bw="silverman")
# cls.res1 = res1
# cls.res_density = KDEResults["cos_d"]
#weighted estimates taken from matlab so we can allow len(weights) != gridsize
class TestKdeWeights(CheckKDE):
@classmethod
def setupClass(cls):
res1 = KDE(Xi)
weights = np.linspace(1,100,200)
res1.fit(kernel="gau", gridsize=50, weights=weights, fft=False,
bw="silverman")
cls.res1 = res1
rfname = os.path.join(curdir,'results','results_kde_weights.csv')
cls.res_density = np.genfromtxt(open(rfname, 'rb'), skip_header=1)
def test_evaluate(self):
#kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGaussFFT(CheckKDE):
@classmethod
def setupClass(cls):
cls.decimal_density = 2 # low accuracy because binning is different
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=True, bw="silverman")
cls.res1 = res1
rfname2 = os.path.join(curdir,'results','results_kde_fft.csv')
cls.res_density = np.genfromtxt(open(rfname2, 'rb'))
class CheckKDEWeights(object):
@classmethod
def setupClass(cls):
cls.x = x = KDEWResults['x']
weights = KDEWResults['weights']
res1 = KDE(x)
        # the default bandwidth was "scott" when the reference values were computed
res1.fit(kernel=cls.kernel_name, weights=weights, fft=False, bw="scott")
cls.res1 = res1
cls.res_density = KDEWResults[cls.res_kernel_name]
decimal_density = 7
    # intentionally disabled (renamed from test_density); kept for reference
    def t_est_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
if self.kernel_name == 'cos':
raise SkipTest("Cosine kernel fails against Stata")
kde_vals = [self.res1.evaluate(xi) for xi in self.x]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
def test_compare(self):
xx = self.res1.support
kde_vals = [self.res1.evaluate(xi) for xi in xx]
kde_vals = np.squeeze(kde_vals) #kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(self.res1.density, kde_vals,
self.decimal_density)
# regression test, not compared to another package
nobs = len(self.res1.endog)
kern = self.res1.kernel
v = kern.density_var(kde_vals, nobs)
v_direct = kde_vals * kern.L2Norm / kern.h / nobs
npt.assert_allclose(v, v_direct, rtol=1e-10)
ci = kern.density_confint(kde_vals, nobs)
crit = 1.9599639845400545 #stats.norm.isf(0.05 / 2)
hw = kde_vals - ci[:, 0]
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
hw = ci[:, 1] - kde_vals
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
def test_kernel_constants(self):
kern = self.res1.kernel
nc = kern.norm_const
# trigger numerical integration
kern._norm_const = None
nc2 = kern.norm_const
npt.assert_allclose(nc, nc2, rtol=1e-10)
l2n = kern.L2Norm
# trigger numerical integration
kern._L2Norm = None
l2n2 = kern.L2Norm
npt.assert_allclose(l2n, l2n2, rtol=1e-10)
v = kern.kernel_var
# trigger numerical integration
kern._kernel_var = None
v2 = kern.kernel_var
npt.assert_allclose(v, v2, rtol=1e-10)
class TestKDEWGauss(CheckKDEWeights):
kernel_name = "gau"
res_kernel_name = "x_gau_wd"
class TestKDEWEpa(CheckKDEWeights):
kernel_name = "epa"
res_kernel_name = "x_epan2_wd"
class TestKDEWTri(CheckKDEWeights):
kernel_name = "tri"
res_kernel_name = "x_" + kernel_name + "_wd"
class TestKDEWBiw(CheckKDEWeights):
kernel_name = "biw"
res_kernel_name = "x_bi_wd"
class TestKDEWCos(CheckKDEWeights):
kernel_name = "cos"
res_kernel_name = "x_cos_wd"
class TestKDEWCos2(CheckKDEWeights):
kernel_name = "cos2"
res_kernel_name = "x_cos_wd"
class T_estKDEWRect(CheckKDEWeights):
#TODO in docstring but not in kernel_switch
kernel_name = "rect"
res_kernel_name = "x_rec_wd"
class T_estKDEWPar(CheckKDEWeights):
# TODO in docstring but not implemented in kernels
kernel_name = "par"
res_kernel_name = "x_par_wd"
class TestKdeRefit():
np.random.seed(12345)
data1 = np.random.randn(100) * 100
pdf = KDE(data1)
pdf.fit()
data2 = np.random.randn(100) * 100
pdf2 = KDE(data2)
pdf2.fit()
for attr in ['icdf', 'cdf', 'sf']:
npt.assert_(not np.allclose(getattr(pdf, attr)[:10],
getattr(pdf2, attr)[:10]))
class TestNormConstant():
def test_norm_constant_calculation(self):
custom_gauss = kernels.CustomKernel(lambda x: np.exp(-x**2/2.0))
gauss_true_const = 0.3989422804014327
npt.assert_almost_equal(gauss_true_const, custom_gauss.norm_const)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
|
kiyoto/statsmodels
|
refs/heads/master
|
statsmodels/iolib/api.py
|
33
|
from .foreign import StataReader, genfromdta, savetxt, StataWriter
from .table import SimpleTable, csv2st
from .smpickle import save_pickle, load_pickle
|
wujf/rethinkdb
|
refs/heads/next
|
test/rdb_workloads/stress_workloads/x_connect.py
|
50
|
#!/usr/bin/env python
import sys, os, random
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import utils
r = utils.import_python_driver()
class Workload:
def __init__(self, options):
self.db = options["db"]
self.table = options["table"]
self.hosts = options["hosts"]
def run(self, conn):
host = random.choice(self.hosts)
with r.connect(host[0], host[1]) as conn:
pass
return {}
|
zenodo/invenio
|
refs/heads/zenodo-master
|
invenio/legacy/websubmit/functions/Convert_RecXML_to_RecALEPH_DELETE.py
|
13
|
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is the Convert_RecXML_to_RecALEPH module. It contains the
Convert_RecXML_to_RecALEPH WebSubmit function.
"""
__revision__ = "$Id$"
import os
from os import access, R_OK, W_OK
from invenio.config import CFG_BINDIR
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
from invenio.utils.text import wash_for_xml
def Convert_RecXML_to_RecALEPH_DELETE(parameters, curdir, form, user_info=None):
"""
Function to create an ALEPH 500 MARC DELETE record from a MARC XML
record.
This function depends upon the following:
* "recmysql" is a file that already exists in the working
submission directory. I.e. "Make_Record" has already been called and
the MARC XML record created.
* "recmysql" must contain an ALEPH 500 SYS in the field "970__a". That
is to say, the function "Allocate_ALEPH_SYS" should have been called
and an ALEPH 500 SYS allocated to this record.
*** NOTE: "xmlmarc2textmarc" is left to check for this in the record
It is run in --aleph-marc=d mode, which creates an ALEPH
"delete" record.
Given the valid "recmysql" in the working submission directory, this
function will use the "xmlmarc2textmarc" tool to convert that record into
the ALEPH MARC record. The record will then be written into the file
"recaleph500" in the current working submission directory.
@parameters: None
@return: (string) - Empty string.
"""
## If recmysql does not exist in the current working submission directory,
## or it is not readable, fail by raising a InvenioWebSubmitFunctionError:
if not access("%s/recmysql" % curdir, R_OK|W_OK):
## FAIL - recmysql cannot be accessed:
msg = """No recmysql in submission dir %s - """ \
"""Cannot create recaleph500!""" % curdir
raise InvenioWebSubmitFunctionError(msg)
## Wash possible xml-invalid characters in recmysql
recmysql_fd = file(os.path.join(curdir, 'recmysql'), 'r')
recmysql = recmysql_fd.read()
recmysql_fd.close()
recmysql = wash_for_xml(recmysql)
recmysql_fd = file(os.path.join(curdir, 'recmysql'), 'w')
recmysql_fd.write(recmysql)
recmysql_fd.close()
## Command to perform conversion of recmysql -> recaleph500:
convert_cmd = \
"""%(bindir)s/xmlmarc2textmarc --aleph-marc=d %(curdir)s/recmysql > """ \
"""%(curdir)s/recaleph500""" \
% { 'bindir' : CFG_BINDIR,
'curdir' : curdir,
}
## Perform the conversion of MARC XML record to ALEPH500 record:
pipe_in, pipe_out, pipe_err = os.popen3("%s" % convert_cmd)
pipe_in.close()
pipe_out.close()
conversion_errors = pipe_err.readlines()
pipe_err.close()
## Check that the conversion was performed without error:
if conversion_errors != []:
## It was not possible to successfully create the ALEPH500
## record, quit:
msg = """An error was encountered when attempting to """ \
"""convert %s/recmysql into recaleph500 - stopping [%s]""" \
% (curdir, "".join(conversion_errors))
raise InvenioWebSubmitFunctionError(msg)
## Check for presence of recaleph500 in the current
## working submission directory:
if not access("%s/recaleph500" % curdir, R_OK|W_OK):
## Either not present, or not readable - ERROR
msg = """An error was encountered when attempting to convert """ \
"""%s/recmysql into recaleph500. After the conversion, """ \
"""recaleph500 could not be accessed.""" % curdir
raise InvenioWebSubmitFunctionError(msg)
## Everything went OK:
return ""
|
Eveler/libs
|
refs/heads/splited_document_engine
|
__Python__/ufms_blanks/appy3/appy/pod/test/contexts/XhtmlComplex7.py
|
2
|
xhtmlInput = '''
<div class="document">
<p>Some <strong>bold</strong> and some <em>italic</em> text.</p>
<p>A new paragraph.</p>
<p>A list with three items:</p>
<ul>
<li>the first item</li>
<li>another item</li>
<li>the last item</li>
</ul>
<p>A last paragraph.</p>
</div>
'''
|
seanfisk/buzzword-bingo-server
|
refs/heads/master
|
django/contrib/localflavor/cz/cz_regions.py
|
514
|
"""
Czech regions; translations taken from http://www.crwflags.com/fotw/Flags/cz-re.html
"""
from django.utils.translation import ugettext_lazy as _
REGION_CHOICES = (
('PR', _('Prague')),
('CE', _('Central Bohemian Region')),
('SO', _('South Bohemian Region')),
('PI', _('Pilsen Region')),
('CA', _('Carlsbad Region')),
('US', _('Usti Region')),
('LB', _('Liberec Region')),
('HK', _('Hradec Region')),
('PA', _('Pardubice Region')),
('VY', _('Vysocina Region')),
('SM', _('South Moravian Region')),
('OL', _('Olomouc Region')),
('ZL', _('Zlin Region')),
('MS', _('Moravian-Silesian Region')),
)
|
devs1991/test_edx_docmode
|
refs/heads/master
|
venv/lib/python2.7/site-packages/social/utils.py
|
47
|
import re
import sys
import unicodedata
import collections
import functools
import logging
import six
import requests
import social
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from social.exceptions import AuthCanceled, AuthUnreachableProvider
from social.p3 import urlparse, urlunparse, urlencode, \
parse_qs as battery_parse_qs
SETTING_PREFIX = 'SOCIAL_AUTH'
social_logger = logging.Logger('social')
class SSLHttpAdapter(HTTPAdapter):
""""
Transport adapter that allows to use any SSL protocol. Based on:
http://requests.rtfd.org/latest/user/advanced/#example-specific-ssl-version
"""
def __init__(self, ssl_protocol):
self.ssl_protocol = ssl_protocol
super(SSLHttpAdapter, self).__init__()
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=self.ssl_protocol
)
@classmethod
def ssl_adapter_session(cls, ssl_protocol):
session = requests.Session()
session.mount('https://', SSLHttpAdapter(ssl_protocol))
return session
def import_module(name):
__import__(name)
return sys.modules[name]
def module_member(name):
mod, member = name.rsplit('.', 1)
module = import_module(mod)
return getattr(module, member)
def user_agent():
"""Builds a simple User-Agent string to send in requests"""
return 'python-social-auth-' + social.__version__
def url_add_parameters(url, params):
"""Adds parameters to URL, parameter will be repeated if already present"""
if params:
fragments = list(urlparse(url))
value = parse_qs(fragments[4])
value.update(params)
fragments[4] = urlencode(value)
url = urlunparse(fragments)
return url
def to_setting_name(*names):
return '_'.join([name.upper().replace('-', '_') for name in names if name])
def setting_name(*names):
return to_setting_name(*((SETTING_PREFIX,) + names))
def sanitize_redirect(host, redirect_to):
"""
Given the hostname and an untrusted URL to redirect to,
this method tests it to make sure it isn't garbage/harmful
    and returns it, else returns None, similar to how it's done
    in django.contrib.auth.views.
"""
if redirect_to:
try:
# Don't redirect to a different host
netloc = urlparse(redirect_to)[1] or host
except (TypeError, AttributeError):
pass
else:
if netloc == host:
return redirect_to
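# Examples (illustrative):
#     sanitize_redirect('myapp.com', 'http://myapp.com/next')  -> 'http://myapp.com/next'
#     sanitize_redirect('myapp.com', 'http://evil.com/home')   -> None (host mismatch)
#     sanitize_redirect('myapp.com', '/next')                  -> '/next' (netloc falls back to host)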
def user_is_authenticated(user):
if user and hasattr(user, 'is_authenticated'):
if isinstance(user.is_authenticated, collections.Callable):
authenticated = user.is_authenticated()
else:
authenticated = user.is_authenticated
elif user:
authenticated = True
else:
authenticated = False
return authenticated
def user_is_active(user):
if user and hasattr(user, 'is_active'):
if isinstance(user.is_active, collections.Callable):
is_active = user.is_active()
else:
is_active = user.is_active
elif user:
is_active = True
else:
is_active = False
return is_active
# This slugify version was borrowed from django revision a61dbd6
def slugify(value):
"""Converts to lowercase, removes non-word characters (alphanumerics
and underscores) and converts spaces to hyphens. Also strips leading
and trailing whitespace."""
value = unicodedata.normalize('NFKD', value) \
.encode('ascii', 'ignore') \
.decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
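# Example (illustrative): slugify('Héllo, Wörld!') -> 'hello-world'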
def first(func, items):
"""Return the first item in the list for what func returns True"""
for item in items:
if func(item):
return item
def parse_qs(value):
"""Like urlparse.parse_qs but transform list values to single items"""
return drop_lists(battery_parse_qs(value))
def drop_lists(value):
out = {}
for key, val in value.items():
val = val[0]
if isinstance(key, six.binary_type):
key = six.text_type(key, 'utf-8')
if isinstance(val, six.binary_type):
val = six.text_type(val, 'utf-8')
out[key] = val
return out
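# Example (illustrative):
#     parse_qs('next=/home&scope=a&scope=b') -> {'next': '/home', 'scope': 'a'}
# (only the first value of a repeated parameter is kept)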
def partial_pipeline_data(backend, user=None, *args, **kwargs):
partial = backend.strategy.session_get('partial_pipeline', None)
if partial:
idx, backend_name, xargs, xkwargs = \
backend.strategy.partial_from_session(partial)
if backend_name == backend.name:
kwargs.setdefault('pipeline_index', idx)
if user: # don't update user if it's None
kwargs.setdefault('user', user)
kwargs.setdefault('request', backend.strategy.request_data())
xkwargs.update(kwargs)
return xargs, xkwargs
else:
backend.strategy.clean_partial_pipeline()
def build_absolute_uri(host_url, path=None):
"""Build absolute URI with given (optional) path"""
path = path or ''
if path.startswith('http://') or path.startswith('https://'):
return path
if host_url.endswith('/') and path.startswith('/'):
path = path[1:]
return host_url + path
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
This code was borrowed from Django 1.5.4-final
"""
if len(val1) != len(val2):
return False
result = 0
if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for x, y in zip(val1, val2):
result |= x ^ y
else:
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
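# Example (illustrative): constant_time_compare('token123', 'token123') is
# True, while comparing 'token123' with 'token124' returns False only after
# XOR-ing every character pair rather than short-circuiting at the mismatch.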
def is_url(value):
return value and \
(value.startswith('http://') or
value.startswith('https://') or
value.startswith('/'))
def setting_url(backend, *names):
for name in names:
if is_url(name):
return name
else:
value = backend.setting(name)
if is_url(value):
return value
def handle_http_errors(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except requests.HTTPError as err:
if err.response.status_code == 400:
raise AuthCanceled(args[0])
elif err.response.status_code == 503:
raise AuthUnreachableProvider(args[0])
else:
raise
return wrapper
def append_slash(url):
"""Make sure we append a slash at the end of the URL otherwise we
have issues with urljoin Example:
>>> urlparse.urljoin('http://www.example.com/api/v3', 'user/1/')
'http://www.example.com/api/user/1/'
"""
if url and not url.endswith('/'):
url = '{0}/'.format(url)
return url
|
atsolakid/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/conditional_module.py
|
97
|
"""Conditional module is the xmodule, which you can use for disabling
some xmodules by conditions.
"""
import json
import logging
from lazy import lazy
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.seq_module import SequenceDescriptor
from xblock.fields import Scope, ReferenceList
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger('edx.' + __name__)
class ConditionalFields(object):
has_children = True
show_tag_list = ReferenceList(help="List of urls of children that are references to external modules", scope=Scope.content)
sources_list = ReferenceList(help="List of sources upon which this module is conditional", scope=Scope.content)
class ConditionalModule(ConditionalFields, XModule):
"""
Blocks child module from showing unless certain conditions are met.
Example:
<conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True">
<show sources="i4x://.../test_6; i4x://.../Avi_resources"/>
<video url_name="secret_video" />
</conditional>
<conditional> tag attributes:
sources - location id of required modules, separated by ';'
submitted - map to `is_submitted` module method.
            (pressing the RESET button makes this method return False.)
attempted - map to `is_attempted` module method
correct - map to `is_correct` module method
poll_answer - map to `poll_answer` module attribute
voted - map to `voted` module attribute
<show> tag attributes:
sources - location id of required modules, separated by ';'
    You can add your own rules for the <conditional> tag, like
    "completed", "attempted" etc. To do that you must extend the
    `ConditionalModule.conditions_map` variable and add the pair:
        my_attr: my_property/my_method
After that you can use it:
<conditional my_attr="some value" ...>
...
</conditional>
And my_property/my_method will be called for required modules.
"""
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/conditional/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "Conditional"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
# Map
# key: <tag attribute in xml>
# value: <name of module attribute>
conditions_map = {
'poll_answer': 'poll_answer', # poll_question attr
        # the problem was submitted (the answer may be wrong);
        # if the student presses the reset button afterwards,
        # the state is reverted
'submitted': 'is_submitted', # capa_problem attr
        # if the student attempted the problem
'attempted': 'is_attempted', # capa_problem attr
        # if the problem received full points
'correct': 'is_correct',
'voted': 'voted' # poll_question attr
}
def _get_condition(self):
# Get first valid condition.
for xml_attr, attr_name in self.conditions_map.iteritems():
xml_value = self.descriptor.xml_attributes.get(xml_attr)
if xml_value:
return xml_value, attr_name
raise Exception(
'Error in conditional module: no known conditional found in {!r}'.format(
self.descriptor.xml_attributes.keys()
)
)
@lazy
def required_modules(self):
return [self.system.get_module(descriptor) for
descriptor in self.descriptor.get_required_module_descriptors()]
def is_condition_satisfied(self):
xml_value, attr_name = self._get_condition()
if xml_value and self.required_modules:
for module in self.required_modules:
if not hasattr(module, attr_name):
# We don't throw an exception here because it is possible for
# the descriptor of a required module to have a property but
# for the resulting module to be a (flavor of) ErrorModule.
# So just log and return false.
                log.warn(
                    'Error in conditional module: required module '
                    '{module} has no {module_attr}'.format(
                        module=module, module_attr=attr_name))
return False
attr = getattr(module, attr_name)
if callable(attr):
attr = attr()
if xml_value != str(attr):
break
else:
return True
return False
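    # Illustrative walk-through (an assumption, not original code): with
    # <conditional sources="..." attempted="True">, _get_condition() returns
    # ('True', 'is_attempted'), and the loop above succeeds only if every
    # required module reports str(module.is_attempted()) == 'True'.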
def get_html(self):
# Calculate html ids of dependencies
self.required_html_ids = [descriptor.location.html_id() for
descriptor in self.descriptor.get_required_module_descriptors()]
return self.system.render_template('conditional_ajax.html', {
'element_id': self.location.html_id(),
'ajax_url': self.system.ajax_url,
'depends': ';'.join(self.required_html_ids)
})
def handle_ajax(self, _dispatch, _data):
"""This is called by courseware.moduleodule_render, to handle
an AJAX call.
"""
if not self.is_condition_satisfied():
defmsg = "{link} must be attempted before this will become visible."
message = self.descriptor.xml_attributes.get('message', defmsg)
context = {'module': self,
'message': message}
html = self.system.render_template('conditional_module.html',
context)
return json.dumps({'html': [html], 'message': bool(message)})
html = [child.render(STUDENT_VIEW).content for child in self.get_display_items()]
return json.dumps({'html': html})
def get_icon_class(self):
new_class = 'other'
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
child_classes = [self.system.get_module(child_descriptor).get_icon_class()
for child_descriptor in self.descriptor.get_children()]
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class ConditionalDescriptor(ConditionalFields, SequenceDescriptor):
"""Descriptor for conditional xmodule."""
_tag_name = 'conditional'
module_class = ConditionalModule
filename_extension = "xml"
has_score = False
show_in_read_only_mode = True
def __init__(self, *args, **kwargs):
"""
Create an instance of the conditional module.
"""
super(ConditionalDescriptor, self).__init__(*args, **kwargs)
# Convert sources xml_attribute to a ReferenceList field type so Location/Locator
# substitution can be done.
if not self.sources_list:
if 'sources' in self.xml_attributes and isinstance(self.xml_attributes['sources'], basestring):
self.sources_list = [
self.location.course_key.make_usage_key_from_deprecated_string(item)
for item in ConditionalDescriptor.parse_sources(self.xml_attributes)
]
@staticmethod
def parse_sources(xml_element):
""" Parse xml_element 'sources' attr and return a list of location strings. """
sources = xml_element.get('sources')
if sources:
return [location.strip() for location in sources.split(';')]
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescriptor instances upon
which this module depends.
"""
descriptors = []
for location in self.sources_list:
try:
descriptor = self.system.load_item(location)
descriptors.append(descriptor)
except ItemNotFoundError:
msg = "Invalid module by location."
log.exception(msg)
self.system.error_tracker(msg)
return descriptors
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
show_tag_list = []
for child in xml_object:
if child.tag == 'show':
locations = ConditionalDescriptor.parse_sources(child)
for location in locations:
children.append(location)
show_tag_list.append(location)
else:
try:
descriptor = system.process_xml(etree.tostring(child))
children.append(descriptor.scope_ids.usage_id)
except:
msg = "Unable to load child when parsing Conditional."
log.exception(msg)
system.error_tracker(msg)
return {'show_tag_list': show_tag_list}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element(self._tag_name)
for child in self.get_children():
if child.location not in self.show_tag_list:
self.runtime.add_block_as_child_node(child, xml_object)
if self.show_tag_list:
show_str = u'<{tag_name} sources="{sources}" />'.format(
tag_name='show', sources=';'.join(location.to_deprecated_string() for location in self.show_tag_list))
xml_object.append(etree.fromstring(show_str))
# Overwrite the original sources attribute with the value from sources_list, as
# Locations may have been changed to Locators.
stringified_sources_list = map(lambda loc: loc.to_deprecated_string(), self.sources_list)
self.xml_attributes['sources'] = ';'.join(stringified_sources_list)
return xml_object
|
Sorsly/subtle
|
refs/heads/master
|
google-cloud-sdk/lib/third_party/google/protobuf/unittest_import_public_pb2.py
|
43
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import_public.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import_public.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=_b('\n,google/protobuf/unittest_import_public.proto\x12\x18protobuf_unittest_import\" \n\x13PublicImportMessage\x12\t\n\x01\x65\x18\x01 \x01(\x05\x42\x1a\n\x18\x63om.google.protobuf.test')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PUBLICIMPORTMESSAGE = _descriptor.Descriptor(
name='PublicImportMessage',
full_name='protobuf_unittest_import.PublicImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='e', full_name='protobuf_unittest_import.PublicImportMessage.e', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=106,
)
DESCRIPTOR.message_types_by_name['PublicImportMessage'] = _PUBLICIMPORTMESSAGE
PublicImportMessage = _reflection.GeneratedProtocolMessageType('PublicImportMessage', (_message.Message,), dict(
DESCRIPTOR = _PUBLICIMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_public_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.PublicImportMessage)
))
_sym_db.RegisterMessage(PublicImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.protobuf.test'))
# @@protoc_insertion_point(module_scope)
|
xbmc/xbmc-rbp
|
refs/heads/master
|
tools/Linux/FEH.py
|
43
|
import os
import sys
import re
AvailableOutputs = []
Output = None
try:
from qt import *
AvailableOutputs.append("--error-output=Qt")
except:
pass
try:
import pygtk
pygtk.require('2.0')
import gtk
AvailableOutputs.append("--error-output=GTK")
except:
pass
try:
import pygame
import datetime
AvailableOutputs.append("--error-output=SDL")
except:
pass
def error(errorLine):
if Output == "--error-output=Qt":
createQt(errorLine)
elif Output == "--error-output=GTK":
createGTK(errorLine)
elif Output == "--error-output=SDL":
createSDL(errorLine)
else:
        try:
            print(errorLine)
        except Exception:
            # Printing the same line again would fail identically; fall back
            # to a representation the terminal encoding can always handle.
            print(repr(errorLine))
exit(1)
def createQt(errorLine):
app = QApplication(sys.argv)
QObject.connect(app, SIGNAL('lastWindowClosed()')
, app
, SLOT('quit()')
)
dialog = QDialog(None, "Error", 0, 0)
dialog.setCaption(dialog.tr("Error"))
layout=QVBoxLayout(dialog)
layout.setSpacing(6)
layout.setMargin(5)
label=QLabel(errorLine, dialog)
layout.addWidget(label)
bnExit=QPushButton("Quit", dialog, "add")
dialog.connect(bnExit, SIGNAL("clicked()"), qApp, SLOT("quit()"))
layout.addWidget(bnExit)
app.setMainWidget(dialog)
dialog.show()
app.exec_loop()
def createGTK(errorLine):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", lambda w: gtk.main_quit())
window.set_title("Error")
vbox = gtk.VBox(False, 5)
window.add(vbox)
window.set_border_width(5)
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_NONE)
label = gtk.Label(errorLine)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
button = gtk.Button("Quit")
button.connect_object("clicked", gtk.Widget.destroy, window)
vbox.pack_start(button, False, False, 0)
window.show_all ()
gtk.main()
def createSDL(errorLine):
pygame.init()
pygame.font.init()
pygame.display.set_caption("Error")
size = width, height = 800, 600
speed = [2, 2]
black = 0, 0, 0
screen = pygame.display.set_mode(size)
font = pygame.font.Font(None, 32)
autoQuit = 10
start = datetime.datetime.now()
finish = datetime.datetime.now()
delta = finish - start
while delta.seconds < autoQuit:
for event in pygame.event.get():
if event.type == pygame.QUIT or event.type == pygame.KEYDOWN:
sys.exit()
screen.fill(black)
place = [200, 200]
for line in errorLine.split('\n'):
text = font.render(line, 1, (255,255,255) )
place[1] += font.size(line)[1]
screen.blit(text, text.get_rect().move(place))
quitline = "Press any button to continue ("
quitline += str(autoQuit - delta.seconds)
quitline += ")"
text = font.render(quitline, 1, (255,255,255) )
screen.blit(text, text.get_rect().move(200,400))
pygame.display.flip()
finish = datetime.datetime.now()
delta = finish - start
def badDirectRendering():
out = os.popen("glxinfo | grep \"direct rendering\"", 'r')
line = out.read()
direct = "Yes" not in line
out.close()
return direct
def badColorDepth():
out = os.popen('xdpyinfo | grep "depth of root"', 'r')
    p = re.compile("([0-9]*) planes")
    # assume an acceptable depth if xdpyinfo yields no matching line
    bitDepth = False
    for line in out.readlines():
match = p.search(line)
if (match is not None):
if int(match.group(1)) > 16:
bitDepth = False
else:
bitDepth = True
out.close()
return bitDepth
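# Illustrative note: for an xdpyinfo line such as
# "  depth of root window:    24 planes", the regex above captures '24',
# so badColorDepth() returns False (24 > 16 is an acceptable depth).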
def possibleOutput(text):
return text in sys.argv and text in AvailableOutputs
if __name__=="__main__":
if len(AvailableOutputs) > 0:
Output = AvailableOutputs[0]
else:
Output = None
for text in sys.argv:
if possibleOutput(text):
Output = text
if "--no-test" in sys.argv:
exit(0)
if (badDirectRendering()):
error("XBMC needs hardware accelerated OpenGL rendering.\nInstall an appropriate graphics driver.\n\nPlease consult XBMC Wiki for supported hardware\nhttp://wiki.xbmc.org/?title=Supported_hardware")
if (badColorDepth()):
error("XBMC cannot run unless the\nscreen color depth is atleast 24 bit.\n\nPlease reconfigure your screen.")
|
zchking/odoo
|
refs/heads/8.0
|
addons/portal_sale/__openerp__.py
|
380
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Sale',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds a Sales menu to your portal as soon as sale and portal are installed.
======================================================================================
After installing this module, portal users will be able to access their own documents
via the following menus:
- Quotations
- Sale Orders
- Delivery Orders
- Products (public ones)
- Invoices
- Payments/Refunds
If online payment acquirers are configured, portal users will also be given the opportunity to
pay online on their Sale Orders and Invoices that are not paid yet. Paypal is included
by default, you simply need to configure a Paypal account in the Accounting/Invoicing settings.
""",
'author': 'OpenERP SA',
'depends': ['sale', 'portal', 'payment'],
'data': [
'security/portal_security.xml',
'portal_sale_view.xml',
'portal_sale_data.xml',
'res_config_view.xml',
'security/ir.model.access.csv',
],
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bealdav/OCB
|
refs/heads/patch-1
|
addons/website_event_sale/models/__init__.py
|
428
|
import product
import website
import sale_order
|
chienlieu2017/it_management
|
refs/heads/master
|
odoo/addons/report/__manifest__.py
|
20
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Report',
'category': 'Base',
'summary': 'Hidden',
'description': """
Report
""",
'depends': ['base', 'web', 'base_setup'],
'data': [
'data/report_paperformat_data.xml',
'security/ir.model.access.csv',
'views/layout_templates.xml',
'views/report_paperformat_views.xml',
'views/report_templates.xml',
'views/base_config_settings_views.xml',
'views/ir_actions_report_views.xml',
],
'qweb' : [
'static/src/xml/*.xml',
],
'auto_install': True,
}
|
kashyap32/scrapy
|
refs/heads/master
|
scrapy/selector/__init__.py
|
183
|
"""
Selectors
"""
from scrapy.selector.unified import *
from scrapy.selector.lxmlsel import *
|
frappe/erpnext
|
refs/heads/develop
|
erpnext/patches/v13_0/make_non_standard_user_type.py
|
1
|
# Copyright (c) 2019, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from six import iteritems
from erpnext.setup.install import add_non_standard_user_types
def execute():
doctype_dict = {
'projects': ['Timesheet'],
'payroll': ['Salary Slip', 'Employee Tax Exemption Declaration', 'Employee Tax Exemption Proof Submission'],
'hr': ['Employee', 'Expense Claim', 'Leave Application', 'Attendance Request', 'Compensatory Leave Request']
}
for module, doctypes in iteritems(doctype_dict):
for doctype in doctypes:
frappe.reload_doc(module, 'doctype', doctype)
frappe.flags.ignore_select_perm = True
frappe.flags.update_select_perm_after_migrate = True
add_non_standard_user_types()
|
CoolCloud/taiga-back
|
refs/heads/master
|
tests/factories.py
|
8
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
import threading
from datetime import date, timedelta
from django.conf import settings
import factory
class Factory(factory.DjangoModelFactory):
class Meta:
strategy = factory.CREATE_STRATEGY
model = None
abstract = True
_SEQUENCE = 1
_SEQUENCE_LOCK = threading.Lock()
@classmethod
def _setup_next_sequence(cls):
with cls._SEQUENCE_LOCK:
cls._SEQUENCE += 1
return cls._SEQUENCE
class ProjectTemplateFactory(Factory):
class Meta:
strategy = factory.CREATE_STRATEGY
model = "projects.ProjectTemplate"
django_get_or_create = ("slug",)
name = "Template name"
slug = settings.DEFAULT_PROJECT_TEMPLATE
description = factory.Sequence(lambda n: "Description {}".format(n))
us_statuses = []
points = []
task_statuses = []
issue_statuses = []
issue_types = []
priorities = []
severities = []
roles = []
default_owner_role = "tester"
class ProjectFactory(Factory):
class Meta:
model = "projects.Project"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Project {}".format(n))
slug = factory.Sequence(lambda n: "project-{}-slug".format(n))
description = "Project description"
owner = factory.SubFactory("tests.factories.UserFactory")
creation_template = factory.SubFactory("tests.factories.ProjectTemplateFactory")
class ProjectModulesConfigFactory(Factory):
class Meta:
model = "projects.ProjectModulesConfig"
strategy = factory.CREATE_STRATEGY
project = factory.SubFactory("tests.factories.ProjectFactory")
class RoleFactory(Factory):
class Meta:
model = "users.Role"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Role {}".format(n))
slug = factory.Sequence(lambda n: "test-role-{}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class PointsFactory(Factory):
class Meta:
model = "projects.Points"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Points {}".format(n))
value = 2
project = factory.SubFactory("tests.factories.ProjectFactory")
class RolePointsFactory(Factory):
class Meta:
model = "userstories.RolePoints"
strategy = factory.CREATE_STRATEGY
user_story = factory.SubFactory("tests.factories.UserStoryFactory")
role = factory.SubFactory("tests.factories.RoleFactory")
points = factory.SubFactory("tests.factories.PointsFactory")
class UserStoryAttachmentFactory(Factory):
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
content_object = factory.SubFactory("tests.factories.UserStoryFactory")
attached_file = factory.django.FileField(data=b"File contents")
class Meta:
model = "attachments.Attachment"
strategy = factory.CREATE_STRATEGY
class TaskAttachmentFactory(Factory):
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
content_object = factory.SubFactory("tests.factories.TaskFactory")
attached_file = factory.django.FileField(data=b"File contents")
class Meta:
model = "attachments.Attachment"
strategy = factory.CREATE_STRATEGY
class IssueAttachmentFactory(Factory):
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
content_object = factory.SubFactory("tests.factories.IssueFactory")
attached_file = factory.django.FileField(data=b"File contents")
class Meta:
model = "attachments.Attachment"
strategy = factory.CREATE_STRATEGY
class WikiAttachmentFactory(Factory):
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
content_object = factory.SubFactory("tests.factories.WikiFactory")
attached_file = factory.django.FileField(data=b"File contents")
class Meta:
model = "attachments.Attachment"
strategy = factory.CREATE_STRATEGY
class UserFactory(Factory):
class Meta:
model = "users.User"
strategy = factory.CREATE_STRATEGY
username = factory.Sequence(lambda n: "user{}".format(n))
email = factory.LazyAttribute(lambda obj: '%s@email.com' % obj.username)
password = factory.PostGeneration(lambda obj, *args, **kwargs: obj.set_password(obj.username))
class MembershipFactory(Factory):
class Meta:
model = "projects.Membership"
strategy = factory.CREATE_STRATEGY
token = factory.LazyAttribute(lambda obj: str(uuid.uuid1()))
project = factory.SubFactory("tests.factories.ProjectFactory")
role = factory.SubFactory("tests.factories.RoleFactory")
user = factory.SubFactory("tests.factories.UserFactory")
class InvitationFactory(Factory):
class Meta:
model = "projects.Membership"
strategy = factory.CREATE_STRATEGY
token = factory.LazyAttribute(lambda obj: str(uuid.uuid1()))
project = factory.SubFactory("tests.factories.ProjectFactory")
role = factory.SubFactory("tests.factories.RoleFactory")
email = factory.Sequence(lambda n: "user{}@email.com".format(n))
class WebhookFactory(Factory):
class Meta:
model = "webhooks.Webhook"
strategy = factory.CREATE_STRATEGY
project = factory.SubFactory("tests.factories.ProjectFactory")
url = "http://localhost:8080/test"
key = "factory-key"
name = "Factory-name"
class WebhookLogFactory(Factory):
class Meta:
model = "webhooks.WebhookLog"
strategy = factory.CREATE_STRATEGY
webhook = factory.SubFactory("tests.factories.WebhookFactory")
url = "http://localhost:8080/test"
status = "200"
request_data = {"text": "test-request-data"}
response_data = {"text": "test-response-data"}
class StorageEntryFactory(Factory):
class Meta:
model = "userstorage.StorageEntry"
strategy = factory.CREATE_STRATEGY
owner = factory.SubFactory("tests.factories.UserFactory")
key = factory.Sequence(lambda n: "key-{}".format(n))
value = factory.Sequence(lambda n: {"value": "value-{}".format(n)})
class UserStoryFactory(Factory):
class Meta:
model = "userstories.UserStory"
strategy = factory.CREATE_STRATEGY
ref = factory.Sequence(lambda n: n)
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
subject = factory.Sequence(lambda n: "User Story {}".format(n))
description = factory.Sequence(lambda n: "User Story {} description".format(n))
status = factory.SubFactory("tests.factories.UserStoryStatusFactory")
milestone = factory.SubFactory("tests.factories.MilestoneFactory")
class UserStoryStatusFactory(Factory):
class Meta:
model = "projects.UserStoryStatus"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "User Story status {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class TaskStatusFactory(Factory):
class Meta:
model = "projects.TaskStatus"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Task status {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class MilestoneFactory(Factory):
class Meta:
model = "milestones.Milestone"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Milestone {}".format(n))
owner = factory.SubFactory("tests.factories.UserFactory")
project = factory.SubFactory("tests.factories.ProjectFactory")
estimated_start = factory.LazyAttribute(lambda o: date.today())
estimated_finish = factory.LazyAttribute(lambda o: o.estimated_start + timedelta(days=7))
class IssueFactory(Factory):
class Meta:
model = "issues.Issue"
strategy = factory.CREATE_STRATEGY
ref = factory.Sequence(lambda n: n)
subject = factory.Sequence(lambda n: "Issue {}".format(n))
description = factory.Sequence(lambda n: "Issue {} description".format(n))
owner = factory.SubFactory("tests.factories.UserFactory")
project = factory.SubFactory("tests.factories.ProjectFactory")
status = factory.SubFactory("tests.factories.IssueStatusFactory")
severity = factory.SubFactory("tests.factories.SeverityFactory")
priority = factory.SubFactory("tests.factories.PriorityFactory")
type = factory.SubFactory("tests.factories.IssueTypeFactory")
milestone = factory.SubFactory("tests.factories.MilestoneFactory")
class TaskFactory(Factory):
class Meta:
model = "tasks.Task"
strategy = factory.CREATE_STRATEGY
ref = factory.Sequence(lambda n: n)
subject = factory.Sequence(lambda n: "Task {}".format(n))
description = factory.Sequence(lambda n: "Task {} description".format(n))
owner = factory.SubFactory("tests.factories.UserFactory")
project = factory.SubFactory("tests.factories.ProjectFactory")
status = factory.SubFactory("tests.factories.TaskStatusFactory")
milestone = factory.SubFactory("tests.factories.MilestoneFactory")
user_story = factory.SubFactory("tests.factories.UserStoryFactory")
tags = []
class WikiPageFactory(Factory):
class Meta:
model = "wiki.WikiPage"
strategy = factory.CREATE_STRATEGY
project = factory.SubFactory("tests.factories.ProjectFactory")
owner = factory.SubFactory("tests.factories.UserFactory")
slug = factory.Sequence(lambda n: "wiki-page-{}".format(n))
content = factory.Sequence(lambda n: "Wiki Page {} content".format(n))
class WikiLinkFactory(Factory):
class Meta:
model = "wiki.WikiLink"
strategy = factory.CREATE_STRATEGY
project = factory.SubFactory("tests.factories.ProjectFactory")
title = factory.Sequence(lambda n: "Wiki Link {} title".format(n))
href = factory.Sequence(lambda n: "link-{}".format(n))
order = factory.Sequence(lambda n: n)
class IssueStatusFactory(Factory):
class Meta:
model = "projects.IssueStatus"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Issue Status {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class SeverityFactory(Factory):
class Meta:
model = "projects.Severity"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Severity {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class PriorityFactory(Factory):
class Meta:
model = "projects.Priority"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Priority {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class IssueTypeFactory(Factory):
class Meta:
model = "projects.IssueType"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Issue Type {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class UserStoryCustomAttributeFactory(Factory):
class Meta:
model = "custom_attributes.UserStoryCustomAttribute"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "UserStory Custom Attribute {}".format(n))
description = factory.Sequence(lambda n: "Description for UserStory Custom Attribute {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class TaskCustomAttributeFactory(Factory):
class Meta:
model = "custom_attributes.TaskCustomAttribute"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Task Custom Attribute {}".format(n))
description = factory.Sequence(lambda n: "Description for Task Custom Attribute {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class IssueCustomAttributeFactory(Factory):
class Meta:
model = "custom_attributes.IssueCustomAttribute"
strategy = factory.CREATE_STRATEGY
name = factory.Sequence(lambda n: "Issue Custom Attribute {}".format(n))
description = factory.Sequence(lambda n: "Description for Issue Custom Attribute {}".format(n))
project = factory.SubFactory("tests.factories.ProjectFactory")
class UserStoryCustomAttributesValuesFactory(Factory):
class Meta:
model = "custom_attributes.UserStoryCustomAttributesValues"
strategy = factory.CREATE_STRATEGY
attributes_values = {}
user_story = factory.SubFactory("tests.factories.UserStoryFactory")
class TaskCustomAttributesValuesFactory(Factory):
class Meta:
model = "custom_attributes.TaskCustomAttributesValues"
strategy = factory.CREATE_STRATEGY
attributes_values = {}
task = factory.SubFactory("tests.factories.TaskFactory")
class IssueCustomAttributesValuesFactory(Factory):
class Meta:
model = "custom_attributes.IssueCustomAttributesValues"
strategy = factory.CREATE_STRATEGY
attributes_values = {}
issue = factory.SubFactory("tests.factories.IssueFactory")
# class FanFactory(Factory):
# project = factory.SubFactory("tests.factories.ProjectFactory")
# user = factory.SubFactory("tests.factories.UserFactory")
# class StarsFactory(Factory):
# project = factory.SubFactory("tests.factories.ProjectFactory")
# count = 0
class VoteFactory(Factory):
class Meta:
model = "votes.Vote"
strategy = factory.CREATE_STRATEGY
content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
object_id = factory.Sequence(lambda n: n)
user = factory.SubFactory("tests.factories.UserFactory")
class VotesFactory(Factory):
class Meta:
model = "votes.Votes"
strategy = factory.CREATE_STRATEGY
content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
object_id = factory.Sequence(lambda n: n)
class WatchedFactory(Factory):
class Meta:
model = "notifications.Watched"
strategy = factory.CREATE_STRATEGY
content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
object_id = factory.Sequence(lambda n: n)
user = factory.SubFactory("tests.factories.UserFactory")
project = factory.SubFactory("tests.factories.ProjectFactory")
class ContentTypeFactory(Factory):
class Meta:
model = "contenttypes.ContentType"
strategy = factory.CREATE_STRATEGY
django_get_or_create = ("app_label", "model")
app_label = factory.LazyAttribute(lambda obj: "issues")
model = factory.LazyAttribute(lambda obj: "Issue")
class AttachmentFactory(Factory):
class Meta:
model = "attachments.Attachment"
strategy = factory.CREATE_STRATEGY
owner = factory.SubFactory("tests.factories.UserFactory")
project = factory.SubFactory("tests.factories.ProjectFactory")
content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
object_id = factory.Sequence(lambda n: n)
attached_file = factory.django.FileField(data=b"File contents")
class HistoryEntryFactory(Factory):
class Meta:
model = "history.HistoryEntry"
strategy = factory.CREATE_STRATEGY
type = 1
class ApplicationFactory(Factory):
class Meta:
model = "external_apps.Application"
strategy = factory.CREATE_STRATEGY
key = "testingkey"
class ApplicationTokenFactory(Factory):
class Meta:
model = "external_apps.ApplicationToken"
strategy = factory.CREATE_STRATEGY
application = factory.SubFactory("tests.factories.ApplicationFactory")
user = factory.SubFactory("tests.factories.UserFactory")
def create_issue(**kwargs):
"Create an issue and along with its dependencies."
owner = kwargs.pop("owner", None)
if owner is None:
owner = UserFactory.create()
project = kwargs.pop("project", None)
if project is None:
project = ProjectFactory.create(owner=owner)
defaults = {
"project": project,
"owner": owner,
"status": IssueStatusFactory.create(project=project),
"milestone": MilestoneFactory.create(project=project),
"priority": PriorityFactory.create(project=project),
"severity": SeverityFactory.create(project=project),
"type": IssueTypeFactory.create(project=project),
}
defaults.update(kwargs)
return IssueFactory.create(**defaults)
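# Usage sketch (the subject value is illustrative): because every dependency
# above is created against the same project, a test can assert cross-object
# consistency directly:
#
#     issue = create_issue(subject="Crash on login")
#     assert issue.status.project == issue.project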
def create_task(**kwargs):
"Create a task and along with its dependencies."
owner = kwargs.pop("owner", None)
if not owner:
owner = UserFactory.create()
project = kwargs.pop("project", None)
if project is None:
project = ProjectFactory.create(owner=owner)
defaults = {
"project": project,
"owner": owner,
"status": TaskStatusFactory.create(project=project),
"milestone": MilestoneFactory.create(project=project),
"user_story": UserStoryFactory.create(project=project, owner=owner),
}
defaults.update(kwargs)
return TaskFactory.create(**defaults)
def create_membership(**kwargs):
"Create a membership along with its dependencies"
project = kwargs.pop("project", ProjectFactory())
project.points.add(PointsFactory.create(project=project, value=None))
defaults = {
"project": project,
"user": project.owner,
"role": RoleFactory.create(project=project)
}
defaults.update(kwargs)
return MembershipFactory.create(**defaults)
def create_invitation(**kwargs):
"Create an invitation along with its dependencies"
project = kwargs.pop("project", ProjectFactory())
project.points.add(PointsFactory.create(project=project, value=None))
defaults = {
"project": project,
"role": RoleFactory.create(project=project),
"email": "invited-user@email.com",
"token": "tokenvalue",
"invited_by_id": project.owner.id
}
defaults.update(kwargs)
return MembershipFactory.create(**defaults)
def create_userstory(**kwargs):
"Create an user story along with its dependencies"
owner = kwargs.pop("owner", None)
if not owner:
owner = UserFactory.create()
project = kwargs.pop("project", None)
if project is None:
project = ProjectFactory.create(owner=owner)
defaults = {
"project": project,
"owner": owner,
"milestone": MilestoneFactory.create(project=project, owner=owner)
}
defaults.update(kwargs)
return UserStoryFactory(**defaults)
def create_project(**kwargs):
"Create a project along with its dependencies"
defaults = {}
defaults.update(kwargs)
ProjectTemplateFactory.create(slug=settings.DEFAULT_PROJECT_TEMPLATE)
project = ProjectFactory.create(**defaults)
project.default_issue_status = IssueStatusFactory.create(project=project)
project.default_severity = SeverityFactory.create(project=project)
project.default_priority = PriorityFactory.create(project=project)
project.default_issue_type = IssueTypeFactory.create(project=project)
project.default_us_status = UserStoryStatusFactory.create(project=project)
project.default_task_status = TaskStatusFactory.create(project=project)
project.save()
return project
def create_user(**kwargs):
"Create an user along with her dependencies"
ProjectTemplateFactory.create(slug=settings.DEFAULT_PROJECT_TEMPLATE)
RoleFactory.create()
return UserFactory.create(**kwargs)
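# A minimal sketch of how these helpers compose in a test; the pytest-django
# marker is an assumption about the surrounding test setup:
#
#     import pytest
#
#     @pytest.mark.django_db
#     def test_membership_defaults_to_project_owner():
#         membership = create_membership()
#         assert membership.user == membership.project.owner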
|
donghwicha/playground-python-angular
|
refs/heads/master
|
virtualenvs/django-env/mybackend/mypage/models.py
|
10644
|
from django.db import models
# Create your models here.
|
bharathelangovan/chipy.org
|
refs/heads/master
|
chipy_org/apps/about/models.py
|
10644
|
from django.db import models
# Create your models here.
|
richard-willowit/sale-workflow
|
refs/heads/8.0
|
sale_product_set/tests/__init__.py
|
28
|
# -*- coding: utf-8 -*-
from . import test_product_set
|
gluke77/rally
|
refs/heads/master
|
tests/unit/task/test_validation.py
|
3
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
from glanceclient import exc as glance_exc
import mock
from novaclient import exceptions as nova_exc
import six
from rally.common.plugin import plugin
from rally import consts
from rally import exceptions
import rally.osclients
from rally.task import validation
from rally.verification.tempest import tempest
from tests.unit import test
MODULE = "rally.task.validation."
class ValidationUtilsTestCase(test.TestCase):
def test_validator(self):
@plugin.from_func()
def scenario():
pass
scenario._meta_init()
def validator_func(config, clients, deployment, a, b, c, d):
return (config, clients, deployment, a, b, c, d)
validator = validation.validator(validator_func)
self.assertEqual(scenario, validator("a", "b", "c", d=1)(scenario))
self.assertEqual(1, len(scenario._meta_get("validators")))
self.assertEqual(
("conf", "client", "deploy", "a", "b", "c", 1),
scenario._meta_get("validators")[0]("conf", "client", "deploy"))
@ddt.ddt
class ValidatorsTestCase(test.TestCase):
def _unwrap_validator(self, validator, *args, **kwargs):
@plugin.from_func()
def func():
pass
func._meta_init()
validator(*args, **kwargs)(func)
return func._meta_get("validators")[0]
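    # _unwrap_validator is the test-side trick used below: it applies the
    # decorator produced by a validation.* factory to a throwaway plugin and
    # pulls the stored callable back out of its metadata, so each test can
    # invoke the raw check as validator(config, clients, deployment), e.g.:
    #
    #     validator = self._unwrap_validator(validation.number, param_name="n")
    #     result = validator({}, None, None)  # returns a ValidationResult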
def test_number_not_nullable(self):
validator = self._unwrap_validator(validation.number, param_name="n")
self.assertFalse(validator({}, None, None).is_valid)
def test_number_nullable(self):
validator = self._unwrap_validator(validation.number, param_name="n",
nullable=True)
self.assertTrue(validator({}, None, None).is_valid)
def test_number_min_max_value(self):
validator = self._unwrap_validator(validation.number,
param_name="a", minval=4, maxval=10)
result = validator({"args": {"a": 3.9}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"a": 4.1}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"a": 11}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_number_integer_only(self):
validator = self._unwrap_validator(validation.number,
param_name="b", integer_only=True)
result = validator({"args": {"b": 3.9}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"b": 3}}, None, None)
self.assertTrue(result.is_valid, result.msg)
@mock.patch(MODULE + "os.access")
def test__file_access_ok(self, mock_access):
mock_access.return_value = True
result = validation._file_access_ok(
"foobar", os.R_OK, "p", False)
self.assertTrue(result.is_valid, result.msg)
@mock.patch(MODULE + "os.access")
def test__file_access_not_found(self, mock_access):
mock_access.return_value = False
result = validation._file_access_ok(
"foobar", os.R_OK, "p", False)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "_file_access_ok")
def test_file_exists(self, mock__file_access_ok):
mock__file_access_ok.return_value = "foobar"
validator = self._unwrap_validator(validation.file_exists,
param_name="p",
required=False)
result = validator({"args": {"p": "test_file"}}, None, None)
self.assertEqual("foobar", result)
mock__file_access_ok.assert_called_once_with(
"test_file", os.R_OK, "p", False)
@ddt.data({"raises_message": "Command must be a dictionary"},
{"command": "foo",
"raises_message": "Command must be a dictionary"},
{"command": {"interpreter": "foobar", "script_file": "foo",
"script_inline": "bar"},
"raises_message": "Exactly one of "},
{"command": {"script_file": "foobar"},
"raises_message": "Supplied dict specifies no"},
{"command": {"script_inline": "foobar",
"interpreter": "foo",
"local_path": "bar"},
"raises_message": "When uploading an interpreter its path"},
{"command": {"interpreter": "/bin/bash",
"script_path": "foo"},
"raises_message": ("Unexpected command parameters: "
"script_path")},
{"command": {"script_inline": "foobar",
"interpreter": ["ENV=bar", "/bin/foo"],
"local_path": "bar",
"remote_path": "/bin/foo"}},
{"command": {"script_inline": "foobar", "interpreter": "foo"}})
@ddt.unpack
def test_check_command_dict(self, command=None, raises_message=None):
if raises_message:
e = self.assertRaises(
ValueError, validation.check_command_dict, command)
self.assertIn(raises_message, str(e))
else:
self.assertIsNone(validation.check_command_dict(command))
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command(self, mock__file_access_ok):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
mock__file_access_ok.return_value = validation.ValidationResult(True)
command = {"script_file": "foobar", "interpreter": "foo"}
result = validator({"args": {"p": command}}, None, None)
self.assertTrue(result.is_valid, result.msg)
mock__file_access_ok.assert_called_once_with(
filename="foobar", mode=os.R_OK, param_name="p.script_file",
required=True)
def test_valid_command_not_required(self):
validator = self._unwrap_validator(validation.valid_command,
param_name="p", required=False)
result = validator({"args": {"p": None}}, None, None)
self.assertTrue(result.is_valid)
def test_valid_command_required(self):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
result = validator({"args": {"p": None}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command_unreadable_script_file(self, mock__file_access_ok):
mock__file_access_ok.return_value = validation.ValidationResult(False)
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"script_file": "foobar", "interpreter": "foo"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.task.validation.check_command_dict")
def test_valid_command_fail_check_command_dict(self,
mock_check_command_dict):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
mock_check_command_dict.side_effect = ValueError("foobar")
command = {"foo": "bar"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
self.assertEqual("foobar", result.msg)
def test_valid_command_script_inline(self):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"script_inline": "bar", "interpreter": "/bin/sh"}
result = validator({"args": {"p": command}}, None, None)
self.assertTrue(result.is_valid, result.msg)
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command_local_path(self, mock__file_access_ok):
mock__file_access_ok.return_value = validation.ValidationResult(False)
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"remote_path": "bar", "local_path": "foobar"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
mock__file_access_ok.assert_called_once_with(
filename="foobar", mode=os.R_OK, param_name="p.local_path",
required=True)
def test__get_validated_image_no_value_in_config(self):
result = validation._get_validated_image({}, None, "non_existing")
self.assertFalse(result[0].is_valid, result[0].msg)
def test__get_validated_image_from_context(self):
clients = mock.MagicMock()
image = {
"size": 0,
"min_ram": 0,
"min_disk": 0
}
result = validation._get_validated_image({"args": {
"image": {"name": "foo"}}, "context": {
"images": {
"image_name": "foo"}
}}, clients, "image")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], image)
result = validation._get_validated_image({"args": {
"image": {"regex": r"^foo$"}}, "context": {
"images": {
"image_name": "foo"}
}}, clients, "image")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], image)
@mock.patch(MODULE + "openstack_types.GlanceImage.transform",
return_value="image_id")
def test__get_validated_image(self, mock_glance_image_transform):
clients = mock.MagicMock()
clients.glance().images.get().to_dict.return_value = {
"image": "image_id"}
result = validation._get_validated_image({"args": {"a": "test"},
"context": {
"image_name": "foo"}},
clients, "a")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual({"image": "image_id", "min_disk": 0,
"min_ram": 0, "size": 0},
result[1])
mock_glance_image_transform.assert_called_once_with(
clients=clients, resource_config="test")
clients.glance().images.get.assert_called_with(image="image_id")
@mock.patch(MODULE + "openstack_types.GlanceImage.transform",
side_effect=exceptions.InvalidScenarioArgument)
def test__get_validated_image_transform_error(
self, mock_glance_image_transform):
result = validation._get_validated_image({"args": {"a": "test"}},
None, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "openstack_types.GlanceImage.transform")
def test__get_validated_image_not_found(
self, mock_glance_image_transform):
clients = mock.MagicMock()
clients.glance().images.get().to_dict.side_effect = (
glance_exc.HTTPNotFound(""))
result = validation._get_validated_image({"args": {"a": "test"}},
clients, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
def test__get_validated_flavor_no_value_in_config(self):
result = validation._get_validated_flavor({}, None, "non_existing")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "openstack_types.Flavor.transform",
return_value="flavor_id")
def test__get_validated_flavor(
self, mock_flavor_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.return_value = "flavor"
result = validation._get_validated_flavor({"args": {"a": "test"}},
clients, "a")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], "flavor")
mock_flavor_transform.assert_called_once_with(
clients=clients, resource_config="test")
clients.nova().flavors.get.assert_called_once_with(flavor="flavor_id")
@mock.patch(MODULE + "openstack_types.Flavor.transform",
side_effect=exceptions.InvalidScenarioArgument)
def test__get_validated_flavor_transform_error(
self, mock_flavor_transform):
result = validation._get_validated_flavor({"args": {"a": "test"}},
None, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "openstack_types.Flavor.transform")
def test__get_validated_flavor_not_found(
self, mock_flavor_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
result = validation._get_validated_flavor({"args": {"a": "test"}},
clients, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "openstack_types.Flavor.transform")
def test__get_validated_flavor_from_context(
self, mock_flavor_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "test",
"ram": 32,
}]
}
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertTrue(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "openstack_types.Flavor.transform")
def test__get_validated_flavor_from_context_failed(
self, mock_flavor_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "othername",
"ram": 32,
}]
}
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertFalse(result[0].is_valid, result[0].msg)
config = {
"args": {"flavor": {"name": "test"}},
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertFalse(result[0].is_valid, result[0].msg)
@ddt.data("nfS", "Cifs", "GLUSTERFS", "hdfs")
def test_validate_share_proto_valid(self, share_proto):
validator = self._unwrap_validator(validation.validate_share_proto)
result = validator(
{"args": {"share_proto": share_proto}}, "clients", "deployment")
self.assertTrue(result.is_valid, result.msg)
@ddt.data(
*([{"args": {"share_proto": v}} for v in (
None, "", "nfsfoo", "foonfs", "nfscifs", )] +
[{}, {"args": {}}])
)
def test_validate_share_proto_invalid(self, config):
validator = self._unwrap_validator(validation.validate_share_proto)
result = validator(config, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_exists(self):
validator = self._unwrap_validator(validation.image_exists, "param")
result = validator({}, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_exists_nullable(self):
validator = self._unwrap_validator(validation.image_exists,
"param", nullable=True)
result = validator({}, "clients", "deployment")
self.assertTrue(result.is_valid, result.msg)
def test_flavor_exists(self):
validator = self._unwrap_validator(validation.flavor_exists, "param")
result = validator({}, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_valid_on_flavor_flavor_or_image_not_specified(self):
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"flavor": {"id": 11}}}, mock.MagicMock(),
None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "_get_validated_image")
@mock.patch(MODULE + "_get_validated_flavor")
def test_image_valid_on_flavor(self, mock__get_validated_flavor,
mock__get_validated_image):
image = {
"id": "fake_id",
"min_ram": None,
"size": 2,
"min_disk": 0
}
flavor = mock.MagicMock()
success = validation.ValidationResult(True)
mock__get_validated_flavor.return_value = (success, flavor)
mock__get_validated_image.return_value = (success, image)
# test flavor.disk None
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
flavor.disk = None
flavor.ram = 2
image["min_ram"] = 4
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
image["min_ram"] = 1
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
# test validate_disk false
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image", False)
flavor.disk = 1
flavor.ram = 2
image["min_ram"] = 4
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
image["min_ram"] = 1
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
# test validate_disk true and flavor.disk not None
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
image["size"] = 2
image["min_disk"] = 0
flavor.disk = 5.0 / (1024 ** 3)
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
image["min_disk"] = flavor.disk * 2
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
image["min_disk"] = flavor.disk / 4
image["size"] = 1000
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "openstack_types.Flavor.transform")
@mock.patch(MODULE + "_get_validated_image")
def test_image_valid_on_flavor_context(
self, mock__get_validated_image,
mock_flavor_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
image = {"min_ram": 24, "id": "fake_id"}
success = validation.ValidationResult(True)
mock__get_validated_image.return_value = (success, image)
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "test",
"ram": 32,
}]
}
}
# test ram
image["min_ram"] = 64
result = validator(config, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_network_exists(self):
validator = self._unwrap_validator(validation.network_exists, "net")
net1 = mock.MagicMock()
net1.label = "private"
net2 = mock.MagicMock()
net2.label = "custom"
clients = mock.MagicMock()
clients.nova().networks.list.return_value = [net1, net2]
result = validator({}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"net": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"net": "custom2"}}, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_external_network_exists(self):
validator = self._unwrap_validator(
validation.external_network_exists, "name")
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
clients = mock.MagicMock()
net1 = mock.MagicMock()
net2 = mock.MagicMock()
clients.nova().floating_ip_pools.list.return_value = [net1, net2]
net1.name = "public"
net2.name = "custom"
result = validator({}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"name": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"name": "non_exist"}}, clients, None)
self.assertFalse(result.is_valid, result.msg)
net1.name = {"name": "public"}
net2.name = {"name": "custom"}
result = validator({"args": {"name": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
def test_tempest_tests_exists_no_arg(self):
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "tempest.Tempest")
def test_tempest_tests_exists(self, mock_tempest):
mock_tempest().is_installed.return_value = False
mock_tempest().is_configured.return_value = False
mock_tempest().discover_tests.return_value = set([
"tempest.api.a", "tempest.api.b", "tempest.api.c"])
deployment = {"uuid": "someuuid"}
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({"args": {"test_name": "a"}}, None, deployment)
self.assertTrue(result.is_valid, result.msg)
mock_tempest().is_installed.assert_called_once_with()
mock_tempest().is_configured.assert_called_once_with()
mock_tempest().discover_tests.assert_called_once_with()
result = validator({"args": {"test_name": "d"}}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"test_name": "tempest.api.a"}}, None,
deployment)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"test_name": "tempest.api.d"}}, None,
deployment)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"test_names": ["tempest.api.a", "b"]}},
None, deployment)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"test_names": ["tempest.api.j", "e"]}},
None, deployment)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "tempest.Tempest")
def test_tempest_tests_exists_tempest_installation_failed(self,
mock_tempest):
mock_tempest().is_installed.return_value = False
mock_tempest().install.side_effect = tempest.TempestSetupFailure
deployment = {"uuid": "someuuid"}
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({"args": {"test_name": "a"}}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
mock_tempest().is_installed.assert_called_once_with()
def test_tempest_set_exists_missing_args(self):
validator = self._unwrap_validator(validation.tempest_set_exists)
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_tempest_set_exists(self):
validator = self._unwrap_validator(validation.tempest_set_exists)
sets = list(list(consts.TempestTestsSets) +
list(consts.TempestTestsAPI))
result = validator(
{"args": {"set_name": sets[0]}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator(
{"args": {"set_name": "lol"}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_required_parameters(self):
validator = self._unwrap_validator(validation.required_parameters,
"a", "b")
result = validator({"args": {"a": 1, "b": 2, "c": 3}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"a": 1, "c": 3}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.common.objects.Credential")
def test_required_service(self, mock_credential):
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA,
consts.Service.NOVA_NET)
clients = mock.MagicMock()
clients.services().values.return_value = [consts.Service.KEYSTONE,
consts.Service.NOVA,
consts.Service.NOVA_NET]
fake_service = mock.Mock(binary="nova-network", status="enabled")
with mock.patch("rally.osclients.Clients") as clients_cls:
nova_client = clients_cls.return_value.nova.return_value
nova_client.services.list.return_value = [fake_service]
result = validator({}, clients, {"admin": {"info": "admin"}})
clients_cls.assert_called_once_with(mock_credential.return_value)
mock_credential.assert_called_once_with(info="admin")
self.assertTrue(result.is_valid, result.msg)
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA)
clients.services().values.return_value = [consts.Service.KEYSTONE]
with mock.patch("rally.osclients.Clients") as clients_cls:
result = validator({}, clients, None)
self.assertFalse(clients_cls.called)
self.assertFalse(result.is_valid, result.msg)
def test_required_service_wrong_service(self):
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA, "lol")
clients = mock.MagicMock()
result = validator({}, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_required_contexts(self):
validator = self._unwrap_validator(validation.required_contexts,
"c1", "c2", "c3")
result = validator({"context": {"a": 1}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"context": {"c1": 1, "c2": 2, "c3": 3}},
None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"context": {"c1": 1, "c2": 2, "c3": 3, "a": 1}},
None, None)
self.assertTrue(result.is_valid, result.msg)
def test_required_openstack_with_admin(self):
validator = self._unwrap_validator(validation.required_openstack,
admin=True)
# admin presented in deployment
fake_deployment = {"admin": "admin_credential", "users": []}
self.assertTrue(validator(None, None, fake_deployment).is_valid)
# admin not presented in deployment
fake_deployment = {"admin": None, "users": ["u1", "h2"]}
self.assertFalse(validator(None, None, fake_deployment).is_valid)
def test_required_openstack_with_users(self):
validator = self._unwrap_validator(validation.required_openstack,
users=True)
# users presented in deployment
fake_deployment = {"admin": None, "users": ["u_credential"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and users presented in deployment
fake_deployment = {"admin": "a", "users": ["u1", "h2"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and user context
fake_deployment = {"admin": "a", "users": []}
context = {"context": {"users": True}}
self.assertTrue(validator(context, None, fake_deployment).is_valid)
# just admin presented
fake_deployment = {"admin": "a", "users": []}
self.assertFalse(validator({}, None, fake_deployment).is_valid)
def test_required_openstack_with_admin_and_users(self):
validator = self._unwrap_validator(validation.required_openstack,
admin=True, users=True)
fake_deployment = {"admin": "a", "users": []}
self.assertFalse(validator({}, None, fake_deployment).is_valid)
fake_deployment = {"admin": "a", "users": ["u"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and user context
fake_deployment = {"admin": "a", "users": []}
context = {"context": {"users": True}}
self.assertTrue(validator(context, None, fake_deployment).is_valid)
def test_required_openstack_invalid(self):
validator = self._unwrap_validator(validation.required_openstack)
self.assertFalse(validator(None, None, None).is_valid)
def test_volume_type_exists(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_types.list.return_value = []
context = {"args": {"volume_type": False}}
result = validator(context, clients, mock.MagicMock())
self.assertTrue(result.is_valid, result.msg)
def test_volume_type_exists_check_types(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_types.list.return_value = ["type"]
context = {"args": {"volume_type": True}}
result = validator(context, clients, mock.MagicMock())
self.assertTrue(result.is_valid, result.msg)
def test_volume_type_exists_check_types_no_types_exist(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_types.list.return_value = []
context = {"args": {"volume_type": True}}
result = validator(context, clients, mock.MagicMock())
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "osclients")
def test_required_clients(self, mock_osclients):
validator = self._unwrap_validator(validation.required_clients,
"keystone", "nova")
clients = mock.MagicMock()
clients.keystone.return_value = "keystone"
clients.nova.return_value = "nova"
result = validator({}, clients, {})
self.assertTrue(result.is_valid, result.msg)
self.assertFalse(mock_osclients.Clients.called)
clients.nova.side_effect = ImportError
result = validator({}, clients, {})
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "objects")
@mock.patch(MODULE + "osclients")
def test_required_clients_with_admin(self, mock_osclients, mock_objects):
validator = self._unwrap_validator(validation.required_clients,
"keystone", "nova", admin=True)
clients = mock.Mock()
clients.keystone.return_value = "keystone"
clients.nova.return_value = "nova"
mock_osclients.Clients.return_value = clients
mock_objects.Credential.return_value = "foo_credential"
result = validator({}, clients, {"admin": {"foo": "bar"}})
self.assertTrue(result.is_valid, result.msg)
mock_objects.Credential.assert_called_once_with(foo="bar")
mock_osclients.Clients.assert_called_once_with("foo_credential")
clients.nova.side_effect = ImportError
result = validator({}, clients, {"admin": {"foo": "bar"}})
self.assertFalse(result.is_valid, result.msg)
@ddt.data(
{"ext_validate": "existing_extension",
"validation_result": True},
{"ext_validate": "absent_extension",
"validation_result": False},
)
@ddt.unpack
def test_required_neutron_extensions(self, ext_validate,
validation_result):
validator = self._unwrap_validator(
validation.required_neutron_extensions,
ext_validate)
clients = mock.Mock()
clients.neutron.return_value.list_extensions.return_value = (
{"extensions": [{"alias": "existing_extension"}]})
result = validator({}, clients, {})
self.assertEqual(result.is_valid, validation_result)
def test_required_cinder_services(self):
validator = self._unwrap_validator(
validation.required_cinder_services,
service_name=six.text_type("cinder-service"))
with mock.patch.object(rally.osclients.Cinder, "create_client") as c:
fake_service = mock.Mock(binary="cinder-service", state="up")
cinder_client = mock.Mock()
services = mock.Mock()
services.list.return_value = [fake_service]
cinder_client.services = services
c.return_value = cinder_client
deployment = {"admin": {"auth_url": "fake_credential",
"username": "username",
"password": "password"}}
result = validator({}, None, deployment)
self.assertTrue(result.is_valid, result.msg)
fake_service.state = "down"
result = validator({}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"])
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
def test_restricted_parameters_negative(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"])
result = validator({"args": {"param_name": "value"}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters_in_dict(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"], "subdict")
result = validator({"args": {"subdict": {}}}, None, None)
self.assertTrue(result.is_valid, result.msg)
def test_restricted_parameters_in_dict_negative(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"], "subdict")
result = validator({"args": {"subdict":
{"param_name": "value"}}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters_string_param_names(self):
validator = self._unwrap_validator(
validation.restricted_parameters, "param_name")
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
@ddt.data(
{"exception_msg": "Heat template validation failed on fake_path1. "
"Original error message: fake_msg."},
{"exception_msg": None}
)
@ddt.unpack
@mock.patch(MODULE + "os.path.exists", return_value=True)
@mock.patch(MODULE + "open", side_effect=mock.mock_open(), create=True)
def test_validate_heat_template(self, mock_open, mock_exists,
exception_msg):
validator = self._unwrap_validator(
validation.validate_heat_template, "template_path1",
"template_path2")
clients = mock.MagicMock()
mock_open().__enter__().read.side_effect = ["fake_template1",
"fake_template2"]
heat_validator = mock.MagicMock()
if exception_msg:
heat_validator.side_effect = Exception("fake_msg")
clients.heat().stacks.validate = heat_validator
context = {"args": {"template_path1": "fake_path1",
"template_path2": "fake_path2"}}
result = validator(context, clients, mock.MagicMock())
if not exception_msg:
heat_validator.assert_has_calls([
mock.call(template="fake_template1"),
mock.call(template="fake_template2")
])
mock_open.assert_has_calls([
mock.call("fake_path1", "r"),
mock.call("fake_path2", "r")
], any_order=True)
self.assertTrue(result.is_valid, result.msg)
else:
heat_validator.assert_called_once_with(template="fake_template1")
self.assertEqual("Heat template validation failed on fake_path1."
" Original error message: fake_msg.", result.msg)
self.assertFalse(result.is_valid)
def _get_keystone_v2_mock_client(self):
keystone = mock.Mock()
del keystone.projects
keystone.tenants = mock.Mock()
return keystone
def _get_keystone_v3_mock_client(self):
keystone = mock.Mock()
del keystone.tenants
keystone.projects = mock.Mock()
return keystone
def test_required_api_versions_keystonev2(self):
validator = self._unwrap_validator(
validation.required_api_versions, component="keystone",
versions=[2.0])
clients = mock.MagicMock()
clients.keystone.return_value = self._get_keystone_v3_mock_client()
self.assertFalse(validator({}, clients, None).is_valid)
clients.keystone.return_value = self._get_keystone_v2_mock_client()
self.assertTrue(validator({}, clients, None).is_valid)
def test_required_api_versions_keystonev3(self):
validator = self._unwrap_validator(
validation.required_api_versions, component="keystone",
versions=[3])
clients = mock.MagicMock()
clients.keystone.return_value = self._get_keystone_v2_mock_client()
self.assertFalse(validator({}, clients, None).is_valid)
clients.keystone.return_value = self._get_keystone_v3_mock_client()
self.assertTrue(validator({}, clients, None).is_valid)
def test_required_api_versions_keystone_all_versions(self):
validator = self._unwrap_validator(
validation.required_api_versions, component="keystone",
versions=[2.0, 3])
clients = mock.MagicMock()
clients.keystone.return_value = self._get_keystone_v3_mock_client()
self.assertTrue(validator({}, clients, None).is_valid)
clients.keystone.return_value = self._get_keystone_v2_mock_client()
self.assertTrue(validator({}, clients, None).is_valid)
@ddt.data({"nova_version": 2, "required_versions": [2], "valid": True},
{"nova_version": 3, "required_versions": [2], "valid": False},
{"nova_version": None, "required_versions": [2], "valid": False},
{"nova_version": 2, "required_versions": [2, 3], "valid": True},
{"nova_version": 4, "required_versions": [2, 3], "valid": False})
@ddt.unpack
def test_required_api_versions_choose_version(self, nova_version=None,
required_versions=(2,),
valid=False):
validator = self._unwrap_validator(
validation.required_api_versions, component="nova",
versions=required_versions)
clients = mock.MagicMock()
clients.nova.choose_version.return_value = nova_version
self.assertEqual(validator({}, clients, None).is_valid,
valid)
@ddt.data({"required_version": 2, "valid": True},
{"required_version": 3, "valid": False})
@ddt.unpack
def test_required_api_versions_context(self, required_version=None,
valid=False):
validator = self._unwrap_validator(
validation.required_api_versions, component="nova",
versions=[required_version])
clients = mock.MagicMock()
config = {"context": {"api_versions": {"nova": {"version": 2}}}}
self.assertEqual(validator(config, clients, None).is_valid,
valid)
|
tumb1er/django-celery-rpc
|
refs/heads/master
|
celery_rpc/app.py
|
3
|
from __future__ import absolute_import
import os
from django.conf import settings
from .utils import create_celery_app
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
rpc = create_celery_app()
rpc.autodiscover_tasks(['celery_rpc'])
rpc.autodiscover_tasks(lambda: settings.INSTALLED_APPS,
related_name="celery_rpc")
|
Rogentos/rogentos-anaconda
|
refs/heads/master
|
storage/devices.py
|
2
|
# devices.py
# Device classes for anaconda's storage configuration module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
"""
Device classes for use by anaconda.
This is the hierarchy of device objects that anaconda will use for
managing storage devices in the system. These classes will
individually make use of external support modules as needed to
perform operations specific to the type of device they represent.
TODO:
- see how to do network devices (NetworkManager may help)
- perhaps just a wrapper here
- document return values of all methods/functions
- find out what other kinds of wild and crazy devices we need to
represent here (iseries? xen? more mainframe? mac? ps?)
- PReP
- this is a prime candidate for a PseudoDevice
- DASD
- ZFCP
- XEN
    What specifications do we allow?        new     existing
        partitions
            usage                            +         +
                filesystem, partition type are implicit
            mountpoint                       +         +
            size
                exact                        +         -
                range                        +         -
                resize                       -         +
            format                           -         +
            encryption                       +         +
            disk
                exact                        +         -
                set                          +         -
                    how will we specify this?
                        partition w/ multiple parents cannot otherwise occur
            primary                          +         -
        mdraid sets
            filesystem (*)                   +         +
            mountpoint                       +         +
            size?
            format                           -         +
            encryption                       +         +
            level                            +         ?
            device minor                     +         ?
            member devices                   +         ?
            spares                           +         ?
            name?
            bitmap? (boolean)                +         -
        volume groups
            name                             +         -
            member pvs                       +         +
            pesize                           +         ?
        logical volumes
            filesystem                       +         +
            mountpoint                       +         +
            size
                exact                        +         ?
            format                           -         +
            encryption                       +         +
            name                             +         ?
            vgname                           +         ?
"""
import os
import math
import copy
import time
# device backend modules
from devicelibs import mdraid
from devicelibs import lvm
from devicelibs import dm
from devicelibs import crypto
import parted
import _ped
import block
from errors import *
from iutil import notify_kernel, numeric_type
from .storage_log import log_method_call
from udev import *
from formats import get_device_format_class, getFormat, DeviceFormat
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("storage")
def get_device_majors():
majors = {}
for line in open("/proc/devices").readlines():
try:
(major, device) = line.split()
except ValueError:
continue
try:
majors[int(major)] = device
except ValueError:
continue
return majors
device_majors = get_device_majors()
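# For reference, the /proc/devices lines parsed above look like "  8 sd" or
# "253 device-mapper" (major number, then driver name); section headers such
# as "Character devices:" fail the int() conversion and are skipped by the
# ValueError handlers. So on a typical system:
#
#     device_majors.get(8)  # -> "sd"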
def devicePathToName(devicePath):
if devicePath.startswith("/dev/"):
name = devicePath[5:]
else:
name = devicePath
if name.startswith("mapper/"):
name = name[7:]
return name
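# Behavior sketch, read straight off the branches above:
#
#     devicePathToName("/dev/sda1")          # -> "sda1"
#     devicePathToName("/dev/mapper/vg-lv")  # -> "vg-lv"
#     devicePathToName("sdb")                # -> "sdb" (already a bare name)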
def deviceNameToDiskByPath(deviceName=None):
bypath = '/dev/disk/by-path'
if not deviceName:
return ""
if not os.path.isdir(bypath):
return ""
deviceName = os.path.basename(deviceName)
for path in os.listdir(bypath):
entry = bypath + '/' + path
if os.path.islink(entry):
target = os.path.basename(os.readlink(entry))
else:
target = os.path.basename(entry)
if target == deviceName:
return entry
return ""
class Device(object):
""" A generic device.
Device instances know which devices they depend upon (parents
attribute). They do not know which devices depend upon them, but
they do know whether or not they have any dependent devices
(isleaf attribute).
A Device's setup method should set up all parent devices as well
as the device itself. It should not run the resident format's
setup method.
Which Device types rely on their parents' formats being active?
DMCryptDevice
A Device's teardown method should accept the keyword argument
recursive, which takes a boolean value and indicates whether or
not to recursively close parent devices.
A Device's create method should create all parent devices as well
as the device itself. It should also run the Device's setup method
after creating the device. The create method should not create a
device's resident format.
Which device type rely on their parents' formats to be created
before they can be created/assembled?
VolumeGroup
DMCryptDevice
A Device's destroy method should destroy any resident format
before destroying the device itself.
"""
# This is a counter for generating unique ids for Devices.
_id = 0
_type = "generic device"
_packages = []
def __init__(self, name, parents=None):
""" Create a Device instance.
Arguments:
name -- the device name (generally a device node's basename)
Keyword Arguments:
parents -- a list of required Device instances
"""
self._name = name
if parents is None:
parents = []
elif not isinstance(parents, list):
raise ValueError("parents must be a list of Device instances")
self.parents = parents
self.kids = 0
# Set this instance's id and increment the counter.
self.id = Device._id
Device._id += 1
for parent in self.parents:
parent.addChild()
def __deepcopy__(self, memo):
""" Create a deep copy of a Device instance.
We can't do copy.deepcopy on parted objects, which is okay.
For these parted objects, we just do a shallow copy.
"""
new = self.__class__.__new__(self.__class__)
memo[id(self)] = new
dont_copy_attrs = ('_raidSet',)
shallow_copy_attrs = ('_partedDevice', '_partedPartition')
for (attr, value) in self.__dict__.items():
if attr in dont_copy_attrs:
setattr(new, attr, value)
elif attr in shallow_copy_attrs:
setattr(new, attr, copy.copy(value))
else:
setattr(new, attr, copy.deepcopy(value, memo))
return new
def __str__(self):
s = ("%(type)s instance (%(id)s) --\n"
" name = %(name)s status = %(status)s"
" parents = %(parents)s\n"
" kids = %(kids)s\n"
" id = %(dev_id)s\n" %
{"type": self.__class__.__name__, "id": "%#x" % id(self),
"name": self.name, "parents": self.parents, "kids": self.kids,
"status": self.status, "dev_id": self.id})
return s
@property
def dict(self):
d = {"type": self.type, "name": self.name,
"parents": [p.name for p in self.parents]}
return d
def writeKS(self, f, preexisting=False, noformat=False, s=None):
return
def removeChild(self):
log_method_call(self, name=self.name, kids=self.kids)
self.kids -= 1
def addChild(self):
log_method_call(self, name=self.name, kids=self.kids)
self.kids += 1
def setup(self, intf=None):
""" Open, or set up, a device. """
raise NotImplementedError("setup method not defined for Device")
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
raise NotImplementedError("teardown method not defined for Device")
def create(self, intf=None):
""" Create the device. """
raise NotImplementedError("create method not defined for Device")
def destroy(self):
""" Destroy the device. """
raise NotImplementedError("destroy method not defined for Device")
def setupParents(self, orig=False):
""" Run setup method of all parent devices. """
log_method_call(self, name=self.name, orig=orig, kids=self.kids)
for parent in self.parents:
parent.setup(orig=orig)
def teardownParents(self, recursive=None):
""" Run teardown method of all parent devices. """
for parent in self.parents:
parent.teardown(recursive=recursive)
def createParents(self):
""" Run create method of all parent devices. """
log.info("NOTE: recursive device creation disabled")
for parent in self.parents:
if not parent.exists:
raise DeviceError("parent device does not exist", self.name)
#parent.create()
def dependsOn(self, dep):
""" Return True if this device depends on dep. """
# XXX does a device depend on itself?
if dep in self.parents:
return True
for parent in self.parents:
if parent.dependsOn(dep):
return True
return False
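    # dependsOn recurses through the parent chain, so a device depends on its
    # grandparents too. A minimal sketch with the base class (names are
    # illustrative):
    #
    #     disk = Device("sda")
    #     part = Device("sda1", parents=[disk])
    #     luks = Device("luks-sda1", parents=[part])
    #     assert luks.dependsOn(disk)      # True, via part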
def dracutSetupString(self):
return ""
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
return False
@property
def name(self):
""" This device's name. """
return self._name
@property
def isleaf(self):
""" True if this device has no children. """
return self.kids == 0
@property
def typeDescription(self):
""" String describing the device type. """
return self._type
@property
def type(self):
""" Device type. """
return self._type
@property
def packages(self):
""" List of packages required to manage devices of this type.
This list includes the packages required by its parent devices.
"""
packages = self._packages
for parent in self.parents:
for package in parent.packages:
if package not in packages:
packages.append(package)
return packages
@property
def mediaPresent(self):
return True
class NetworkStorageDevice(object):
""" Virtual base class for network backed storage devices """
def __init__(self, host_address=None, nic=None):
""" Create a NetworkStorage Device instance. Note this class is only
to be used as a baseclass and then only with multiple inheritance.
The only correct use is:
class MyStorageDevice(StorageDevice, NetworkStorageDevice):
The sole purpose of this class is to:
1) Be able to check if a StorageDevice is network backed
(using isinstance).
2) To be able to get the host address of the host (server) backing
the storage *or* the NIC through which the storage is connected
Arguments:
host_address -- host address of the backing server
nic -- nic to which the storage is bound
"""
self.host_address = host_address
self.nic = nic
class StorageDevice(Device):
""" A generic storage device.
A fully qualified path to the device node can be obtained via the
path attribute, although it is not guaranteed to be useful, or
even present, unless the StorageDevice's setup method has been
run.
StorageDevice instances can optionally contain a filesystem,
represented by an FS instance. A StorageDevice's create method
should create a filesystem if one has been specified.
"""
_type = "storage device"
_devDir = "/dev"
sysfsBlockDir = "class/block"
_resizable = False
_partitionable = False
_isDisk = False
def __init__(self, device, format=None,
size=None, major=None, minor=None,
sysfsPath='', parents=None, exists=None, serial=None,
vendor="", model="", bus=""):
""" Create a StorageDevice instance.
Arguments:
device -- the device name (generally a device node's basename)
Keyword Arguments:
size -- the device's size (units/format TBD)
major -- the device major
minor -- the device minor
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
parents -- a list of required Device instances
serial -- the ID_SERIAL_SHORT for this device
vendor -- the manufacturer of this Device
model -- manufacturer's device model string
bus -- the interconnect this device uses
"""
# allow specification of individual parents
if isinstance(parents, Device):
parents = [parents]
self.exists = exists
Device.__init__(self, device, parents=parents)
self.uuid = None
self._format = None
self._size = numeric_type(size)
self.major = numeric_type(major)
self.minor = numeric_type(minor)
self.sysfsPath = sysfsPath
self._serial = serial
self._vendor = vendor
self._model = model
self.bus = bus
self.protected = False
self.format = format
self.originalFormat = self.format
self.fstabComment = ""
self._targetSize = self._size
self._partedDevice = None
@property
def packages(self):
""" List of packages required to manage devices of this type.
This list includes the packages required by this device's
format type as well those required by all of its parent
devices.
"""
packages = super(StorageDevice, self).packages
packages.extend(self.format.packages)
for parent in self.parents:
for package in parent.format.packages:
if package not in packages:
packages.append(package)
return packages
@property
def partedDevice(self):
if self.exists and self.status and not self._partedDevice:
log.debug("looking up parted Device: %s" % self.path)
# We aren't guaranteed to be able to get a device. In
# particular, built-in USB flash readers show up as devices but
# do not always have any media present, so parted won't be able
# to find a device.
try:
self._partedDevice = parted.Device(path=self.path)
except (_ped.IOException, _ped.DeviceException):
pass
return self._partedDevice
def _getTargetSize(self):
return self._targetSize
def _setTargetSize(self, newsize):
self._targetSize = newsize
targetSize = property(lambda s: s._getTargetSize(),
lambda s, v: s._setTargetSize(v),
doc="Target size of this device")
def __str__(self):
s = Device.__str__(self)
s += (" uuid = %(uuid)s format = %(format)r size = %(size)s\n"
" major = %(major)s minor = %(minor)r exists = %(exists)s\n"
" sysfs path = %(sysfs)s partedDevice = %(partedDevice)r\n"
" target size = %(targetSize)s path = %(path)s\n"
" format args = %(formatArgs)s originalFormat = %(origFmt)s" %
{"uuid": self.uuid, "format": self.format, "size": self.size,
"major": self.major, "minor": self.minor, "exists": self.exists,
"sysfs": self.sysfsPath, "partedDevice": self.partedDevice,
"targetSize": self.targetSize, "path": self.path,
"formatArgs": self.formatArgs, "origFmt": self.originalFormat})
return s
@property
def dict(self):
d = super(StorageDevice, self).dict
d.update({"uuid": self.uuid, "size": self.size,
"format": self.format.dict, "removable": self.removable,
"major": self.major, "minor": self.minor,
"exists": self.exists, "sysfs": self.sysfsPath,
"targetSize": self.targetSize, "path": self.path})
return d
@property
def path(self):
""" Device node representing this device. """
return "%s/%s" % (self._devDir, self.name)
def updateSysfsPath(self):
""" Update this device's sysfs path. """
log_method_call(self, self.name, status=self.status)
sysfsName = self.name.replace("/", "!")
path = os.path.join("/sys", self.sysfsBlockDir, sysfsName)
self.sysfsPath = os.path.realpath(path)[4:]
log.debug("%s sysfsPath set to %s" % (self.name, self.sysfsPath))
@property
def formatArgs(self):
""" Device-specific arguments to format creation program. """
return []
@property
def resizable(self):
""" Can this type of device be resized? """
return self._resizable and self.exists and \
((self.format and self.format.resizable) or not self.format)
def notifyKernel(self):
""" Send a 'change' uevent to the kernel for this device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
log.debug("not sending change uevent for non-existent device")
return
if not self.status:
log.debug("not sending change uevent for inactive device")
return
path = os.path.normpath("/sys/%s" % self.sysfsPath)
try:
notify_kernel(path, action="change")
except Exception, e:
log.warning("failed to notify kernel of change: %s" % e)
@property
def fstabSpec(self):
spec = self.path
if self.format and self.format.uuid:
spec = "UUID=%s" % self.format.uuid
return spec
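    # fstabSpec prefers a stable UUID reference when the format reports one and
    # falls back to the device node, e.g. (values are illustrative):
    #
    #     "UUID=6d2a9f4e-..."   # format has a uuid
    #     "/dev/sda1"           # no uuid available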
def resize(self, intf=None):
""" Resize the device.
New size should already be set.
"""
raise NotImplementedError("resize method not defined for StorageDevice")
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
self.setupParents(orig=orig)
for parent in self.parents:
if orig:
parent.originalFormat.setup()
else:
parent.format.setup()
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.status:
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
udev_settle()
if recursive:
self.teardownParents(recursive=recursive)
def _getSize(self):
""" Get the device's size in MB, accounting for pending changes. """
if self.exists and not self.mediaPresent:
return 0
if self.exists and self.partedDevice:
self._size = self.currentSize
size = self._size
if self.exists and self.resizable and self.targetSize != size:
size = self.targetSize
return size
def _setSize(self, newsize):
""" Set the device's size to a new value. """
if newsize > self.maxSize:
raise DeviceError("device cannot be larger than %s MB" %
                              (self.maxSize,), self.name)  # maxSize is a property, not a method
self._size = newsize
size = property(lambda x: x._getSize(),
lambda x, y: x._setSize(y),
doc="The device's size in MB, accounting for pending changes")
@property
def currentSize(self):
""" The device's actual size. """
size = 0
if self.exists and self.partedDevice:
size = self.partedDevice.getSize()
elif self.exists:
size = self._size
return size
@property
def minSize(self):
""" The minimum size this device can be. """
if self.format.minSize:
return self.format.minSize
else:
return self.size
@property
def maxSize(self):
""" The maximum size this device can be. """
if self.format.maxSize > self.currentSize:
return self.currentSize
else:
return self.format.maxSize
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
if not self.exists:
return False
return os.access(self.path, os.W_OK)
def _setFormat(self, format):
""" Set the Device's format. """
if not format:
format = getFormat(None, device=self.path, exists=self.exists)
log_method_call(self, self.name, type=format.type,
current=getattr(self._format, "type", None))
if self._format and self._format.status:
# FIXME: self.format.status doesn't mean much
raise DeviceError("cannot replace active format", self.name)
self._format = format
def _getFormat(self):
return self._format
format = property(lambda d: d._getFormat(),
lambda d,f: d._setFormat(f),
doc="The device's formatting.")
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device has already been created", self.name)
self.createParents()
self.setupParents()
self.exists = True
self.setup()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if not self.isleaf:
raise DeviceError("Cannot destroy non-leaf device", self.name)
self.exists = False
# we already did this in DeviceTree._removeDevice
#for parent in self.parents:
# parent.removeChild()
@property
def removable(self):
devpath = os.path.normpath("/sys/%s" % self.sysfsPath)
remfile = os.path.normpath("%s/removable" % devpath)
rem_f = None
try:
rem_f = open(remfile, "r")
        except IOError as err:
            # errno 2 is ENOENT: the device has no 'removable' attribute
            if err.errno != 2:
raise
return False
try:
return (self.sysfsPath and os.path.exists(devpath) and
os.access(remfile, os.R_OK) and
rem_f.readline().strip() == "1")
finally:
rem_f.close()
@property
def isDisk(self):
return self._isDisk
@property
def partitionable(self):
return self._partitionable
@property
def partitioned(self):
return self.format.type == "disklabel" and self.partitionable
@property
def serial(self):
return self._serial
@property
def model(self):
if not self._model:
self._model = getattr(self.partedDevice, "model", "")
return self._model
@property
def vendor(self):
return self._vendor
class DiskDevice(StorageDevice):
""" A disk """
_type = "disk"
_partitionable = True
_isDisk = True
def __init__(self, device, format=None,
size=None, major=None, minor=None, sysfsPath='',
parents=None, serial=None, vendor="", model="", bus="",
exists=True):
""" Create a DiskDevice instance.
Arguments:
device -- the device name (generally a device node's basename)
Keyword Arguments:
size -- the device's size (units/format TBD)
major -- the device major
minor -- the device minor
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
parents -- a list of required Device instances
serial -- the ID_SERIAL_SHORT for this device
vendor -- the manufacturer of this Device
model -- manufacturer's device model string
bus -- the interconnect this device uses
DiskDevices always exist.
"""
StorageDevice.__init__(self, device, format=format, size=size,
major=major, minor=minor, exists=exists,
sysfsPath=sysfsPath, parents=parents,
serial=serial, model=model,
vendor=vendor, bus=bus)
def __str__(self):
s = StorageDevice.__str__(self)
s += (" removable = %(removable)s partedDevice = %(partedDevice)r" %
{"removable": self.removable, "partedDevice": self.partedDevice})
return s
@property
def mediaPresent(self):
if not self.partedDevice:
return False
        # Some drivers (cpqarray <blegh>) make block device nodes for
        # controllers with no disks attached and then report a 0 size;
        # treat this as no media present.
return self.partedDevice.getSize() != 0
@property
def description(self):
return self.model
@property
def size(self):
""" The disk's size in MB """
return super(DiskDevice, self).size
#size = property(StorageDevice._getSize)
def probe(self):
""" Probe for any missing information about this device.
pyparted should be able to tell us anything we want to know.
size, disklabel type, maybe even partition layout
"""
log_method_call(self, self.name, size=self.size, partedDevice=self.partedDevice)
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.mediaPresent:
raise DeviceError("cannot destroy disk with no media", self.name)
self.teardown()
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if not os.path.exists(self.path):
raise DeviceError("device does not exist", self.name)
class PartitionDevice(StorageDevice):
""" A disk partition.
On types and flags...
We don't need to deal with numerical partition types at all. The
only type we are concerned with is primary/logical/extended. Usage
specification is accomplished through the use of flags, which we
will set according to the partition's format.
"""
_type = "partition"
_resizable = True
defaultSize = 500
def __init__(self, name, format=None,
size=None, grow=False, maxsize=None,
major=None, minor=None, bootable=None,
sysfsPath='', parents=None, exists=None,
partType=None, primary=False, weight=0):
""" Create a PartitionDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
Keyword Arguments:
exists -- indicates whether this is an existing device
format -- the device's format (DeviceFormat instance)
For existing partitions:
parents -- the disk that contains this partition
major -- the device major
minor -- the device minor
sysfsPath -- sysfs device path
For new partitions:
partType -- primary,extended,&c (as parted constant)
grow -- whether or not to grow the partition
maxsize -- max size for growable partitions (in MB)
size -- the device's size (in MB)
bootable -- whether the partition is bootable
parents -- a list of potential containing disks
weight -- an initial sorting weight to assign
"""
self.req_disks = []
self.req_partType = None
self.req_primary = None
self.req_grow = None
self.req_bootable = None
self.req_size = 0
self.req_base_size = 0
self.req_max_size = 0
self.req_base_weight = 0
self._bootable = False
StorageDevice.__init__(self, name, format=format, size=size,
major=major, minor=minor, exists=exists,
sysfsPath=sysfsPath, parents=parents)
if not exists:
# this is a request, not a partition -- it has no parents
self.req_disks = self.parents[:]
for dev in self.parents:
dev.removeChild()
self.parents = []
# FIXME: Validate partType, but only if this is a new partition
# Otherwise, overwrite it with the partition's type.
self._partType = None
self.partedFlags = {}
self._partedPartition = None
self._origPath = None
self._currentSize = 0
# FIXME: Validate size, but only if this is a new partition.
# For existing partitions we will get the size from
# parted.
if self.exists:
log.debug("looking up parted Partition: %s" % self.path)
self._partedPartition = self.disk.format.partedDisk.getPartitionByPath(self.path)
if not self._partedPartition:
raise DeviceError("cannot find parted partition instance", self.name)
self._origPath = self.path
# collect information about the partition from parted
self.probe()
if self.getFlag(parted.PARTITION_PREP):
# the only way to identify a PPC PReP Boot partition is to
# check the partition type/flags, so do it here.
self.format = getFormat("prepboot", device=self.path, exists=True)
else:
            # XXX It might be worthwhile to create a dead-simple
# PartitionRequest class and pass one to this constructor
# for new partitions.
if not self._size:
# default size for new partition requests
self._size = self.defaultSize
self.req_name = name
self.req_partType = partType
self.req_primary = primary
self.req_max_size = numeric_type(maxsize)
self.req_grow = grow
self.req_bootable = bootable
# req_size may be manipulated in the course of partitioning
self.req_size = self._size
# req_base_size will always remain constant
self.req_base_size = self._size
self.req_base_weight = weight
def __str__(self):
s = StorageDevice.__str__(self)
s += (" grow = %(grow)s max size = %(maxsize)s bootable = %(bootable)s\n"
" part type = %(partType)s primary = %(primary)s\n"
" partedPartition = %(partedPart)r disk = %(disk)r\n" %
{"grow": self.req_grow, "maxsize": self.req_max_size,
"bootable": self.bootable, "partType": self.partType,
"primary": self.req_primary,
"partedPart": self.partedPartition, "disk": self.disk})
if self.partedPartition:
s += (" start = %(start)s end = %(end)s length = %(length)s\n"
" flags = %(flags)s" %
{"length": self.partedPartition.geometry.length,
"start": self.partedPartition.geometry.start,
"end": self.partedPartition.geometry.end,
"flags": self.partedPartition.getFlagsAsString()})
return s
@property
def dict(self):
d = super(PartitionDevice, self).dict
d.update({"type": self.partType})
if not self.exists:
d.update({"grow": self.req_grow, "maxsize": self.req_max_size,
"bootable": self.bootable,
"primary": self.req_primary})
if self.partedPartition:
d.update({"length": self.partedPartition.geometry.length,
"start": self.partedPartition.geometry.start,
"end": self.partedPartition.geometry.end,
"flags": self.partedPartition.getFlagsAsString()})
return d
def writeKS(self, f, preexisting=False, noformat=False, s=None):
args = []
if self.isExtended:
return
if self.req_grow:
args.append("--grow")
if self.req_max_size:
args.append("--maxsize=%s" % self.req_max_size)
if self.req_primary:
args.append("--asprimary")
if self.req_size:
args.append("--size=%s" % (self.req_size or self.defaultSize))
if preexisting:
if len(self.req_disks) == 1:
args.append("--ondisk=%s" % self.req_disks[0].name)
else:
args.append("--onpart=%s" % self.name)
if noformat:
args.append("--noformat")
f.write("#part ")
self.format.writeKS(f)
f.write(" %s" % " ".join(args))
if s:
f.write(" %s" % s)
def _setTargetSize(self, newsize):
if newsize != self.currentSize:
# change this partition's geometry in-memory so that other
# partitioning operations can complete (e.g., autopart)
self._targetSize = newsize
disk = self.disk.format.partedDisk
# resize the partition's geometry in memory
(constraint, geometry) = self._computeResize(self.partedPartition)
disk.setPartitionGeometry(partition=self.partedPartition,
constraint=constraint,
start=geometry.start, end=geometry.end)
@property
def path(self):
""" Device node representing this device. """
if not self.parents:
            # Bogus, but code in various places compares devices by path,
            # so we must return something unique.
return self.name
return "%s/%s" % (self.parents[0]._devDir, self.name)
@property
def partType(self):
""" Get the partition's type (as parted constant). """
try:
ptype = self.partedPartition.type
except AttributeError:
ptype = self._partType
if not self.exists and ptype is None:
ptype = self.req_partType
return ptype
@property
def isExtended(self):
return (self.partType is not None and
self.partType & parted.PARTITION_EXTENDED)
@property
def isLogical(self):
return (self.partType is not None and
self.partType & parted.PARTITION_LOGICAL)
@property
def isPrimary(self):
return (self.partType is not None and
self.partType == parted.PARTITION_NORMAL)
@property
def isProtected(self):
return (self.partType is not None and
self.partType & parted.PARTITION_PROTECTED)
@property
def fstabSpec(self):
spec = self.path
if self.disk and self.disk.type == 'dasd':
spec = deviceNameToDiskByPath(self.path)
elif self.format and self.format.uuid:
spec = "UUID=%s" % self.format.uuid
return spec
def _getPartedPartition(self):
return self._partedPartition
def _setPartedPartition(self, partition):
""" Set this PartitionDevice's parted Partition instance. """
log_method_call(self, self.name)
if partition is None:
path = None
elif isinstance(partition, parted.Partition):
path = partition.path
else:
raise ValueError("partition must be a parted.Partition instance")
log.debug("device %s new partedPartition %s has path %s" % (self.name,
partition,
path))
self._partedPartition = partition
self.updateName()
partedPartition = property(lambda d: d._getPartedPartition(),
lambda d,p: d._setPartedPartition(p))
def resetPartedPartition(self):
""" Re-get self.partedPartition from the original disklabel. """
log_method_call(self, self.name)
if not self.exists:
return
# find the correct partition on the original parted.Disk since the
# name/number we're now using may no longer match
_disklabel = self.disk.originalFormat
if self.isExtended:
# getPartitionBySector doesn't work on extended partitions
_partition = _disklabel.extendedPartition
log.debug("extended lookup found partition %s"
% devicePathToName(getattr(_partition, "path", None)))
else:
# lookup the partition by sector to avoid the renumbering
# nonsense entirely
_sector = self.partedPartition.geometry.start
_partition = _disklabel.partedDisk.getPartitionBySector(_sector)
log.debug("sector-based lookup found partition %s"
% devicePathToName(getattr(_partition, "path", None)))
self.partedPartition = _partition
def _getWeight(self):
return self.req_base_weight
def _setWeight(self, weight):
self.req_base_weight = weight
weight = property(lambda d: d._getWeight(),
lambda d,w: d._setWeight(w))
def updateSysfsPath(self):
""" Update this device's sysfs path. """
log_method_call(self, self.name, status=self.status)
if not self.parents:
self.sysfsPath = ''
elif self.parents[0]._devDir == "/dev/mapper":
dm_node = dm.dm_node_from_name(self.name)
path = os.path.join("/sys", self.sysfsBlockDir, dm_node)
self.sysfsPath = os.path.realpath(path)[4:]
else:
StorageDevice.updateSysfsPath(self)
def updateName(self):
if self.partedPartition is None:
self._name = self.req_name
else:
self._name = \
devicePathToName(self.partedPartition.getDeviceNodeName())
def dependsOn(self, dep):
""" Return True if this device depends on dep. """
if isinstance(dep, PartitionDevice) and dep.isExtended and \
self.isLogical and self.disk == dep.disk:
return True
return Device.dependsOn(self, dep)
def _setFormat(self, format):
""" Set the Device's format. """
log_method_call(self, self.name)
StorageDevice._setFormat(self, format)
def _setBootable(self, bootable):
""" Set the bootable flag for this partition. """
if self.partedPartition:
if iutil.isS390():
return
if self.flagAvailable(parted.PARTITION_BOOT):
if bootable:
self.setFlag(parted.PARTITION_BOOT)
else:
self.unsetFlag(parted.PARTITION_BOOT)
else:
raise DeviceError("boot flag not available for this partition", self.name)
self._bootable = bootable
else:
self.req_bootable = bootable
def _getBootable(self):
return self._bootable or self.req_bootable
bootable = property(_getBootable, _setBootable)
def flagAvailable(self, flag):
log_method_call(self, path=self.path, flag=flag)
if not self.partedPartition:
return
return self.partedPartition.isFlagAvailable(flag)
def getFlag(self, flag):
log_method_call(self, path=self.path, flag=flag)
if not self.partedPartition or not self.flagAvailable(flag):
return
return self.partedPartition.getFlag(flag)
def setFlag(self, flag):
log_method_call(self, path=self.path, flag=flag)
if not self.partedPartition or not self.flagAvailable(flag):
return
self.partedPartition.setFlag(flag)
def unsetFlag(self, flag):
log_method_call(self, path=self.path, flag=flag)
if not self.partedPartition or not self.flagAvailable(flag):
return
self.partedPartition.unsetFlag(flag)
def probe(self):
""" Probe for any missing information about this device.
size, partition type, flags
"""
log_method_call(self, self.name, exists=self.exists)
if not self.exists:
return
# this is in MB
self._size = self.partedPartition.getSize()
self._currentSize = self._size
self.targetSize = self._size
self._partType = self.partedPartition.type
self._bootable = self.getFlag(parted.PARTITION_BOOT)
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
w = None
if intf:
w = intf.waitWindow(_("Creating"),
_("Creating device %s") % (self.path,))
try:
self.createParents()
self.setupParents()
self.disk.format.addPartition(self.partedPartition)
try:
self.disk.format.commit()
except DiskLabelCommitError:
part = self.disk.format.partedDisk.getPartitionByPath(self.path)
self.disk.format.removePartition(part)
raise
            # Ensure that old metadata which lived in free space, and so was
            # not explicitly destroyed by a destroyformat action, gets wiped.
DeviceFormat(device=self.path, exists=True).destroy()
except Exception:
raise
else:
self.partedPartition = self.disk.format.partedDisk.getPartitionByPath(self.path)
self.exists = True
self._currentSize = self.partedPartition.getSize()
self.setup()
finally:
if w:
w.pop()
def _computeResize(self, partition):
log_method_call(self, self.name, status=self.status)
# compute new size for partition
currentGeom = partition.geometry
currentDev = currentGeom.device
newLen = long(self.targetSize * 1024 * 1024) / currentDev.sectorSize
newGeometry = parted.Geometry(device=currentDev,
start=currentGeom.start,
length=newLen)
# and align the end sector
newGeometry.end = self.disk.format.endAlignment.alignDown(newGeometry,
newGeometry.end)
constraint = parted.Constraint(exactGeom=newGeometry)
return (constraint, newGeometry)
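    # Worked example of the math in _computeResize (hypothetical numbers):
    # with targetSize = 100 (MB) on a 512-byte-sector device,
    # newLen = long(100 * 1024 * 1024) / 512 = 204800 sectors; the end
    # sector is then aligned down to the disklabel's endAlignment before
    # the exact-geometry constraint is built.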
def resize(self, intf=None):
""" Resize the device.
self.targetSize must be set to the new size.
"""
log_method_call(self, self.name, status=self.status)
if self.targetSize != self.currentSize:
# partedDisk has been restored to _origPartedDisk, so
# recalculate resize geometry because we may have new
# partitions on the disk, which could change constraints
partedDisk = self.disk.format.partedDisk
partition = partedDisk.getPartitionByPath(self.path)
(constraint, geometry) = self._computeResize(partition)
partedDisk.setPartitionGeometry(partition=partition,
constraint=constraint,
start=geometry.start,
end=geometry.end)
self.disk.format.commit()
self._currentSize = partition.getSize()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if not self.sysfsPath:
return
if not self.isleaf:
raise DeviceError("Cannot destroy non-leaf device", self.name)
self.setupParents(orig=True)
# we should have already set self.partedPartition to point to the
# partition on the original disklabel
self.disk.originalFormat.removePartition(self.partedPartition)
try:
self.disk.originalFormat.commit()
except DiskLabelCommitError:
self.disk.originalFormat.addPartition(self.partedPartition)
self.partedPartition = self.disk.originalFormat.partedDisk.getPartitionByPath(self.path)
raise
self.exists = False
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.status:
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
if self.parents[0].type == 'dm-multipath':
devmap = block.getMap(major=self.major, minor=self.minor)
if devmap:
try:
block.removeDeviceMap(devmap)
except Exception as e:
raise DeviceTeardownError("failed to tear down device-mapper partition %s: %s" % (self.name, e))
udev_settle()
StorageDevice.teardown(self, recursive=recursive)
def _getSize(self):
""" Get the device's size. """
size = self._size
if self.partedPartition:
# this defaults to MB
size = self.partedPartition.getSize()
return size
def _setSize(self, newsize):
""" Set the device's size (for resize, not creation).
Arguments:
newsize -- the new size (in MB)
"""
log_method_call(self, self.name,
status=self.status, size=self._size, newsize=newsize)
if not self.exists:
raise DeviceError("device does not exist", self.name)
if newsize > self.disk.size:
raise ValueError("partition size would exceed disk size")
# this defaults to MB
maxAvailableSize = self.partedPartition.getMaxAvailableSize()
if newsize > maxAvailableSize:
raise ValueError("new size is greater than available space")
# now convert the size to sectors and update the geometry
geometry = self.partedPartition.geometry
physicalSectorSize = geometry.device.physicalSectorSize
new_length = (newsize * (1024 * 1024)) / physicalSectorSize
geometry.length = new_length
def _getDisk(self):
""" The disk that contains this partition."""
try:
disk = self.parents[0]
except IndexError:
disk = None
return disk
def _setDisk(self, disk):
"""Change the parent.
Setting up a disk is not trivial. It has the potential to change
the underlying object. If necessary we must also change this object.
"""
log_method_call(self, self.name, old=getattr(self.disk, "name", None),
new=getattr(disk, "name", None))
if self.disk:
self.disk.removeChild()
if disk:
self.parents = [disk]
disk.addChild()
else:
self.parents = []
disk = property(lambda p: p._getDisk(), lambda p,d: p._setDisk(d))
@property
def maxSize(self):
""" The maximum size this partition can be. """
# XXX: this is MB by default
maxPartSize = self.partedPartition.getMaxAvailableSize()
if self.format.maxSize > maxPartSize:
return maxPartSize
else:
return self.format.maxSize
@property
def currentSize(self):
""" The device's actual size. """
if self.exists:
return self._currentSize
else:
return 0
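# Hypothetical sketch of a new-partition request (names and sizes are
# illustrative; requests are normally created by the partitioning code):
#
#     req = PartitionDevice("req0", size=200, grow=True, maxsize=1000,
#                           parents=[sda])
#     req.req_size        # 200 -- may be adjusted while partitioning
#     req.req_base_size   # 200 -- stays constant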
class DMDevice(StorageDevice):
""" A device-mapper device """
_type = "dm"
_devDir = "/dev/mapper"
def __init__(self, name, format=None, size=None, dmUuid=None,
target=None, exists=None, parents=None, sysfsPath=''):
""" Create a DMDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
Keyword Arguments:
target -- the device-mapper target type (string)
size -- the device's size (units/format TBD)
dmUuid -- the device's device-mapper UUID
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
parents -- a list of required Device instances
exists -- indicates whether this is an existing device
"""
StorageDevice.__init__(self, name, format=format, size=size,
exists=exists,
parents=parents, sysfsPath=sysfsPath)
self.target = target
self.dmUuid = dmUuid
def __str__(self):
s = StorageDevice.__str__(self)
s += (" target = %(target)s dmUuid = %(dmUuid)s" %
{"target": self.target, "dmUuid": self.dmUuid})
return s
@property
def dict(self):
d = super(DMDevice, self).dict
d.update({"target": self.target, "dmUuid": self.dmUuid})
return d
@property
def fstabSpec(self):
""" Return the device specifier for use in /etc/fstab. """
return self.path
@property
def mapName(self):
""" This device's device-mapper map name """
return self.name
@property
def status(self):
_status = False
for map in block.dm.maps():
if map.name == self.mapName:
_status = map.live_table and not map.suspended
break
return _status
def updateSysfsPath(self):
""" Update this device's sysfs path. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if self.status:
dm_node = self.getDMNode()
path = os.path.join("/sys", self.sysfsBlockDir, dm_node)
self.sysfsPath = os.path.realpath(path)[4:]
else:
self.sysfsPath = ''
#def getTargetType(self):
# return dm.getDmTarget(name=self.name)
def getDMNode(self):
""" Return the dm-X (eg: dm-0) device node for this device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
return dm.dm_node_from_name(self.name)
def _setName(self, name):
""" Set the device's map name. """
log_method_call(self, self.name, status=self.status)
if self.status:
raise DeviceError("cannot rename active device", self.name)
self._name = name
#self.sysfsPath = "/dev/disk/by-id/dm-name-%s" % self.name
name = property(lambda d: d._name,
lambda d,n: d._setName(n))
class DMCryptDevice(DMDevice):
""" A dm-crypt device """
_type = "dm-crypt"
def __init__(self, name, format=None, size=None, uuid=None,
exists=None, sysfsPath='', parents=None):
""" Create a DMCryptDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
Keyword Arguments:
size -- the device's size (units/format TBD)
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
parents -- a list of required Device instances
exists -- indicates whether this is an existing device
"""
DMDevice.__init__(self, name, format=format, size=size,
parents=parents, sysfsPath=sysfsPath,
exists=exists, target="crypt")
class LUKSDevice(DMCryptDevice):
""" A mapped LUKS device. """
_type = "luks/dm-crypt"
def __init__(self, name, format=None, size=None, uuid=None,
exists=None, sysfsPath='', parents=None):
""" Create a LUKSDevice instance.
Arguments:
name -- the device name
Keyword Arguments:
size -- the device's size in MB
uuid -- the device's UUID
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
parents -- a list of required Device instances
exists -- indicates whether this is an existing device
"""
DMCryptDevice.__init__(self, name, format=format, size=size,
parents=parents, sysfsPath=sysfsPath,
uuid=None, exists=exists)
def writeKS(self, f, preexisting=False, noformat=False, s=None):
self.slave.writeKS(f, preexisting=preexisting, noformat=noformat, s=s)
self.format.writeKS(f)
if s:
f.write(" %s" % s)
@property
def size(self):
if not self.exists or not self.partedDevice:
size = float(self.slave.size) - crypto.LUKS_METADATA_SIZE
else:
size = self.partedDevice.getSize()
return size
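    # Sketch of the accounting above (assuming crypto.LUKS_METADATA_SIZE is
    # expressed in MB like the rest of this module): a 500 MB slave yields
    # roughly 500 - LUKS_METADATA_SIZE for the mapped device until the
    # device exists and pyparted can report its actual size.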
@property
def fstabSpec(self):
spec = self.path
if self.format and self.format.uuid:
spec = "UUID=%s" % self.format.uuid
return spec
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
self.createParents()
self.setupParents()
#if not self.slave.format.exists:
# self.slave.format.create()
self._name = self.slave.format.mapName
self.exists = True
self.setup()
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
self.slave.setup(orig=orig)
if orig:
self.slave.originalFormat.setup()
else:
self.slave.format.setup()
udev_settle()
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
def teardown(self, recursive=False):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.status:
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
udev_settle()
if self.slave.originalFormat.exists:
self.slave.originalFormat.teardown()
udev_settle()
if self.slave.format.exists:
self.slave.format.teardown()
udev_settle()
if recursive:
self.teardownParents(recursive=recursive)
def destroy(self):
log_method_call(self, self.name, status=self.status)
self.format.teardown()
udev_settle()
self.teardown()
@property
def slave(self):
""" This device's backing device. """
return self.parents[0]
def dracutSetupString(self):
return "rd_LUKS_UUID=luks-%s" % self.slave.format.uuid
class LVMVolumeGroupDevice(DMDevice):
""" An LVM Volume Group
XXX Maybe this should inherit from StorageDevice instead of
DMDevice since there's no actual device.
"""
_type = "lvmvg"
def __init__(self, name, parents, size=None, free=None,
peSize=None, peCount=None, peFree=None, pvCount=None,
lvNames=[], uuid=None, exists=None, sysfsPath=''):
""" Create a LVMVolumeGroupDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
parents -- a list of physical volumes (StorageDevice)
Keyword Arguments:
peSize -- physical extent size (in MB)
exists -- indicates whether this is an existing device
sysfsPath -- sysfs device path
For existing VG's only:
size -- the VG's size (in MB)
free -- amount of free space in the VG
peFree -- number of free extents
peCount -- total number of extents
pvCount -- number of PVs in this VG
lvNames -- the names of this VG's LVs
uuid -- the VG's UUID
"""
self.pvClass = get_device_format_class("lvmpv")
if not self.pvClass:
raise StorageError("cannot find 'lvmpv' class")
if isinstance(parents, list):
for dev in parents:
if not isinstance(dev.format, self.pvClass):
raise ValueError("constructor requires a list of PVs")
elif not isinstance(parents.format, self.pvClass):
raise ValueError("constructor requires a list of PVs")
DMDevice.__init__(self, name, parents=parents,
exists=exists, sysfsPath=sysfsPath)
self.uuid = uuid
self.free = numeric_type(free)
self.peSize = numeric_type(peSize)
self.peCount = numeric_type(peCount)
self.peFree = numeric_type(peFree)
self.pvCount = numeric_type(pvCount)
self.lvNames = lvNames
# circular references, here I come
self._lvs = []
# TODO: validate peSize if given
if not self.peSize:
self.peSize = 32.0 # MB
#self.probe()
def __str__(self):
s = DMDevice.__str__(self)
s += (" free = %(free)s PE Size = %(peSize)s PE Count = %(peCount)s\n"
" PE Free = %(peFree)s PV Count = %(pvCount)s\n"
" LV Names = %(lvNames)s modified = %(modified)s\n"
" extents = %(extents)s free space = %(freeSpace)s\n"
" free extents = %(freeExtents)s\n"
" PVs = %(pvs)s\n"
" LVs = %(lvs)s" %
{"free": self.free, "peSize": self.peSize, "peCount": self.peCount,
"peFree": self.peFree, "pvCount": self.pvCount,
"lvNames": self.lvNames, "modified": self.isModified,
"extents": self.extents, "freeSpace": self.freeSpace,
"freeExtents": self.freeExtents, "pvs": self.pvs, "lvs": self.lvs})
return s
@property
def dict(self):
d = super(LVMVolumeGroupDevice, self).dict
d.update({"free": self.free, "peSize": self.peSize,
"peCount": self.peCount, "peFree": self.peFree,
"pvCount": self.pvCount, "extents": self.extents,
"freeSpace": self.freeSpace,
"freeExtents": self.freeExtents,
"lvNames": [lv.name for lv in self.lvs]})
return d
def writeKS(self, f, preexisting=False, noformat=False, s=None):
args = ["--pesize=%s" % int(self.peSize * 1024)]
pvs = []
for pv in self.pvs:
pvs.append("pv.%s" % pv.format.uuid)
if preexisting:
args.append("--useexisting")
if noformat:
args.append("--noformat")
f.write("#volgroup %s %s %s" % (self.name, " ".join(args), " ".join(pvs)))
if s:
f.write(" %s" % s)
def probe(self):
""" Probe for any information about this device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
@property
def mapName(self):
""" This device's device-mapper map name """
# Thank you lvm for this lovely hack.
return self.name.replace("-","--")
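    # Example of the mangling above: a VG named "my-vg" gets the
    # device-mapper map name "my--vg", so its node is /dev/mapper/my--vg.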
@property
def path(self):
""" Device node representing this device. """
return "%s/%s" % (self._devDir, self.mapName)
def updateSysfsPath(self):
""" Update this device's sysfs path. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
self.sysfsPath = ''
@property
def status(self):
""" The device's status (True means active). """
if not self.exists:
return False
# certainly if any of this VG's LVs are active then so are we
for lv in self.lvs:
if lv.status:
return True
# if any of our PVs are not active then we cannot be
for pv in self.pvs:
if not pv.status:
return False
# if we are missing some of our PVs we cannot be active
if len(self.pvs) != self.pvCount:
return False
return True
def _addDevice(self, device):
""" Add a new physical volume device to the volume group.
XXX This is for use by device probing routines and is not
intended for modification of the VG.
"""
log_method_call(self,
self.name,
device=device.name,
status=self.status)
if not self.exists:
raise DeviceError("device does not exist", self.name)
if not isinstance(device.format, self.pvClass):
raise ValueError("addDevice requires a PV arg")
if self.uuid and device.format.vgUuid != self.uuid:
raise ValueError("UUID mismatch")
if device in self.pvs:
raise ValueError("device is already a member of this VG")
self.parents.append(device)
device.addChild()
# now see if the VG can be activated
if len(self.parents) == self.pvCount:
self.setup()
def _removeDevice(self, device):
""" Remove a physical volume from the volume group.
This is for cases like clearing of preexisting partitions.
"""
log_method_call(self,
self.name,
device=device.name,
status=self.status)
try:
self.parents.remove(device)
        except ValueError:
            raise ValueError("cannot remove non-member PV device from VG")
device.removeChild()
def setup(self, intf=None, orig=False):
""" Open, or set up, a device.
XXX we don't do anything like "vgchange -ay" because we don't
want all of the LVs activated, just the VG itself.
"""
log_method_call(self, self.name, orig=orig, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if self.status:
return
if len(self.parents) < self.pvCount:
raise DeviceError("cannot activate VG with missing PV(s)", self.name)
self.setupParents(orig=orig)
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.status:
lvm.vgdeactivate(self.name)
if recursive:
self.teardownParents(recursive=recursive)
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
w = None
if intf:
w = intf.progressWindow(_("Creating"),
_("Creating device %s")
% (self.path,),
100, pulse = True)
try:
self.createParents()
self.setupParents()
pv_list = [pv.path for pv in self.parents]
lvm.vgcreate(self.name, pv_list, self.peSize, progress=w)
except Exception:
raise
else:
# FIXME set / update self.uuid here
self.exists = True
self.setup()
finally:
if w:
w.pop()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
# set up the pvs since lvm needs access to them to do the vgremove
self.setupParents(orig=True)
# this sometimes fails for some reason.
try:
lvm.vgreduce(self.name, [], rm=True)
lvm.vgremove(self.name)
except lvm.LVMError:
raise DeviceError("Could not completely remove VG", self.name)
finally:
self.exists = False
def reduce(self, pv_list):
""" Remove the listed PVs from the VG. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
lvm.vgreduce(self.name, pv_list)
# XXX do we need to notify the kernel?
def _addLogVol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
raise ValueError("lv is already part of this vg")
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
if not lv.exists and \
not [pv for pv in self.pvs if getattr(pv, "req_grow", None)] and \
lv.size > self.freeSpace:
raise DeviceError("new lv is too large to fit in free space", self.name)
self._lvs.append(lv)
def _removeLogVol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
def _addPV(self, pv):
""" Add a PV to this VG. """
if pv in self.pvs:
raise ValueError("pv is already part of this vg")
# for the time being we will not allow vgextend
if self.exists:
raise DeviceError("cannot add pv to existing vg", self.name)
self.parents.append(pv)
pv.addChild()
def _removePV(self, pv):
""" Remove an PV from this VG. """
if not pv in self.pvs:
raise ValueError("specified pv is not part of this vg")
# for the time being we will not allow vgreduce
if self.exists:
raise DeviceError("cannot remove pv from existing vg", self.name)
self.parents.remove(pv)
pv.removeChild()
# We can't rely on lvm to tell us about our size, free space, &c
# since we could have modifications queued, unless the VG and all of
# its PVs already exist.
#
# -- liblvm may contain support for in-memory devices
@property
def isModified(self):
""" Return True if the VG has changes queued that LVM is unaware of. """
modified = True
if self.exists and not filter(lambda d: not d.exists, self.pvs):
modified = False
return modified
@property
def size(self):
""" The size of this VG """
# TODO: just ask lvm if isModified returns False
# sum up the sizes of the PVs and align to pesize
size = 0
for pv in self.pvs:
size += max(0, self.align(pv.size - pv.format.peStart))
return size
@property
def extents(self):
""" Number of extents in this VG """
# TODO: just ask lvm if isModified returns False
return self.size / self.peSize
@property
def freeSpace(self):
""" The amount of free space in this VG (in MB). """
# TODO: just ask lvm if isModified returns False
# total the sizes of any LVs
used = 0
size = self.size
log.debug("%s size is %dMB" % (self.name, size))
for lv in self.lvs:
log.debug("lv %s uses %dMB" % (lv.name, lv.vgSpaceUsed))
used += self.align(lv.vgSpaceUsed, roundup=True)
free = self.size - used
log.debug("vg %s has %dMB free" % (self.name, free))
return free
@property
def freeExtents(self):
""" The number of free extents in this VG. """
# TODO: just ask lvm if isModified returns False
return self.freeSpace / self.peSize
def align(self, size, roundup=None):
""" Align a size to a multiple of physical extent size. """
size = numeric_type(size)
if roundup:
round = math.ceil
else:
round = math.floor
# we want Kbytes as a float for our math
size *= 1024.0
pesize = self.peSize * 1024.0
return long((round(size / pesize) * pesize) / 1024)
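    # Worked example for align() with the default 32.0 MB extent size:
    # align(33) floors to one extent and returns 32, while
    # align(33, roundup=True) rounds up to two extents and returns 64.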
@property
def pvs(self):
""" A list of this VG's PVs """
return self.parents[:] # we don't want folks changing our list
@property
def lvs(self):
""" A list of this VG's LVs """
return self._lvs[:] # we don't want folks changing our list
@property
def complete(self):
"""Check if the vg has all its pvs in the system
Return True if complete.
"""
return len(self.pvs) == self.pvCount or not self.exists
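# Hypothetical construction sketch (pv0 and pv1 stand for devices whose
# format is an lvmpv instance; the device tree normally wires this up):
#
#     vg = LVMVolumeGroupDevice("vg_test", parents=[pv0, pv1], peSize=32.0)
#     vg.freeSpace    # extent-aligned PV space not yet claimed by LVs
#     vg.create()     # runs vgcreate against the member PV paths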
class LVMLogicalVolumeDevice(DMDevice):
""" An LVM Logical Volume """
_type = "lvmlv"
_resizable = True
def __init__(self, name, vgdev, size=None, uuid=None,
stripes=1, logSize=0, snapshotSpace=0,
format=None, exists=None, sysfsPath='',
grow=None, maxsize=None, percent=None):
""" Create a LVMLogicalVolumeDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
vgdev -- volume group (LVMVolumeGroupDevice instance)
Keyword Arguments:
size -- the device's size (in MB)
uuid -- the device's UUID
stripes -- number of copies in the vg (>1 for mirrored lvs)
logSize -- size of log volume (for mirrored lvs)
snapshotSpace -- sum of sizes of snapshots of this lv
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
exists -- indicates whether this is an existing device
For new (non-existent) LVs only:
grow -- whether to grow this LV
maxsize -- maximum size for growable LV (in MB)
percent -- percent of VG space to take
"""
if isinstance(vgdev, list):
if len(vgdev) != 1:
raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
elif not isinstance(vgdev[0], LVMVolumeGroupDevice):
raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
elif not isinstance(vgdev, LVMVolumeGroupDevice):
raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
DMDevice.__init__(self, name, size=size, format=format,
sysfsPath=sysfsPath, parents=vgdev,
exists=exists)
self.uuid = uuid
self.snapshotSpace = snapshotSpace
self.stripes = stripes
self.logSize = logSize
self.req_grow = None
self.req_max_size = 0
self.req_size = 0
self.req_percent = 0
if not self.exists:
self.req_grow = grow
self.req_max_size = numeric_type(maxsize)
# XXX should we enforce that req_size be pe-aligned?
self.req_size = self._size
self.req_percent = numeric_type(percent)
# here we go with the circular references
self.vg._addLogVol(self)
def __str__(self):
s = DMDevice.__str__(self)
s += (" VG device = %(vgdev)r percent = %(percent)s\n"
" mirrored = %(mirrored)s stripes = %(stripes)d"
" snapshot total = %(snapshots)dMB\n"
" VG space used = %(vgspace)dMB" %
{"vgdev": self.vg, "percent": self.req_percent,
"mirrored": self.mirrored, "stripes": self.stripes,
"snapshots": self.snapshotSpace, "vgspace": self.vgSpaceUsed })
return s
@property
def dict(self):
d = super(LVMLogicalVolumeDevice, self).dict
if self.exists:
d.update({"mirrored": self.mirrored, "stripes": self.stripes,
"snapshots": self.snapshotSpace,
"vgspace": self.vgSpaceUsed})
else:
d.update({"percent": self.req_percent})
return d
def writeKS(self, f, preexisting=False, noformat=False, s=None):
args = ["--name=%s" % self.lvname,
"--vgname=%s" % self.vg.name]
if self.req_grow:
args.extend(["--grow", "--size=%s" % (self.req_size or 1)])
if self.req_max_size > 0:
args.append("--maxsize=%s" % self.req_max_size)
else:
if self.req_percent > 0:
args.append("--percent=%s" % self.req_percent)
elif self.req_size > 0:
args.append("--size=%s" % self.req_size)
if preexisting:
args.append("--useexisting")
if noformat:
args.append("--noformat")
f.write("#logvol ")
self.format.writeKS(f)
f.write(" %s" % " ".join(args))
if s:
f.write(" %s" % s)
@property
def mirrored(self):
return self.stripes > 1
def _setSize(self, size):
size = self.vg.align(numeric_type(size))
log.debug("trying to set lv %s size to %dMB" % (self.name, size))
if size <= (self.vg.freeSpace + self._size):
self._size = size
self.targetSize = size
else:
log.debug("failed to set size: %dMB short" % (size - (self.vg.freeSpace + self._size),))
raise ValueError("not enough free space in volume group")
size = property(StorageDevice._getSize, _setSize)
@property
def vgSpaceUsed(self):
return self.size * self.stripes + self.logSize + self.snapshotSpace
@property
def vg(self):
""" This Logical Volume's Volume Group. """
return self.parents[0]
@property
def mapName(self):
""" This device's device-mapper map name """
# Thank you lvm for this lovely hack.
return "%s-%s" % (self.vg.mapName, self._name.replace("-","--"))
@property
def path(self):
""" Device node representing this device. """
return "%s/%s" % (self._devDir, self.mapName)
def getDMNode(self):
""" Return the dm-X (eg: dm-0) device node for this device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
return dm.dm_node_from_name(self.mapName)
@property
def name(self):
""" This device's name. """
return "%s-%s" % (self.vg.name, self._name)
@property
def lvname(self):
""" The LV's name (not including VG name). """
return self._name
@property
def complete(self):
""" Test if vg exits and if it has all pvs. """
return self.vg.complete
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if self.status:
return
self.vg.setup(orig=orig)
lvm.lvactivate(self.vg.name, self._name)
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
udev_settle()
if self.status:
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
udev_settle()
if self.status:
lvm.lvdeactivate(self.vg.name, self._name)
if recursive:
# It's likely that teardown of a VG will fail due to other
# LVs being active (filesystems mounted, &c), so don't let
# it bring everything down.
try:
self.vg.teardown(recursive=recursive)
            except Exception as e:
                log.debug("vg %s teardown failed; continuing: %s"
                          % (self.vg.name, e))
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
w = None
if intf:
w = intf.progressWindow(_("Creating"),
_("Creating device %s")
% (self.path,),
100, pulse = True)
try:
self.createParents()
self.setupParents()
# should we use --zero for safety's sake?
lvm.lvcreate(self.vg.name, self._name, self.size, progress=w)
except Exception:
raise
else:
# FIXME set / update self.uuid here
self.exists = True
self.setup()
finally:
if w:
w.pop()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
self.teardown()
# set up the vg's pvs so lvm can remove the lv
self.vg.setupParents(orig=True)
lvm.lvremove(self.vg.name, self._name)
self.exists = False
def resize(self, intf=None):
# XXX resize format probably, right?
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
# Setup VG parents (in case they are dmraid partitions for example)
self.vg.setupParents(orig=True)
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
udev_settle()
lvm.lvresize(self.vg.name, self._name, self.size)
def dracutSetupString(self):
        # Note: no mapName usage here; this is an lvm command-line name,
        # which is different (of course)
return "rd_LVM_LV=%s/%s" % (self.vg.name, self._name)
class MDRaidArrayDevice(StorageDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
def __init__(self, name, level=None, major=None, minor=None, size=None,
memberDevices=None, totalDevices=None, bitmap=False,
uuid=None, format=None, exists=None,
parents=None, sysfsPath=''):
""" Create a MDRaidArrayDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
Keyword Arguments:
level -- the device's RAID level (a string, eg: '1' or 'raid1')
parents -- list of member devices (StorageDevice instances)
size -- the device's size (units/format TBD)
uuid -- the device's UUID
minor -- the device minor
bitmap -- whether to use a bitmap (boolean)
sysfsPath -- sysfs device path
format -- a DeviceFormat instance
exists -- indicates whether this is an existing device
"""
StorageDevice.__init__(self, name, format=format, exists=exists,
major=major, minor=minor, size=size,
parents=parents, sysfsPath=sysfsPath)
self.level = level
if level == "container":
self._type = "mdcontainer"
elif level is not None:
self.level = mdraid.raidLevel(level)
# For new arrays check if we have enough members
if (not exists and parents and
len(parents) < mdraid.get_raid_min_members(self.level)):
            raise ValueError(_("A RAID%d set requires at least %d members") %
                             (self.level, mdraid.get_raid_min_members(self.level)))
self.uuid = uuid
self._totalDevices = numeric_type(totalDevices)
self._memberDevices = numeric_type(memberDevices)
self.sysfsPath = "/devices/virtual/block/%s" % name
self.chunkSize = 512.0 / 1024.0 # chunk size in MB
self.superBlockSize = 2.0 # superblock size in MB
        # For container members, probe the size now, as we cannot determine
        # it once the array is torn down.
if self.parents and self.parents[0].type == "mdcontainer":
self._size = self.currentSize
self._type = "mdbiosraidarray"
# FIXME: Bitmap is more complicated than this.
# It can be internal or external. External requires a filename.
self.bitmap = bitmap
self.formatClass = get_device_format_class("mdmember")
if not self.formatClass:
raise DeviceError("cannot find class for 'mdmember'", self.name)
if self.exists and self.uuid:
# this is a hack to work around mdadm's insistence on giving
# really high minors to arrays it has no config entry for
md_f = open("/etc/mdadm.conf", "a")
md_f.write("ARRAY %s UUID=%s\n" % (self.path, self.uuid))
md_f.close()
@property
def smallestMember(self):
try:
smallest = sorted(self.devices, key=lambda d: d.size)[0]
except IndexError:
smallest = None
return smallest
@property
def size(self):
if not self.devices:
return 0
        # For container members return the probed size, as we cannot
        # determine it once the array is torn down.
if self.type == "mdbiosraidarray":
return self._size
size = 0
smallestMemberSize = self.smallestMember.size - self.superBlockSize
if not self.exists or not self.partedDevice:
if self.level == mdraid.RAID0:
size = self.memberDevices * smallestMemberSize
size -= size % self.chunkSize
elif self.level == mdraid.RAID1:
size = smallestMemberSize
elif self.level == mdraid.RAID4:
size = (self.memberDevices - 1) * smallestMemberSize
size -= size % self.chunkSize
elif self.level == mdraid.RAID5:
size = (self.memberDevices - 1) * smallestMemberSize
size -= size % self.chunkSize
elif self.level == mdraid.RAID6:
size = (self.memberDevices - 2) * smallestMemberSize
size -= size % self.chunkSize
elif self.level == mdraid.RAID10:
size = (self.memberDevices / 2.0) * smallestMemberSize
size -= size % self.chunkSize
else:
size = self.partedDevice.getSize()
return size
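    # Worked example of the size math above (hypothetical, not-yet-created
    # array): four members whose smallest is 100 MB leave 98 MB each after
    # the 2 MB superblock, giving roughly
    #   RAID0  -> 4 * 98 = 392 MB (minus any chunk-size remainder)
    #   RAID1  -> 98 MB
    #   RAID5  -> (4 - 1) * 98 = 294 MB
    #   RAID10 -> (4 / 2.0) * 98 = 196 MB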
@property
def description(self):
if self.level == mdraid.RAID0:
levelstr = "stripe"
elif self.level == mdraid.RAID1:
levelstr = "mirror"
else:
levelstr = "raid%s" % self.level
if self.type == "mdcontainer":
return "BIOS RAID container"
elif self.type == "mdbiosraidarray":
return "BIOS RAID set (%s)" % levelstr
else:
return "MDRAID set (%s)" % levelstr
def __str__(self):
s = StorageDevice.__str__(self)
s += (" level = %(level)s bitmap = %(bitmap)s spares = %(spares)s\n"
" members = %(memberDevices)s\n"
" total devices = %(totalDevices)s" %
{"level": self.level, "bitmap": self.bitmap, "spares": self.spares,
"memberDevices": self.memberDevices, "totalDevices": self.totalDevices})
return s
@property
def dict(self):
d = super(MDRaidArrayDevice, self).dict
d.update({"level": self.level, "bitmap": self.bitmap,
"spares": self.spares, "memberDevices": self.memberDevices,
"totalDevices": self.totalDevices})
return d
def writeKS(self, f, preexisting=False, noformat=False, s=None):
args = ["--level=%s" % self.level,
"--device=%s" % self.name]
mems = []
if self.spares > 0:
args.append("--spares=%s" % self.spares)
if preexisting:
args.append("--useexisting")
if noformat:
args.append("--noformat")
for mem in self.parents:
mems.append("raid.%s" % mem.format.uuid)
f.write("#raid ")
self.format.writeKS(f)
f.write(" %s" % " ".join(args))
f.write(" %s" % " ".join(mems))
if s:
f.write(" %s" % s)
@property
def mdadmConfEntry(self):
""" This array's mdadm.conf entry. """
if self.level is None or self.memberDevices is None or not self.uuid:
raise DeviceError("array is not fully defined", self.name)
# containers and the sets within must only have a UUID= parameter
if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
fmt = "ARRAY %s UUID=%s\n"
return fmt % (self.path, self.uuid)
fmt = "ARRAY %s level=raid%d num-devices=%d UUID=%s\n"
return fmt % (self.path, self.level, self.memberDevices, self.uuid)
@property
def totalDevices(self):
""" Total number of devices in the array, including spares. """
count = len(self.parents)
if not self.exists:
count = self._totalDevices
return count
def _getMemberDevices(self):
return self._memberDevices
def _setMemberDevices(self, number):
if not isinstance(number, int):
raise ValueError("memberDevices is an integer")
if number > self.totalDevices:
raise ValueError("memberDevices cannot be greater than totalDevices")
self._memberDevices = number
memberDevices = property(_getMemberDevices, _setMemberDevices,
doc="number of member devices")
def _getSpares(self):
spares = 0
if self.memberDevices is not None:
if self.totalDevices is not None:
spares = self.totalDevices - self.memberDevices
else:
spares = self.memberDevices
self._totalDevices = self.memberDevices
return spares
def _setSpares(self, spares):
# FIXME: this is too simple to be right
if self.totalDevices > spares:
self.memberDevices = self.totalDevices - spares
spares = property(_getSpares, _setSpares)
def probe(self):
""" Probe for any missing information about this device.
I'd like to avoid paying any attention to "Preferred Minor"
as it seems problematic.
"""
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
try:
self.devices[0].setup()
except Exception:
return
info = mdraid.mdexamine(self.devices[0].path)
if self.level is None:
self.level = mdraid.raidLevel(info['level'])
def updateSysfsPath(self):
""" Update this device's sysfs path. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if self.status:
self.sysfsPath = "/devices/virtual/block/%s" % self.name
else:
self.sysfsPath = ''
def _addDevice(self, device):
""" Add a new member device to the array.
XXX This is for use when probing devices, not for modification
of arrays.
"""
log_method_call(self,
self.name,
device=device.name,
status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if not isinstance(device.format, self.formatClass):
raise ValueError("invalid device format for mdraid member")
if self.uuid and device.format.mdUuid != self.uuid:
raise ValueError("cannot add member with non-matching UUID")
if device in self.devices:
raise ValueError("device is already a member of this array")
# we added it, so now set up the relations
self.devices.append(device)
device.addChild()
device.setup()
udev_settle()
try:
mdraid.mdadd(device.path)
# mdadd causes udev events
udev_settle()
except MDRaidError as e:
log.warning("failed to add member %s to md array %s: %s"
% (device.path, self.path, e))
if self.status:
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
def _removeDevice(self, device):
""" Remove a component device from the array.
XXX This is for use by clearpart, not for reconfiguration.
"""
log_method_call(self,
self.name,
device=device.name,
status=self.status)
if device not in self.devices:
raise ValueError("cannot remove non-member device from array")
self.devices.remove(device)
device.removeChild()
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
# check the status in sysfs
status = False
if not self.exists:
return status
state_file = "/sys/%s/md/array_state" % self.sysfsPath
if os.access(state_file, os.R_OK):
state_f = open(state_file)
state = state_f.read().strip()
state_f.close()
log.debug("%s state is %s" % (self.name, state))
if state in ("clean", "active", "active-idle", "readonly", "read-auto"):
status = True
# mdcontainers have state inactive when started (clear if stopped)
if self.type == "mdcontainer" and state == "inactive":
status = True
return status
@property
def degraded(self):
""" Return True if the array is running in degraded mode. """
rc = False
degraded_file = "/sys/%s/md/degraded" % self.sysfsPath
if os.access(degraded_file, os.R_OK):
deg_f = open(degraded_file)
val = deg_f.read().strip()
deg_f.close()
log.debug("%s degraded is %s" % (self.name, val))
if val == "1":
rc = True
return rc
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if self.status:
return
disks = []
for member in self.devices:
member.setup(orig=orig)
disks.append(member.path)
mdraid.mdactivate(self.path,
members=disks,
super_minor=self.minor,
uuid=self.uuid)
udev_settle()
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.status:
if self.originalFormat.exists:
self.originalFormat.teardown()
if self.format.exists:
self.format.teardown()
udev_settle()
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
if self.type == "mdcontainer" or self.type == "mdbiosraidarray":
return
# We don't really care what the array's state is. If the device
# file exists, we want to deactivate it. mdraid has too many
# states.
if self.exists and os.path.exists(self.path):
mdraid.mddeactivate(self.path)
if recursive:
self.teardownParents(recursive=recursive)
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
w = None
if intf:
w = intf.progressWindow(_("Creating"),
_("Creating device %s")
% (self.path,),
100, pulse = True)
try:
self.createParents()
self.setupParents()
disks = [disk.path for disk in self.devices]
spares = len(self.devices) - self.memberDevices
# Figure out format specific options
metadata="1.1"
# bitmaps are not meaningful on raid0 according to mdadm-3.0.3
bitmap = self.level != 0
if getattr(self.format, "mountpoint", None) == "/boot":
metadata="1.0"
bitmap=False
elif self.format.type == "swap":
bitmap=False
mdraid.mdcreate(self.path,
self.level,
disks,
spares,
metadataVer=metadata,
bitmap=bitmap,
progress=w)
except Exception:
raise
else:
self.exists = True
# the array is automatically activated upon creation, but...
self.setup()
udev_settle()
self.updateSysfsPath()
info = udev_get_block_device(self.sysfsPath)
self.uuid = udev_device_get_md_uuid(info)
for member in self.devices:
member.mdUuid = self.uuid
finally:
if w:
w.pop()
    @property
    def formatArgs(self):
        # pass an ext2 stride hint sized to the number of data disks
        formatArgs = []
        if self.format.type == "ext2":
            if self.level in (mdraid.RAID4, mdraid.RAID5):
                formatArgs = ['-R',
                              'stride=%d' % ((self.memberDevices - 1) * 16)]
            elif self.level == mdraid.RAID0:
                formatArgs = ['-R',
                              'stride=%d' % (self.memberDevices * 16)]
        return formatArgs
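    # Worked example (editor's sketch, hypothetical numbers): for a
    # four-member RAID5 array, one member's worth of each stripe holds
    # parity, so the hint becomes (4 - 1) * 16 = 48, i.e.
    # formatArgs == ['-R', 'stride=48'].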
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
self.teardown()
# The destruction of the formatting on the member devices does the
# real work, but it isn't our place to do it from here.
self.exists = False
@property
def mediaPresent(self):
# Containers should not get any format handling done
# (the device node does not allow read / write calls)
if self.type == "mdcontainer":
return False
        # BIOS RAID sets should show as present even when torn down
elif self.type == "mdbiosraidarray":
return True
else:
return self.partedDevice is not None
@property
def model(self):
return self.description
@property
def partitionable(self):
return self.type == "mdbiosraidarray"
@property
def isDisk(self):
return self.type == "mdbiosraidarray"
def dracutSetupString(self):
return "rd_MD_UUID=%s" % self.uuid
class DMRaidArrayDevice(DMDevice):
""" A dmraid (device-mapper RAID) device """
_type = "dm-raid array"
_packages = ["dmraid"]
_partitionable = True
_isDisk = True
def __init__(self, name, raidSet=None, format=None,
size=None, parents=None, sysfsPath=''):
""" Create a DMRaidArrayDevice instance.
Arguments:
name -- the dmraid name also the device node's basename
Keyword Arguments:
raidSet -- the RaidSet object from block
parents -- a list of the member devices
sysfsPath -- sysfs device path
size -- the device's size
format -- a DeviceFormat instance
"""
if isinstance(parents, list):
for parent in parents:
if not parent.format or parent.format.type != "dmraidmember":
raise ValueError("parent devices must contain dmraidmember format")
DMDevice.__init__(self, name, format=format, size=size,
parents=parents, sysfsPath=sysfsPath, exists=True)
self.formatClass = get_device_format_class("dmraidmember")
if not self.formatClass:
raise StorageError("cannot find class for 'dmraidmember'")
self._raidSet = raidSet
@property
def raidSet(self):
return self._raidSet
def _addDevice(self, device):
""" Add a new member device to the array.
XXX This is for use when probing devices, not for modification
of arrays.
"""
log_method_call(self, self.name, device=device.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
if not isinstance(device.format, self.formatClass):
raise ValueError("invalid device format for dmraid member")
if device in self.members:
raise ValueError("device is already a member of this array")
# we added it, so now set up the relations
self.devices.append(device)
device.addChild()
@property
def members(self):
return self.parents
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def deactivate(self):
""" Deactivate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is not active.
self._raidSet.deactivate()
def activate(self):
""" Activate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is active.
self._raidSet.activate(mknod=True)
udev_settle()
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
StorageDevice.setup(self, intf=intf, orig=orig)
self.activate()
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
log.debug("not tearing down dmraid device %s" % self.name)
@property
def description(self):
return "BIOS RAID set (%s)" % self._raidSet.rs.set_type
@property
def model(self):
return self.description
def dracutSetupString(self):
return "rd_DM_UUID=%s" % self.name
class MultipathDevice(DMDevice):
""" A multipath device """
_type = "dm-multipath"
_packages = ["device-mapper-multipath"]
_partitionable = True
_isDisk = True
def __init__(self, name, info, format=None, size=None,
parents=None, sysfsPath=''):
""" Create a MultipathDevice instance.
Arguments:
name -- the device name (generally a device node's basename)
info -- the udev info for this device
Keyword Arguments:
sysfsPath -- sysfs device path
size -- the device's size
format -- a DeviceFormat instance
parents -- a list of the backing devices (Device instances)
"""
self._info = info
self.setupIdentity()
DMDevice.__init__(self, name, format=format, size=size,
parents=parents, sysfsPath=sysfsPath,
exists=True)
self.config = {
'wwid' : self.identity,
'alias' : self.name,
'mode' : '0600',
'uid' : '0',
'gid' : '0',
}
def setupIdentity(self):
""" Adds identifying remarks to MultipathDevice object.
May be overridden by a sub-class for e.g. RDAC handling.
"""
self._identity_short = self._info['ID_SERIAL_SHORT']
self._identity = self._info['ID_SERIAL']
@property
def identity(self):
""" Get identity set with setupIdentityFromInfo()
May be overridden by a sub-class for e.g. RDAC handling.
"""
if not hasattr(self, "_identity"):
raise RuntimeError, "setupIdentityFromInfo() has not been called."
return self._identity
@property
def wwid(self):
identity = self._identity_short
ret = []
while identity:
ret.append(identity[:2])
identity = identity[2:]
return ":".join(ret)
@property
def model(self):
if not self.parents:
return ""
return self.parents[0].model
@property
def vendor(self):
if not self.parents:
return ""
return self.parents[0].vendor
@property
def description(self):
return "WWID %s" % (self.wwid,)
def addParent(self, parent):
""" Add a parent device to the mpath. """
log_method_call(self, self.name, status=self.status)
if self.status:
self.teardown()
self.parents.append(parent)
self.setup()
else:
self.parents.append(parent)
def setupPartitions(self):
log_method_call(self, name=self.name, kids=self.kids)
rc = iutil.execWithRedirect("kpartx",
["-a", "-p", "p", "/dev/mapper/%s" % self.name],
stdout = "/dev/tty5",
stderr = "/dev/tty5")
if rc:
raise MPathError("multipath partition activation failed for '%s'" %
self.name)
udev_settle()
def teardown(self, recursive=None):
""" Tear down the mpath device. """
log_method_call(self, self.name, status=self.status)
if not self.exists and not recursive:
raise DeviceError("device has not been created", self.name)
if self.exists and os.path.exists(self.path):
#self.teardownPartitions()
#rc = iutil.execWithRedirect("multipath",
# ['-f', self.name],
# stdout = "/dev/tty5",
# stderr = "/dev/tty5")
#if rc:
# raise MPathError("multipath deactivation failed for '%s'" %
# self.name)
bdev = block.getDevice(self.name)
devmap = block.getMap(major=bdev[0], minor=bdev[1])
if devmap.open_count:
return
try:
block.removeDeviceMap(devmap)
except Exception as e:
raise MPathError("failed to tear down multipath device %s: %s"
% (self.name, e))
if recursive:
self.teardownParents(recursive=recursive)
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
if self.status:
return
StorageDevice.setup(self, intf=intf, orig=orig)
udev_settle()
rc = iutil.execWithRedirect("multipath",
[self.name],
stdout = "/dev/tty5",
stderr = "/dev/tty5")
if rc:
raise MPathError("multipath activation failed for '%s'" %
self.name)
udev_settle()
self.setupPartitions()
udev_settle()
class NoDevice(StorageDevice):
""" A nodev device for nodev filesystems like tmpfs. """
_type = "nodev"
def __init__(self, format=None):
""" Create a NoDevice instance.
Arguments:
Keyword Arguments:
format -- a DeviceFormat instance
"""
if format:
name = format.type
else:
name = "none"
StorageDevice.__init__(self, name, format=format)
@property
def path(self):
""" Device node representing this device. """
return self.name
def probe(self):
""" Probe for any missing information about this device. """
log_method_call(self, self.name, status=self.status)
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
def teardown(self, recursive=False):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
self.setupParents()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
class FileDevice(StorageDevice):
""" A file on a filesystem.
This exists because of swap files.
"""
_type = "file"
_devDir = ""
def __init__(self, path, format=None, size=None,
exists=None, parents=None):
""" Create a FileDevice instance.
Arguments:
path -- full path to the file
Keyword Arguments:
format -- a DeviceFormat instance
size -- the file size (units TBD)
parents -- a list of required devices (Device instances)
exists -- indicates whether this is an existing device
"""
StorageDevice.__init__(self, path, format=format, size=size,
exists=exists, parents=parents)
def probe(self):
""" Probe for any missing information about this device. """
pass
@property
def fstabSpec(self):
return self.name
@property
def path(self):
path = self.name
root = ""
try:
status = self.parents[0].format.status
except (AttributeError, IndexError):
status = False
if status:
# this is the actual active mountpoint
root = self.parents[0].format._mountpoint
# trim the mountpoint down to the chroot since we already have
# the otherwise fully-qualified path
mountpoint = self.parents[0].format.mountpoint
if mountpoint.endswith("/"):
mountpoint = mountpoint[:-1]
if mountpoint:
root = root[:-len(mountpoint)]
return os.path.normpath("%s/%s" % (root, path))
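    # Worked example (hypothetical values): if the parent filesystem is
    # mounted at "/mnt/sysimage/home" for a target mountpoint of "/home",
    # root is trimmed to "/mnt/sysimage" and a file named "/home/swapfile"
    # resolves to "/mnt/sysimage/home/swapfile".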
def setup(self, intf=None, orig=False):
StorageDevice.setup(self, orig=orig)
if self.format and self.format.exists and not self.format.status:
self.format.device = self.path
for parent in self.parents:
if orig:
parent.originalFormat.setup()
else:
parent.format.setup()
def teardown(self, recursive=None):
StorageDevice.teardown(self)
if self.format and self.format.exists and not self.format.status:
self.format.device = self.path
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
w = None
if intf:
w = intf.waitWindow(_("Creating"),
_("Creating file %s") % (self.path,))
        fd = None
        try:
            # this only checks that parents exist
            self.createParents()
            self.setupParents()
            # O_CREAT is required here: the backing file does not exist yet
            fd = os.open(self.path, os.O_WRONLY | os.O_CREAT)
            buf = '\0' * 1024 * 1024 * self.size
            os.write(fd, buf)
        except (OSError, TypeError) as e:
            log.error("error writing out %s: %s" % (self.path, e))
            raise DeviceError(e, self.name)
        else:
            self.exists = True
        finally:
            # fd stays None if os.open() itself failed; only close a real fd
            if fd is not None:
                os.close(fd)
            if w:
                w.pop()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
os.unlink(self.path)
self.exists = False
class DirectoryDevice(FileDevice):
""" A directory on a filesystem.
This exists because of bind mounts.
"""
_type = "directory"
def create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
if self.exists:
raise DeviceError("device already exists", self.name)
self.createParents()
self.setupParents()
try:
iutil.mkdirChain(self.path)
        except Exception as e:
raise DeviceError(e, self.name)
self.exists = True
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
        # self.path is a directory, so it must be removed with rmdir
        os.rmdir(self.path)
self.exists = False
class iScsiDiskDevice(DiskDevice, NetworkStorageDevice):
""" An iSCSI disk. """
_type = "iscsi"
_packages = ["iscsi-initiator-utils", "dracut-network"]
def __init__(self, device, **kwargs):
self.node = kwargs.pop("node")
self.ibft = kwargs.pop("ibft")
self.initiator = kwargs.pop("initiator")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, host_address=self.node.address)
log.debug("created new iscsi disk %s %s:%d" % (self.node.name, self.node.address, self.node.port))
def dracutSetupString(self):
if self.ibft:
return "iscsi_firmware"
netroot="netroot=iscsi:"
auth = self.node.getAuth()
if auth:
netroot += "%s:%s" % (auth.username, auth.password)
if len(auth.reverse_username) or len(auth.reverse_password):
netroot += ":%s:%s" % (auth.reverse_username,
auth.reverse_password)
netroot += "@%s::%d::%s" % (self.node.address, self.node.port,
self.node.name)
netroot += " iscsi_initiator=%s" % self.initiator
return netroot
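    # Example output (hypothetical target, no CHAP auth), all one string:
    #   "netroot=iscsi:@192.168.0.10::3260::iqn.2001-04.com.example:disk1
    #    iscsi_initiator=iqn.2001-04.com.example:client"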
class FcoeDiskDevice(DiskDevice, NetworkStorageDevice):
""" An FCoE disk. """
_type = "fcoe"
_packages = ["fcoe-utils", "dracut-network"]
def __init__(self, device, **kwargs):
self.nic = kwargs.pop("nic")
self.identifier = kwargs.pop("identifier")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, nic=self.nic)
log.debug("created new fcoe disk %s @ %s" % (device, self.nic))
def dracutSetupString(self):
dcb = True
from .fcoe import fcoe
for nic, dcb in fcoe().nics:
if nic == self.nic:
break
if dcb:
dcbOpt = "dcb"
else:
dcbOpt = "nodcb"
return "netroot=fcoe:%s:%s" % (self.nic, dcbOpt)
class OpticalDevice(StorageDevice):
""" An optical drive, eg: cdrom, dvd+r, &c.
XXX Is this useful?
"""
_type = "cdrom"
def __init__(self, name, major=None, minor=None, exists=None,
format=None, parents=None, sysfsPath='', vendor="",
model=""):
StorageDevice.__init__(self, name, format=format,
major=major, minor=minor, exists=True,
parents=parents, sysfsPath=sysfsPath,
vendor=vendor, model=model)
@property
def mediaPresent(self):
""" Return a boolean indicating whether or not the device contains
media.
"""
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
try:
fd = os.open(self.path, os.O_RDONLY)
except OSError as e:
# errno 123 = No medium found
if e.errno == 123:
return False
else:
return True
else:
os.close(fd)
return True
def eject(self):
""" Eject the drawer. """
import _isys
log_method_call(self, self.name, status=self.status)
if not self.exists:
raise DeviceError("device has not been created", self.name)
#try to umount and close device before ejecting
self.teardown()
# Make a best effort attempt to do the eject. If it fails, it's not
# critical.
fd = os.open(self.path, os.O_RDONLY | os.O_NONBLOCK)
try:
_isys.ejectcdrom(fd)
except SystemError as e:
log.warning("error ejecting cdrom %s: %s" % (self.name, e))
os.close(fd)
class ZFCPDiskDevice(DiskDevice):
""" A mainframe ZFCP disk. """
_type = "zfcp"
def __init__(self, device, **kwargs):
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
DiskDevice.__init__(self, device, **kwargs)
def __str__(self):
s = DiskDevice.__str__(self)
s += (" hba_id = %(hba_id)s wwpn = %(wwpn)s fcp_lun = %(fcp_lun)s" %
{"hba_id": self.hba_id,
"wwpn": self.wwpn,
"fcp_lun": self.fcp_lun})
return s
def dracutSetupString(self):
return "rd_ZFCP=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)
class DASDDevice(DiskDevice):
""" A mainframe DASD. """
_type = "dasd"
def __init__(self, device, **kwargs):
self.busid = kwargs.pop('busid')
self.opts = kwargs.pop('opts')
self.dasd = kwargs.pop('dasd')
DiskDevice.__init__(self, device, **kwargs)
if self.dasd:
self.dasd.addDASD(self)
def getOpts(self):
        return ["%s=%s" % (k, v) for k, v in self.opts.items()]
def dracutSetupString(self):
args = ["rd_DASD=%s" % (self.busid,)] + self.getOpts()
return ",".join(args)
class NFSDevice(StorageDevice, NetworkStorageDevice):
""" An NFS device """
_type = "nfs"
_packages = ["dracut-network"]
def __init__(self, device, format=None, parents=None):
# we could make host/ip, path, &c but will anything use it?
StorageDevice.__init__(self, device, format=format, parents=parents)
NetworkStorageDevice.__init__(self, device.split(":")[0])
@property
def path(self):
""" Device node representing this device. """
return self.name
def setup(self, intf=None, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status)
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status)
def create(self, intf=None):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
self.createParents()
self.setupParents()
def destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
|
DarrelHsu/cvsClient
|
refs/heads/master
|
third_party/logilab/common/fileutils.py
|
24
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""File and file-path manipulation utilities.
:group path manipulation: first_level_directory, relative_path, is_binary,\
get_by_ext, remove_dead_links
:group file manipulation: norm_read, norm_open, lines, stream_lines,\
write_open_mode, ensure_fs_mode, export
:sort: path manipulation, file manipulation
"""
__docformat__ = "restructuredtext en"
import sys
import shutil
import mimetypes
from os.path import isabs, isdir, islink, split, exists, normpath, join
from os.path import abspath
from os import sep, mkdir, remove, listdir, stat, chmod, walk
from stat import ST_MODE, S_IWRITE
from cStringIO import StringIO
from logilab.common import STD_BLACKLIST as BASE_BLACKLIST, IGNORED_EXTENSIONS
from logilab.common.shellutils import find
from logilab.common.deprecation import deprecated
from logilab.common.compat import FileIO, any
def first_level_directory(path):
"""Return the first level directory of a path.
>>> first_level_directory('home/syt/work')
'home'
>>> first_level_directory('/home/syt/work')
'/'
>>> first_level_directory('work')
'work'
>>>
:type path: str
:param path: the path for which we want the first level directory
:rtype: str
:return: the first level directory appearing in `path`
"""
head, tail = split(path)
while head and tail:
head, tail = split(head)
if tail:
return tail
# path was absolute, head is the fs root
return head
def abspath_listdir(path):
"""Lists path's content using absolute paths.
>>> os.listdir('/home')
['adim', 'alf', 'arthur', 'auc']
>>> abspath_listdir('/home')
['/home/adim', '/home/alf', '/home/arthur', '/home/auc']
"""
path = abspath(path)
return [join(path, filename) for filename in listdir(path)]
def is_binary(filename):
"""Return true if filename may be a binary file, according to it's
extension.
:type filename: str
:param filename: the name of the file
:rtype: bool
:return:
true if the file is a binary file (actually if it's mime type
isn't beginning by text/)
"""
try:
return not mimetypes.guess_type(filename)[0].startswith('text')
except AttributeError:
return 1
def write_open_mode(filename):
"""Return the write mode that should used to open file.
:type filename: str
:param filename: the name of the file
:rtype: str
:return: the mode that should be use to open the file ('w' or 'wb')
"""
if is_binary(filename):
return 'wb'
return 'w'
def ensure_fs_mode(filepath, desired_mode=S_IWRITE):
"""Check that the given file has the given mode(s) set, else try to
set it.
:type filepath: str
:param filepath: path of the file
:type desired_mode: int
:param desired_mode:
        ORed flags describing the desired mode. Use constants from the
        `stat` module for file permission modes
"""
mode = stat(filepath)[ST_MODE]
if not mode & desired_mode:
chmod(filepath, mode | desired_mode)
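# Usage sketch (editor's addition, hypothetical path): make a generated file
# writable before appending to it, using the stat constants imported above.
#
#     from stat import S_IWRITE
#     ensure_fs_mode('/tmp/report.txt', S_IWRITE)
#     open('/tmp/report.txt', 'a').write('appended line\n')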
# XXX (syt) unused? kill?
class ProtectedFile(FileIO):
"""A special file-object class that automatically does a 'chmod +w' when
needed.
XXX: for now, the way it is done allows 'normal file-objects' to be
created during the ProtectedFile object lifetime.
One way to circumvent this would be to chmod / unchmod on each
write operation.
One other way would be to :
- catch the IOError in the __init__
- if IOError, then create a StringIO object
- each write operation writes in this StringIO object
- on close()/del(), write/append the StringIO content to the file and
do the chmod only once
"""
def __init__(self, filepath, mode):
self.original_mode = stat(filepath)[ST_MODE]
self.mode_changed = False
if mode in ('w', 'a', 'wb', 'ab'):
if not self.original_mode & S_IWRITE:
chmod(filepath, self.original_mode | S_IWRITE)
self.mode_changed = True
FileIO.__init__(self, filepath, mode)
def _restore_mode(self):
"""restores the original mode if needed"""
if self.mode_changed:
chmod(self.name, self.original_mode)
# Don't re-chmod in case of several restore
self.mode_changed = False
def close(self):
"""restore mode before closing"""
self._restore_mode()
FileIO.close(self)
def __del__(self):
if not self.closed:
self.close()
class UnresolvableError(Exception):
"""Exception raised by relative path when it's unable to compute relative
path between two paths.
"""
def relative_path(from_file, to_file):
"""Try to get a relative path from `from_file` to `to_file`
(path will be absolute if to_file is an absolute file). This function
is useful to create link in `from_file` to `to_file`. This typical use
case is used in this function description.
If both files are relative, they're expected to be relative to the same
directory.
>>> relative_path( from_file='toto/index.html', to_file='index.html')
'../index.html'
>>> relative_path( from_file='index.html', to_file='toto/index.html')
'toto/index.html'
>>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
'../toto/index.html'
>>> relative_path( from_file='toto/index.html', to_file='/index.html')
'/index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/index.html')
'../index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
'summary.html'
>>> relative_path( from_file='index.html', to_file='index.html')
''
>>> relative_path( from_file='/index.html', to_file='toto/index.html')
Traceback (most recent call last):
File "<string>", line 1, in ?
File "<stdin>", line 37, in relative_path
UnresolvableError
>>> relative_path( from_file='/index.html', to_file='/index.html')
''
>>>
:type from_file: str
:param from_file: source file (where links will be inserted)
:type to_file: str
:param to_file: target file (on which links point)
:raise UnresolvableError: if it has been unable to guess a correct path
:rtype: str
:return: the relative path of `to_file` from `from_file`
"""
from_file = normpath(from_file)
to_file = normpath(to_file)
if from_file == to_file:
return ''
if isabs(to_file):
if not isabs(from_file):
return to_file
elif isabs(from_file):
raise UnresolvableError()
from_parts = from_file.split(sep)
to_parts = to_file.split(sep)
idem = 1
result = []
while len(from_parts) > 1:
dirname = from_parts.pop(0)
if idem and len(to_parts) > 1 and dirname == to_parts[0]:
to_parts.pop(0)
else:
idem = 0
result.append('..')
result += to_parts
return sep.join(result)
def norm_read(path):
"""Return the content of the file with normalized line feeds.
:type path: str
:param path: path to the file to read
:rtype: str
:return: the content of the file with normalized line feeds
"""
return open(path, 'U').read()
norm_read = deprecated("use \"open(path, 'U').read()\"")(norm_read)
def norm_open(path):
"""Return a stream for a file with content with normalized line feeds.
:type path: str
:param path: path to the file to open
:rtype: file or StringIO
:return: the opened file with normalized line feeds
"""
return open(path, 'U')
norm_open = deprecated("use \"open(path, 'U')\"")(norm_open)
def lines(path, comments=None):
"""Return a list of non empty lines in the file located at `path`.
:type path: str
:param path: path to the file
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
:return:
      a list of stripped lines in the file, without empty and commented
lines
:warning: at some point this function will probably return an iterator
"""
stream = open(path, 'U')
result = stream_lines(stream, comments)
stream.close()
return result
def stream_lines(stream, comments=None):
"""Return a list of non empty lines in the given `stream`.
:type stream: object implementing 'xreadlines' or 'readlines'
:param stream: file like object
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
:return:
      a list of stripped lines in the file, without empty and commented
lines
:warning: at some point this function will probably return an iterator
"""
try:
readlines = stream.xreadlines
except AttributeError:
readlines = stream.readlines
result = []
for line in readlines():
line = line.strip()
if line and (comments is None or not line.startswith(comments)):
result.append(line)
return result
def export(from_dir, to_dir,
blacklist=BASE_BLACKLIST, ignore_ext=IGNORED_EXTENSIONS,
verbose=0):
"""Make a mirror of `from_dir` in `to_dir`, omitting directories and
files listed in the black list or ending with one of the given
extensions.
:type from_dir: str
:param from_dir: directory to export
:type to_dir: str
:param to_dir: destination directory
:type blacklist: list or tuple
:param blacklist:
list of files or directories to ignore, default to the content of
`BASE_BLACKLIST`
:type ignore_ext: list or tuple
:param ignore_ext:
list of extensions to ignore, default to the content of
`IGNORED_EXTENSIONS`
:type verbose: bool
:param verbose:
flag indicating whether information about exported files should be
printed to stderr, default to False
"""
try:
mkdir(to_dir)
except OSError:
pass # FIXME we should use "exists" if the point is about existing dir
# else (permission problems?) shouldn't return / raise ?
for directory, dirnames, filenames in walk(from_dir):
for norecurs in blacklist:
try:
dirnames.remove(norecurs)
except ValueError:
continue
for dirname in dirnames:
src = join(directory, dirname)
dest = to_dir + src[len(from_dir):]
if isdir(src):
if not exists(dest):
mkdir(dest)
for filename in filenames:
# don't include binary files
# endswith does not accept tuple in 2.4
if any([filename.endswith(ext) for ext in ignore_ext]):
continue
src = join(directory, filename)
dest = to_dir + src[len(from_dir):]
if verbose:
print >> sys.stderr, src, '->', dest
if exists(dest):
remove(dest)
shutil.copy2(src, dest)
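# Usage sketch (editor's addition, hypothetical paths): mirror a source tree
# while skipping blacklisted directories and ignored extensions.
#
#     export('/home/user/project', '/tmp/project-mirror', verbose=1)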
def remove_dead_links(directory, verbose=0):
"""Recursively traverse directory and remove all dead links.
:type directory: str
:param directory: directory to cleanup
:type verbose: bool
:param verbose:
flag indicating whether information about deleted links should be
printed to stderr, default to False
"""
    for dirpath, dirnames, filenames in walk(directory):
        for filename in dirnames + filenames:
src = join(dirpath, filename)
if islink(src) and not exists(src):
if verbose:
print 'remove dead link', src
remove(src)
|
EDUlib/edx-platform
|
refs/heads/master
|
lms/djangoapps/ccx/tests/factories.py
|
4
|
"""
Dummy factories for tests
"""
from factory import Sequence, SubFactory
from factory.django import DjangoModelFactory
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.ccx.models import CustomCourseForEdX
# pylint: disable=missing-class-docstring
class CcxFactory(DjangoModelFactory):
class Meta:
model = CustomCourseForEdX
display_name = Sequence(lambda n: f'Test CCX #{n}') # pylint: disable=unnecessary-lambda
id = None # pylint: disable=invalid-name
coach = SubFactory(UserFactory)
|
gdgellatly/OCB1
|
refs/heads/7.0
|
openerp/service/cron.py
|
57
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Cron jobs scheduling
Cron jobs are defined in the ir_cron table/model. This module deals with all
cron jobs, for all databases of a single OpenERP server instance.
"""
import logging
import threading
import time
from datetime import datetime
import openerp
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
def cron_runner(number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
            while registry.ready:
# acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
                # TODO why isn't openerp.addons.base defined?
import sys
base = sys.modules['addons.base']
acquired = base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def start_service():
""" Start the above runner function in a daemon thread.
    The thread is a typical daemon thread: it never quits on its own and is
    simply killed when the main process exits - with no consequence (the
    processing threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
        def target(number=i):
            # bind the loop counter now; the thread may not start running
            # before the loop advances i
            cron_runner(number)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def stop_service():
pass
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
larsks/blivet
|
refs/heads/master
|
blivet/arch.py
|
2
|
#
# arch.py
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2013
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Jeremy Katz <katzj@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Peter Jones <pjones@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Will Woods <wwoods@redhat.com>
# Dennis Gilmore <dgilmore@ausil.us>
# David Marlin <dmarlin@redhat.com>
#
# The absolute_import is needed so that we can
# import the "platform" module from the Python
# standard library but not the local blivet module
# that is also called "platform".
from __future__ import absolute_import
import os
from .flags import flags
from .storage_log import log_exception_info
from .util import open # pylint: disable=redefined-builtin
import logging
log = logging.getLogger("blivet")
# DMI information paths
DMI_CHASSIS_VENDOR = "/sys/class/dmi/id/chassis_vendor"
def getPPCMachine():
"""
:return: The PPC machine type, or None if not PPC.
:rtype: string
"""
if not isPPC():
return None
# ppc machine hash
# Note: This is a substring match!
ppcType = { 'Mac' : 'PMac',
'Book' : 'PMac',
'CHRP' : 'pSeries',
'CHRP IBM' : 'pSeries', # @TODO the CHRP entry above should match this
'Pegasos' : 'Pegasos',
'Efika' : 'Efika',
'iSeries' : 'iSeries',
'pSeries' : 'pSeries',
'PReP' : 'PReP',
'Amiga' : 'APUS',
'Gemini' : 'Gemini',
'Shiner' : 'ANS',
'BRIQ' : 'BRIQ',
'Teron' : 'Teron',
'AmigaOne' : 'Teron',
'Maple' : 'pSeries',
'Cell' : 'pSeries',
'Momentum' : 'pSeries',
'PS3' : 'PS3',
'PowerNV' : 'pSeries'
}
machine = None
platform = None
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if 'machine' in line:
machine = line.split(':')[1]
elif 'platform' in line:
platform = line.split(':')[1]
for part in (machine, platform):
if part is None:
continue
for _type in ppcType.items():
if _type[0] in part:
return _type[1]
log.warning("Unknown PowerPC machine type: %s platform: %s", machine, platform)
return None
def getPPCMacID():
"""
:return: The powermac machine type, or None if not PPC or a powermac.
:rtype: string
"""
if not isPPC():
return None
if getPPCMachine() != "PMac":
return None
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if 'machine' in line:
machine = line.split(':')[1]
return machine.strip()
log.warning("No Power Mac machine id")
return None
def getPPCMacGen():
"""
:return: The PPC generation, or None if not PPC or a powermac.
:rtype: string
"""
# XXX: should NuBus be here?
# Note: This is a substring match!
pmacGen = ['OldWorld', 'NewWorld', 'NuBus']
if not isPPC():
return None
if getPPCMachine() != "PMac":
return None
gen = None
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if 'pmac-generation' in line:
gen = line.split(':')[1]
break
if gen is None:
log.warning("Unable to find pmac-generation")
return None
for _type in pmacGen:
if _type in gen:
return _type
log.warning("Unknown Power Mac generation: %s", gen)
return None
def getPPCMacBook():
"""
:return: True if the hardware is an iBook or PowerBook, False otherwise.
    :rtype: boolean
"""
if not isPPC():
return False
if getPPCMachine() != "PMac":
return False
#@TBD - Search for 'book' anywhere in cpuinfo? Shouldn't this be more restrictive?
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if 'book' in line.lower():
return True
return False
def isAARCH64():
"""
:return: True if the hardware supports Aarch64, False otherwise.
:rtype: boolean
"""
return os.uname()[4] == 'aarch64'
def getARMMachine():
"""
:return: The ARM processor variety type, or None if not ARM.
:rtype: string
"""
if not isARM():
return None
if flags.arm_platform:
return flags.arm_platform
    armMachine = os.uname()[2].rpartition('.')[2]
if armMachine.startswith('arm'):
# @TBD - Huh? Don't you want the arm machine name here?
return None
else:
return armMachine
def isCell():
"""
:return: True if the hardware is the Cell platform, False otherwise.
:rtype: boolean
"""
if not isPPC():
return False
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if 'Cell' in line:
return True
return False
def isMactel():
"""
:return: True if the hardware is an Intel-based Apple Mac, False otherwise.
:rtype: boolean
"""
if not isX86():
mactel = False
elif not os.path.isfile(DMI_CHASSIS_VENDOR):
mactel = False
else:
try:
buf = open(DMI_CHASSIS_VENDOR).read()
mactel = ("apple" in buf.lower())
except UnicodeDecodeError:
mactel = False
return mactel
def isEfi():
"""
:return: True if the hardware supports EFI, False otherwise.
:rtype: boolean
"""
# XXX need to make sure efivars is loaded...
if os.path.exists("/sys/firmware/efi"):
return True
else:
return False
# Architecture checking functions
def isX86(bits=None):
""":return: True if the hardware supports X86, False otherwise.
:rtype: boolean
:param bits: The number of bits used to define a memory address.
:type bits: int
"""
arch = os.uname()[4]
# x86 platforms include:
# i*86
# athlon*
# x86_64
# amd*
# ia32e
if bits is None:
if (arch.startswith('i') and arch.endswith('86')) or \
arch.startswith('athlon') or arch.startswith('amd') or \
arch == 'x86_64' or arch == 'ia32e':
return True
elif bits == 32:
if arch.startswith('i') and arch.endswith('86'):
return True
elif bits == 64:
if arch.startswith('athlon') or arch.startswith('amd') or \
arch == 'x86_64' or arch == 'ia32e':
return True
return False
def isPPC(bits=None):
"""
:return: True if the hardware supports PPC, False otherwise.
:rtype: boolean
:param bits: The number of bits used to define a memory address.
:type bits: int
"""
arch = os.uname()[4]
if bits is None:
if arch in ('ppc', 'ppc64', 'ppc64le'):
return True
elif bits == 32:
if arch == 'ppc':
return True
elif bits == 64:
if arch in ('ppc64', 'ppc64le'):
return True
return False
def isS390():
"""
    :return: True if the hardware supports S390, False otherwise.
:rtype: boolean
"""
return os.uname()[4].startswith('s390')
def isIA64():
"""
:return: True if the hardware supports IA64, False otherwise.
:rtype: boolean
"""
return os.uname()[4] == 'ia64'
def isAlpha():
"""
:return: True if the hardware supports Alpha, False otherwise.
:rtype: boolean
"""
return os.uname()[4].startswith('alpha')
def isARM():
"""
:return: True if the hardware supports ARM, False otherwise.
:rtype: boolean
"""
return os.uname()[4].startswith('arm')
def getArch():
"""
:return: The hardware architecture
:rtype: string
"""
if isX86(bits=32):
return 'i386'
elif isX86(bits=64):
return 'x86_64'
elif isPPC(bits=32):
return 'ppc'
elif isPPC(bits=64):
# ppc64 and ppc64le are distinct architectures
return os.uname()[4]
elif isAARCH64():
return 'aarch64'
elif isAlpha():
return 'alpha'
elif isARM():
return 'arm'
else:
return os.uname()[4]
def numBits():
""" Return an integer representing the length
of the "word" used by the current architecture
-> it is usually either 32 or 64
:return: number of bits for the current architecture
or None if the number could not be determined
:rtype: integer or None
"""
try:
import platform
nbits = platform.architecture()[0]
# the string is in the format:
# "<number>bit"
# so we remove the bit suffix and convert the
# number to an integer
(nbits, _rest) = nbits.split("bit", 1)
return int(nbits)
except Exception: # pylint: disable=broad-except
log_exception_info(log.error, "architecture word size detection failed")
return None
|
zeroblade1984/Cancro
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
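# Worked example for the helpers above: nsecs(1, 500000000) == 1500000000,
# which nsecs_secs() splits back into 1 and nsecs_nsecs() into 500000000.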
def nsecs_str(nsecs):
    # no trailing comma here: return the formatted string, not a 1-tuple
    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
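# Worked example: add_stats(d, 'lat', 10) then add_stats(d, 'lat', 30) leaves
# d['lat'] == (10, 30, 20, 2), i.e. (min, max, running average, count).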
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
cogmission/nupic
|
refs/heads/master
|
examples/prediction/experiments/inspect_test.py
|
50
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.engine import *
import os
import sys
os.chdir(sys.argv[1] + '/networks')
for name in ('trained_l1.nta', 'trained.nta'):
if os.path.exists(name):
break
n = Network(name)
n.inspect()
|
balister/linux-omap-philip
|
refs/heads/e100-2.6.38
|
scripts/rt-tester/rt-tester.py
|
1094
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"lockbkl" : "9",
"unlockbkl" : "10",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
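# Worked example for the "M" (mutex state) branch above: a status value of
# "123" with arg "1" gives 123 / 10**1 % 10 == 2, which the "blocked" opcode
# ["M", "eq", 2] from test_opcodes matches.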
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
                        # Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
hazmatzo/oppia
|
refs/heads/develop
|
core/controllers/admin_test.py
|
30
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the admin page."""
__author__ = 'Sean Lip'
from core.controllers import editor
from core.controllers import pages
from core.domain import config_domain
from core.tests import test_utils
import feconf
SITE_FORUM_URL = 'siteforum.url'
class AdminIntegrationTest(test_utils.GenericTestBase):
def setUp(self):
"""Complete the signup process for self.ADMIN_EMAIL."""
super(AdminIntegrationTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
def test_admin_page(self):
"""Test that the admin page shows the expected sections."""
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.testapp.get('/admin')
self.assertEqual(response.status_int, 200)
response.mustcontain(
'Performance Counters',
'Total processing time for all JSON responses',
'Configuration',
'Reload a single exploration',
'three_balls')
self.logout()
def test_admin_page_rights(self):
"""Test access rights to the admin page."""
response = self.testapp.get('/admin')
self.assertEqual(response.status_int, 302)
# Login as a non-admin.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/admin', expect_errors=True)
self.assertEqual(response.status_int, 401)
self.logout()
# Login as an admin.
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.testapp.get('/admin')
self.assertEqual(response.status_int, 200)
self.logout()
def test_change_configuration_property(self):
"""Test that configuration properties can be changed."""
TEST_STRING = self.UNICODE_TEST_STRING
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.get_json('/adminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': editor.MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE,
}, response_config_properties[editor.MODERATOR_REQUEST_FORUM_URL.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
editor.MODERATOR_REQUEST_FORUM_URL.name: TEST_STRING
}
}
self.post_json('/adminhandler', payload, csrf_token)
response_dict = self.get_json('/adminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': TEST_STRING
}, response_config_properties[editor.MODERATOR_REQUEST_FORUM_URL.name])
self.logout()
def test_change_about_page_config_property(self):
"""Test that the correct variables show up on the about page."""
        # Navigate to the about page. The forum URL is not set yet.
response = self.testapp.get('/about')
self.assertIn('https://site/forum/url', response.body)
self.assertNotIn(SITE_FORUM_URL, response.body)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
pages.SITE_FORUM_URL.name: SITE_FORUM_URL
}
}, csrf_token)
self.logout()
        # Navigate to the about page again. The forum URL is now set.
response = self.testapp.get('/about')
self.assertNotIn('https://site/forum/url', response.body)
self.assertIn(SITE_FORUM_URL, response.body)
def test_change_rights(self):
"""Test that the correct role indicators show up on app pages."""
BOTH_MODERATOR_AND_ADMIN_EMAIL = 'moderator.and.admin@example.com'
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup('superadmin@example.com', 'superadm1n')
self.signup(BOTH_MODERATOR_AND_ADMIN_EMAIL, 'moderatorandadm1n')
# Navigate to any page. The role is not set.
self.testapp.get('/').mustcontain(no=['/moderator', '/admin'])
# Log in as a superadmin. This gives access to /admin.
self.login('superadmin@example.com', is_super_admin=True)
self.testapp.get('/').mustcontain('/admin', no=['/moderator'])
# Add a moderator, an admin, and a person with both roles, then log
# out.
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: [
self.ADMIN_EMAIL, BOTH_MODERATOR_AND_ADMIN_EMAIL],
config_domain.MODERATOR_EMAILS.name: [
self.MODERATOR_EMAIL, BOTH_MODERATOR_AND_ADMIN_EMAIL],
}
}, csrf_token)
self.logout()
# Log in as a moderator.
self.login(self.MODERATOR_EMAIL)
self.testapp.get(feconf.GALLERY_URL).mustcontain(
'/moderator', no=['/admin'])
self.logout()
# Log in as an admin.
self.login(self.ADMIN_EMAIL)
self.testapp.get(feconf.GALLERY_URL).mustcontain(
'/moderator', no=['/admin'])
self.logout()
# Log in as a both-moderator-and-admin. Only '(Admin)' is shown in the
# navbar.
self.login(BOTH_MODERATOR_AND_ADMIN_EMAIL)
self.testapp.get(feconf.GALLERY_URL).mustcontain(
'/moderator', no=['/admin'])
self.logout()
|
coolbombom/CouchPotato
|
refs/heads/master
|
cherrypy/lib/auth_digest.py
|
101
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """An implementation of the server-side of HTTP Digest Access
Authentication, which is described in :rfc:`2617`.
Example usage, using the built-in get_ha1_dict_plain function which uses a dict
of plaintext passwords as the credentials store::
userpassdict = {'alice' : '4x5istwelve'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
digest_auth = {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'wonderland',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
}
app_config = { '/' : digest_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import time
from cherrypy._cpcompat import parse_http_list, parse_keqv_list
import cherrypy
from cherrypy._cpcompat import md5, ntob
md5_hex = lambda s: md5(ntob(s)).hexdigest()
qop_auth = 'auth'
qop_auth_int = 'auth-int'
valid_qops = (qop_auth, qop_auth_int)
valid_algorithms = ('MD5', 'MD5-sess')
def TRACE(msg):
cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')
# Three helper functions for users of the tool, providing three variants
# of get_ha1() functions for three different kinds of credential stores.
def get_ha1_dict_plain(user_password_dict):
"""Returns a get_ha1 function which obtains a plaintext password from a
dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, with plaintext
passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the
get_ha1 argument to digest_auth().
"""
def get_ha1(realm, username):
password = user_password_dict.get(username)
if password:
return md5_hex('%s:%s:%s' % (username, realm, password))
return None
return get_ha1
def get_ha1_dict(user_ha1_dict):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
dictionary of the form: {username : HA1}.
If you want a dictionary-based authentication scheme, but with
pre-computed HA1 hashes instead of plain-text passwords, use
get_ha1_dict(my_userha1_dict) as the value for the get_ha1
argument to digest_auth().
"""
def get_ha1(realm, username):
        return user_ha1_dict.get(username)
return get_ha1
def get_ha1_file_htdigest(filename):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
flat file with lines of the same format as that produced by the Apache
htdigest utility. For example, for realm 'wonderland', username 'alice',
and password '4x5istwelve', the htdigest line would be::
alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c
If you want to use an Apache htdigest file as the credentials store,
then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
    get_ha1 argument to digest_auth(). It is recommended that the filename
    argument be an absolute path, so lookups keep working if the process's
    working directory changes.
"""
def get_ha1(realm, username):
result = None
f = open(filename, 'r')
for line in f:
u, r, ha1 = line.rstrip().split(':')
if u == username and r == realm:
result = ha1
break
f.close()
return result
return get_ha1
def synthesize_nonce(s, key, timestamp=None):
"""Synthesize a nonce value which resists spoofing and can be checked for staleness.
Returns a string suitable as the value for 'nonce' in the www-authenticate header.
s
A string related to the resource, such as the hostname of the server.
key
A secret string known only to the server.
timestamp
An integer seconds-since-the-epoch timestamp
"""
if timestamp is None:
timestamp = int(time.time())
h = md5_hex('%s:%s:%s' % (timestamp, s, key))
nonce = '%s:%s' % (timestamp, h)
return nonce
def H(s):
"""The hash function H"""
return md5_hex(s)
class HttpDigestAuthorization (object):
"""Class to parse a Digest Authorization header and perform re-calculation
of the digest.
"""
def errmsg(self, s):
return 'Digest Authorization header: %s' % s
def __init__(self, auth_header, http_method, debug=False):
self.http_method = http_method
self.debug = debug
scheme, params = auth_header.split(" ", 1)
self.scheme = scheme.lower()
if self.scheme != 'digest':
raise ValueError('Authorization scheme is not "Digest"')
self.auth_header = auth_header
# make a dict of the params
items = parse_http_list(params)
paramsd = parse_keqv_list(items)
self.realm = paramsd.get('realm')
self.username = paramsd.get('username')
self.nonce = paramsd.get('nonce')
self.uri = paramsd.get('uri')
self.method = paramsd.get('method')
self.response = paramsd.get('response') # the response digest
self.algorithm = paramsd.get('algorithm', 'MD5')
self.cnonce = paramsd.get('cnonce')
self.opaque = paramsd.get('opaque')
self.qop = paramsd.get('qop') # qop
self.nc = paramsd.get('nc') # nonce count
# perform some correctness checks
if self.algorithm not in valid_algorithms:
raise ValueError(self.errmsg("Unsupported value for algorithm: '%s'" % self.algorithm))
has_reqd = self.username and \
self.realm and \
self.nonce and \
self.uri and \
self.response
if not has_reqd:
raise ValueError(self.errmsg("Not all required parameters are present."))
if self.qop:
if self.qop not in valid_qops:
raise ValueError(self.errmsg("Unsupported value for qop: '%s'" % self.qop))
if not (self.cnonce and self.nc):
raise ValueError(self.errmsg("If qop is sent then cnonce and nc MUST be present"))
else:
if self.cnonce or self.nc:
raise ValueError(self.errmsg("If qop is not sent, neither cnonce nor nc can be present"))
def __str__(self):
return 'authorization : %s' % self.auth_header
def validate_nonce(self, s, key):
"""Validate the nonce.
Returns True if nonce was generated by synthesize_nonce() and the timestamp
is not spoofed, else returns False.
s
A string related to the resource, such as the hostname of the server.
key
A secret string known only to the server.
Both s and key must be the same values which were used to synthesize the nonce
we are trying to validate.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
s_timestamp, s_hashpart = synthesize_nonce(s, key, timestamp).split(':', 1)
is_valid = s_hashpart == hashpart
if self.debug:
TRACE('validate_nonce: %s' % is_valid)
return is_valid
except ValueError: # split() error
pass
return False
def is_nonce_stale(self, max_age_seconds=600):
"""Returns True if a validated nonce is stale. The nonce contains a
timestamp in plaintext and also a secure hash of the timestamp. You should
first validate the nonce to ensure the plaintext timestamp is not spoofed.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
if int(timestamp) + max_age_seconds > int(time.time()):
return False
except ValueError: # int() error
pass
if self.debug:
TRACE("nonce is stale")
return True
def HA2(self, entity_body=''):
"""Returns the H(A2) string. See :rfc:`2617` section 3.2.2.3."""
# RFC 2617 3.2.2.3
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
# A2 = method ":" digest-uri-value
#
# If the "qop" value is "auth-int", then A2 is:
# A2 = method ":" digest-uri-value ":" H(entity-body)
if self.qop is None or self.qop == "auth":
a2 = '%s:%s' % (self.http_method, self.uri)
elif self.qop == "auth-int":
a2 = "%s:%s:%s" % (self.http_method, self.uri, H(entity_body))
else:
# in theory, this should never happen, since I validate qop in __init__()
raise ValueError(self.errmsg("Unrecognized value for qop!"))
return H(a2)
def request_digest(self, ha1, entity_body=''):
"""Calculates the Request-Digest. See :rfc:`2617` section 3.2.2.1.
ha1
The HA1 string obtained from the credentials store.
entity_body
If 'qop' is set to 'auth-int', then A2 includes a hash
of the "entity body". The entity body is the part of the
message which follows the HTTP headers. See :rfc:`2617` section
4.3. This refers to the entity the user agent sent in the request which
has the Authorization header. Typically GET requests don't have an entity,
and POST requests do.
"""
ha2 = self.HA2(entity_body)
# Request-Digest -- RFC 2617 3.2.2.1
if self.qop:
req = "%s:%s:%s:%s:%s" % (self.nonce, self.nc, self.cnonce, self.qop, ha2)
else:
req = "%s:%s" % (self.nonce, ha2)
# RFC 2617 3.2.2.2
#
# If the "algorithm" directive's value is "MD5" or is unspecified, then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
#
# If the "algorithm" directive's value is "MD5-sess", then A1 is
# calculated only once - on the first request by the client following
# receipt of a WWW-Authenticate challenge from the server.
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
if self.algorithm == 'MD5-sess':
ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))
digest = H('%s:%s' % (ha1, req))
return digest
def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stale=False):
"""Constructs a WWW-Authenticate header for Digest authentication."""
if qop not in valid_qops:
raise ValueError("Unsupported value for qop: '%s'" % qop)
if algorithm not in valid_algorithms:
raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)
if nonce is None:
nonce = synthesize_nonce(realm, key)
s = 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
realm, nonce, algorithm, qop)
if stale:
s += ', stale="true"'
return s
def digest_auth(realm, get_ha1, key, debug=False):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Digest Access Authentication, as specified in :rfc:`2617`.
If the request has an 'authorization' header with a 'Digest' scheme, this
tool authenticates the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not "Digest", or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Digest header.
realm
A string containing the authentication realm.
get_ha1
A callable which looks up a username in a credentials store
and returns the HA1 string, which is defined in the RFC to be
MD5(username : realm : password). The function's signature is:
``get_ha1(realm, username)``
where username is obtained from the request's 'authorization' header.
If username is not found in the credentials store, get_ha1() returns
None.
key
A secret string known only to the server, used in the synthesis of nonces.
"""
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
nonce_is_stale = False
if auth_header is not None:
try:
auth = HttpDigestAuthorization(auth_header, request.method, debug=debug)
except ValueError:
raise cherrypy.HTTPError(400, "The Authorization header could not be parsed.")
if debug:
TRACE(str(auth))
if auth.validate_nonce(realm, key):
ha1 = get_ha1(realm, auth.username)
if ha1 is not None:
# note that for request.body to be available we need to hook in at
# before_handler, not on_start_resource like 3.1.x digest_auth does.
digest = auth.request_digest(ha1, entity_body=request.body)
if digest == auth.response: # authenticated
if debug:
TRACE("digest matches auth.response")
# Now check if nonce is stale.
# The choice of ten minutes' lifetime for nonce is somewhat arbitrary
nonce_is_stale = auth.is_nonce_stale(max_age_seconds=600)
if not nonce_is_stale:
request.login = auth.username
if debug:
TRACE("authentication of %s successful" % auth.username)
return
# Respond with 401 status and a WWW-Authenticate header
header = www_authenticate(realm, key, stale=nonce_is_stale)
if debug:
TRACE(header)
cherrypy.serving.response.headers['WWW-Authenticate'] = header
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
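# Illustrative sketch, not part of the upstream module: exercise the nonce
# helpers and the digest calculation with the realm, key and credentials from
# the module docstring above. The Authorization header below is synthetic and
# its 'response' value is a placeholder.
if __name__ == '__main__':
    example_key = 'a565c27146791cfb'
    example_nonce = synthesize_nonce('wonderland', example_key)
    # A nonce is '<timestamp>:<md5-hex>'; re-synthesizing with the same
    # timestamp, s and key must reproduce it exactly.
    ts = example_nonce.split(':', 1)[0]
    assert synthesize_nonce('wonderland', example_key, ts) == example_nonce
    # The challenge a server would send in its 401 response:
    print(www_authenticate('wonderland', example_key, nonce=example_nonce))
    # The Request-Digest a client would have to supply for GET /:
    ha1 = get_ha1_dict_plain({'alice': '4x5istwelve'})('wonderland', 'alice')
    auth = HttpDigestAuthorization(
        'Digest username="alice", realm="wonderland", nonce="%s", '
        'uri="/", response="placeholder"' % example_nonce, 'GET')
    print(auth.request_digest(ha1))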
|
bradh/openchange
|
refs/heads/master
|
script/bug-analysis/buganalysis/mail.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) Enrique J. Hernández 2014
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Methods related to mail notifications.
This is specific to OpenChange.
"""
from email.mime.text import MIMEText
import smtplib
def notify_user_email(from_addr, email, tracker_url,
smtp_addr='localhost', duplicated=False):
"""Notify a user after sending a report to know track the issue later.
:param str from_addr: the from email address
:param str email: the user's email address
:param str tracker_url: the tracker URL
    :param str smtp_addr: the SMTP server
:param bool duplicated: indicating if the sent crash report is duplicated or not
"""
to_addr = email
if duplicated:
text = """This crash report is a duplicate from {0}.""".format(tracker_url)
else:
text = """The crash report was created at {0}.""".format(tracker_url)
text += """
\n\nYou can follow the crash report fixing status there.\n\n
Thanks very much for reporting it!\n
----
OpenChange team"""
msg = MIMEText(text, 'plain')
msg['Subject'] = '[OpenChange crash report] Your crash report was uploaded!'
msg['From'] = from_addr
msg['To'] = to_addr
s = smtplib.SMTP(smtp_addr)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(from_addr, to_addr, msg.as_string())
s.quit()
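if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: every address and
    # the tracker URL below are placeholders, and the call only succeeds if an
    # SMTP server is listening on localhost.
    notify_user_email('crash-reports@example.org', 'user@example.com',
                      'https://tracker.example.com/crash/1234',
                      smtp_addr='localhost', duplicated=False)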
|
timsloan/prometeo-erp
|
refs/heads/master
|
sales/signals.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.db.models.signals import post_save
from prometeo.core.auth.signals import *
from models import *
## CONNECTIONS ##
post_save.connect(update_author_permissions, BankAccount, dispatch_uid="update_bankaccount_permissions")
post_save.connect(update_author_permissions, SalesInvoice, dispatch_uid="update_salesinvoice_permissions")
|
edx-solutions/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/tests/test_enrollment_store_provider.py
|
4
|
"""
Exercises tests on the base_store_provider file
"""
from django.test import TestCase
from lms.djangoapps.instructor.enrollment_report import AbstractEnrollmentReportProvider
from lms.djangoapps.instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
class BadImplementationAbstractEnrollmentReportProvider(AbstractEnrollmentReportProvider):
"""
    Test implementation of EnrollmentProvider to assert that non-implemented methods
    raise the correct exceptions
"""
def get_user_profile(self, user_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_user_profile(user_id)
def get_enrollment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_enrollment_info(user, course_id)
def get_payment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_payment_info(user, course_id)
class TestBaseNotificationDataProvider(TestCase):
"""
Cover the EnrollmentReportProvider class
"""
    def test_cannot_create_instance(self):
        """
        AbstractEnrollmentReportProvider is an abstract class and we should not
        be able to create an instance of it
        """
        with self.assertRaises(TypeError):
            # instantiating the abstract provider directly must raise TypeError
            AbstractEnrollmentReportProvider()
def test_get_provider(self):
"""
Makes sure we get an instance of the registered enrollment provider
"""
provider = PaidCourseEnrollmentReportProvider()
self.assertIsNotNone(provider)
self.assertTrue(isinstance(provider, PaidCourseEnrollmentReportProvider))
def test_base_methods_exceptions(self):
"""
        Asserts that all base-methods on the EnrollmentProvider interface will throw
        a NotImplementedError
"""
bad_provider = BadImplementationAbstractEnrollmentReportProvider()
with self.assertRaises(NotImplementedError):
bad_provider.get_enrollment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_payment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_user_profile(None)
|
InakiZabala/odoomrp-wip
|
refs/heads/8.0
|
mrp_subcontracting/models/mrp_production.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class MrpProduction(models.Model):
_inherit = 'mrp.production'
@api.one
def _created_purchases(self):
cond = [('mrp_production', '=', self.id)]
self.created_purchases = len(self.env['purchase.order'].search(cond))
@api.one
def _created_outpickings(self):
picking_obj = self.env['stock.picking']
cond = [('mrp_production', '=', self.id)]
self.created_outpickings = len(
picking_obj.search(cond).filtered(
lambda x: x.picking_type_id.code == 'outgoing'))
@api.one
def _created_inpickings(self):
picking_obj = self.env['stock.picking']
cond = [('mrp_production', '=', self.id)]
        self.created_inpickings = len(
picking_obj.search(cond).filtered(
lambda x: x.picking_type_id.code == 'incoming'))
created_purchases = fields.Integer(
string='Created Purchases', readonly=True,
compute='_created_purchases', track_visibility='always')
created_outpickings = fields.Integer(
string='Created Out Pickings', readonly=True,
compute='_created_outpickings', track_visibility='always')
created_inpickings = fields.Integer(
string='Created In Pickings', readonly=True,
compute='_created_inpickings', track_visibility='always')
@api.one
def action_confirm(self):
res = super(MrpProduction, self).action_confirm()
for move in self.move_lines:
if move.work_order.routing_wc_line.external:
ptype = move.work_order.routing_wc_line.picking_type_id
move.location_id = ptype.default_location_src_id.id
move.location_dest_id = ptype.default_location_dest_id.id
for wc_line in self.workcenter_lines:
if wc_line.external:
wc_line.procurement_order = (
self._create_external_procurement(wc_line))
return res
    def _prepare_external_procurement(self, wc_line):
wc = wc_line.routing_wc_line
name = "%s: %s" % (wc_line.production_id.name, wc_line.name)
return {
'name': name,
'origin': name,
'product_id': wc.semifinished_id.id,
'product_qty': self.product_qty,
'product_uom': wc.semifinished_id.uom_id.id,
'location_id': self.location_dest_id.id,
'production_id': self.id,
'warehouse_id': wc.picking_type_id.warehouse_id.id,
'mrp_operation': wc_line.id,
}
def _create_external_procurement(self, wc_line):
procurement = self.env['procurement.order'].create(
            self._prepare_external_procurement(wc_line))
procurement.run()
return procurement.id
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
external = fields.Boolean(related='routing_wc_line.external', store=True,
readonly=True, copy=True)
purchase_order = fields.Many2one('purchase.order', 'Purchase Order')
out_picking = fields.Many2one('stock.picking', 'Out Picking')
in_picking = fields.Many2one('stock.picking', 'In Picking')
procurement_order = fields.Many2one('procurement.order',
'Procurement Order')
|
bjwbell/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/__init__.py
|
1447
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
mcltn/ansible-modules-extras
|
refs/heads/devel
|
network/f5/bigip_gtm_datacenter.py
|
23
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
    manipulate the data center definitions in a BIG-IP.
version_added: "2.2"
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
enabled:
description:
- Whether the data center should be enabled. At least one of C(state) and
        C(enabled) is required.
choices:
- yes
- no
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: true
state:
description:
- The state of the datacenter on the BIG-IP. When C(present), guarantees
that the data center exists. When C(absent) removes the data center
from the BIG-IP. C(enabled) will enable the data center and C(disabled)
will ensure the data center is disabled. At least one of state and
        enabled is required.
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: "big-ip"
name: "New York"
location: "222 West 23rd"
delegate_to: localhost
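# A second, illustrative example (not in the original docs); it assumes the
# same connection details as above and uses the module's 'enabled' option.
- name: Disable data center "New York"
  bigip_gtm_datacenter:
      server: "big-ip"
      name: "New York"
      enabled: no
  delegate_to: localhost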
'''
RETURN = '''
contact:
description: The contact that was set on the datacenter
returned: changed
type: string
sample: "admin@root.local"
description:
description: The description that was set for the datacenter
returned: changed
type: string
sample: "Datacenter in NYC"
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
location:
description: The location that is set for the datacenter
returned: changed
type: string
sample: "222 West 23rd"
name:
description: Name of the datacenter being manipulated
returned: changed
type: string
sample: "foo"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpGtmDatacenter(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def create(self):
params = dict()
check_mode = self.params['check_mode']
contact = self.params['contact']
description = self.params['description']
location = self.params['location']
name = self.params['name']
partition = self.params['partition']
enabled = self.params['enabled']
# Specifically check for None because a person could supply empty
# values which would technically still be valid
if contact is not None:
params['contact'] = contact
if description is not None:
params['description'] = description
if location is not None:
params['location'] = location
        # Check the boolean value itself so that 'enabled: no' yields a
        # disabled data center instead of silently enabling it
        if enabled is not None:
            if enabled:
                params['enabled'] = True
            else:
                params['disabled'] = True
params['name'] = name
params['partition'] = partition
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
d = self.api.tm.gtm.datacenters.datacenter
d.create(**params)
if not self.exists():
raise F5ModuleError("Failed to create the datacenter")
return True
def read(self):
"""Read information and transform it
The values that are returned by BIG-IP in the f5-sdk can have encoding
attached to them as well as be completely missing in some cases.
Therefore, this method will transform the data from the BIG-IP into a
format that is more easily consumable by the rest of the class and the
parameters that are supported by the module.
"""
p = dict()
name = self.params['name']
partition = self.params['partition']
r = self.api.tm.gtm.datacenters.datacenter.load(
name=name,
partition=partition
)
if hasattr(r, 'servers'):
            # Deliberately using sets to suppress duplicates
p['servers'] = set([str(x) for x in r.servers])
if hasattr(r, 'contact'):
p['contact'] = str(r.contact)
if hasattr(r, 'location'):
p['location'] = str(r.location)
if hasattr(r, 'description'):
p['description'] = str(r.description)
if r.enabled:
p['enabled'] = True
else:
p['enabled'] = False
p['name'] = name
return p
def update(self):
changed = False
params = dict()
current = self.read()
check_mode = self.params['check_mode']
contact = self.params['contact']
description = self.params['description']
location = self.params['location']
name = self.params['name']
partition = self.params['partition']
enabled = self.params['enabled']
if contact is not None:
if 'contact' in current:
if contact != current['contact']:
params['contact'] = contact
else:
params['contact'] = contact
if description is not None:
if 'description' in current:
if description != current['description']:
params['description'] = description
else:
params['description'] = description
if location is not None:
if 'location' in current:
if location != current['location']:
params['location'] = location
else:
params['location'] = location
if enabled is not None:
if current['enabled'] != enabled:
if enabled is True:
params['enabled'] = True
params['disabled'] = False
else:
params['disabled'] = True
params['enabled'] = False
if params:
changed = True
if check_mode:
return changed
self.cparams = camel_dict_to_snake_dict(params)
else:
return changed
r = self.api.tm.gtm.datacenters.datacenter.load(
name=name,
partition=partition
)
r.update(**params)
r.refresh()
return True
def delete(self):
params = dict()
check_mode = self.params['check_mode']
params['name'] = self.params['name']
params['partition'] = self.params['partition']
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
dc = self.api.tm.gtm.datacenters.datacenter.load(**params)
dc.delete()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def present(self):
changed = False
if self.exists():
changed = self.update()
else:
changed = self.create()
return changed
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def exists(self):
name = self.params['name']
partition = self.params['partition']
return self.api.tm.gtm.datacenters.datacenter.exists(
name=name,
partition=partition
)
def flush(self):
result = dict()
state = self.params['state']
enabled = self.params['enabled']
        if state is None and enabled is None:
            raise F5ModuleError("Neither 'state' nor 'enabled' set")
        changed = False
        try:
if state == "present":
changed = self.present()
# Ensure that this field is not returned to the user since it
# is not a valid parameter to the module.
if 'disabled' in self.cparams:
del self.cparams['disabled']
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
contact=dict(required=False, default=None),
description=dict(required=False, default=None),
enabled=dict(required=False, type='bool', default=None, choices=BOOLEANS),
location=dict(required=False, default=None),
name=dict(required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpGtmDatacenter(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_virtualservice.py
|
29
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_virtualservice
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of VirtualService Avi RESTful Object
description:
- This module is used to configure VirtualService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
active_standby_se_tag:
description:
- This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
- This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
- Virtualservices with different tags will have different active serviceengines.
            - If one of the serviceengines in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
- Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
- Redistribution is based on the auto redistribute property of the serviceenginegroup.
- Enum options - ACTIVE_STANDBY_SE_1, ACTIVE_STANDBY_SE_2.
- Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
analytics_policy:
description:
- Determines analytics settings for the application.
analytics_profile_ref:
description:
- Specifies settings related to analytics.
- It is a reference to an object of type analyticsprofile.
application_profile_ref:
description:
- Enable application layer specific features for the virtual service.
- It is a reference to an object of type applicationprofile.
auto_allocate_floating_ip:
description:
- Auto-allocate floating/elastic ip from the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
auto_allocate_ip:
description:
- Auto-allocate vip from the provided subnet.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
availability_zone:
description:
- Availability-zone to place the virtual service.
- Field deprecated in 17.1.1.
avi_allocated_fip:
description:
- (internal-use) fip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
avi_allocated_vip:
description:
- (internal-use) vip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
bulk_sync_kvcache:
description:
- (this is a beta feature).
- Sync key-value cache to the new ses when vs is scaled out.
- For ex ssl sessions are stored using vs's key-value cache.
- When the vs is scaled out, the ssl session information is synced to the new se, allowing existing ssl sessions to be reused on the new se.
- Field introduced in 17.2.7, 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.6"
type: bool
client_auth:
description:
- Http authentication configuration for protected resources.
close_client_conn_on_config_update:
description:
- Close client connection on vs config update.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
cloud_config_cksum:
description:
- Checksum of cloud configuration for vs.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
cloud_type:
description:
- Enum options - cloud_none, cloud_vcenter, cloud_openstack, cloud_aws, cloud_vca, cloud_apic, cloud_mesos, cloud_linuxserver, cloud_docker_ucp,
- cloud_rancher, cloud_oshift_k8s, cloud_azure.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
connections_rate_limit:
description:
- Rate limit the incoming connections to this virtual service.
content_rewrite:
description:
- Profile used to match and rewrite strings in request and/or response body.
created_by:
description:
- Creator name.
delay_fairness:
description:
- Select the algorithm for qos fairness.
- This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
description:
description:
- User defined description for the object.
discovered_network_ref:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is deprecated.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
discovered_networks:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is used internally by avi, not editable by the user.
- Field deprecated in 17.1.1.
discovered_subnet:
description:
- (internal-use) discovered subnets providing reachability for client facing virtual service ip.
- This field is deprecated.
- Field deprecated in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Note that only one of fqdn and dns_info setting is allowed.
dns_policies:
description:
- Dns policies applied on the dns traffic of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
east_west_placement:
description:
- Force placement on all se's in service group (mesos mode only).
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
enable_autogw:
description:
- Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
enable_rhi:
description:
- Enable route health injection using the bgp config in the vrf context.
type: bool
enable_rhi_snat:
description:
- Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context.
type: bool
enabled:
description:
- Enable or disable the virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
error_page_profile_ref:
description:
- Error page profile to be used for this virtualservice.this profile is used to send the custom error page to the client generated by the proxy.
- It is a reference to an object of type errorpageprofile.
- Field introduced in 17.2.4.
version_added: "2.5"
floating_ip:
description:
- Floating ip to associate with this virtual service.
- Field deprecated in 17.1.1.
floating_subnet_uuid:
description:
- If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
- This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
- In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
- Field deprecated in 17.1.1.
flow_dist:
description:
- Criteria for flow distribution among ses.
- Enum options - LOAD_AWARE, CONSISTENT_HASH_SOURCE_IP_ADDRESS, CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT.
- Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
flow_label_type:
description:
- Criteria for flow labelling.
- Enum options - NO_LABEL, APPLICATION_LABEL, SERVICE_LABEL.
- Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
fqdn:
description:
- Dns resolvable, fully qualified domain name of the virtualservice.
- Only one of 'fqdn' and 'dns_info' configuration is allowed.
host_name_xlate:
description:
- Translate the host name sent to the servers to this value.
- Translate the host name sent from servers back to the value used by the client.
http_policies:
description:
- Http policies applied on the data traffic of the virtual service.
ign_pool_net_reach:
description:
- Ignore pool servers network reachability constraints for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
ip_address:
description:
- Ip address of the virtual service.
- Field deprecated in 17.1.1.
ipam_network_subnet:
description:
- Subnet and/or network for allocating virtualservice ip by ipam provider module.
- Field deprecated in 17.1.1.
l4_policies:
description:
- L4 policies applied to the data traffic of the virtual service.
- Field introduced in 17.2.7.
version_added: "2.6"
limit_doser:
description:
- Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
max_cps_per_client:
description:
- Maximum connections per second per client ip.
- Allowed values are 10-1000.
- Special values are 0- 'unlimited'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
microservice_ref:
description:
- Microservice representing the virtual service.
- It is a reference to an object of type microservice.
name:
description:
- Name for the virtual service.
required: true
network_profile_ref:
description:
- Determines network settings such as protocol, tcp or udp, and related options for the protocol.
- It is a reference to an object of type networkprofile.
network_ref:
description:
- Manually override the network on which the virtual service is placed.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
network_security_policy_ref:
description:
- Network security policies for the virtual service.
- It is a reference to an object of type networksecuritypolicy.
nsx_securitygroup:
description:
- A list of nsx service groups representing the clients which can access the virtual ip of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
performance_limits:
description:
- Optional settings that determine performance limits like max connections or bandwdith etc.
pool_group_ref:
description:
- The pool group is an object that contains pools.
- It is a reference to an object of type poolgroup.
pool_ref:
description:
- The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
- It is a reference to an object of type pool.
port_uuid:
description:
- (internal-use) network port assigned to the virtual service ip address.
- Field deprecated in 17.1.1.
remove_listening_port_on_vs_down:
description:
- Remove listening port if virtualservice is down.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
requests_rate_limit:
description:
- Rate limit the incoming requests to this virtual service.
scaleout_ecmp:
description:
- Disable re-distribution of flows across service engines for a virtual service.
- Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
se_group_ref:
description:
- The service engine group to use for this virtual service.
- Moving to a new se group is disruptive to existing connections for this vs.
- It is a reference to an object of type serviceenginegroup.
server_network_profile_ref:
description:
- Determines the network settings profile for the server side of tcp proxied connections.
- Leave blank to use the same settings as the client to vs side of the connection.
- It is a reference to an object of type networkprofile.
service_metadata:
description:
- Metadata pertaining to the service provided by this virtual service.
- In openshift/kubernetes environments, egress pod info is stored.
- Any user input to this field will be overwritten by avi vantage.
version_added: "2.4"
service_pool_select:
description:
- Select pool based on destination port.
services:
description:
- List of services defined for this virtual service.
sideband_profile:
description:
- Sideband configuration to be used for this virtualservice.it can be used for sending traffic to sideband vips for external inspection etc.
version_added: "2.4"
snat_ip:
description:
- Nat'ted floating source ip address(es) for upstream connection to servers.
sp_pool_refs:
description:
- Gslb pools used to manage site-persistence functionality.
- Each site-persistence pool contains the virtualservices in all the other sites, that is auto-generated by the gslb manager.
- This is a read-only field for the user.
- It is a reference to an object of type pool.
- Field introduced in 17.2.2.
version_added: "2.5"
ssl_key_and_certificate_refs:
description:
- Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
- It is a reference to an object of type sslprofile.
ssl_sess_cache_avg_size:
description:
- Expected number of ssl session cache entries (may be exceeded).
- Allowed values are 1024-16383.
- Default value when not specified in API or module is interpreted by Avi Controller as 1024.
static_dns_records:
description:
- List of static dns records applied to this virtual service.
- These are static entries and no health monitoring is performed against the ip addresses.
subnet:
description:
- Subnet providing reachability for client facing virtual service ip.
- Field deprecated in 17.1.1.
subnet_uuid:
description:
- It represents subnet for the virtual service ip address allocation when auto_allocate_ip is true.it is only applicable in openstack or aws cloud.
- This field is required if auto_allocate_ip is true.
- Field deprecated in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
traffic_clone_profile_ref:
description:
- Server network or list of servers for cloning traffic.
- It is a reference to an object of type trafficcloneprofile.
- Field introduced in 17.1.1.
version_added: "2.4"
traffic_enabled:
description:
- Knob to enable the virtual service traffic on its assigned service engines.
- This setting is effective only when the enabled flag is set to true.
- Field introduced in 17.2.8.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.6"
type: bool
type:
description:
- Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
- Enum options - VS_TYPE_NORMAL, VS_TYPE_VH_PARENT, VS_TYPE_VH_CHILD.
- Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
url:
description:
- Avi controller URL of the object.
use_bridge_ip_as_vip:
description:
- Use bridge ip as vip on each host in mesos deployments.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
use_vip_as_snat:
description:
- Use the virtual ip as the snat ip for health monitoring and sending traffic to the backend servers instead of the service engine interface ip.
            - The caveat of enabling this option is that the virtualservice cannot be configured in an active-active ha mode.
- Dns based multi vip solution has to be used for ha & non-disruptive upgrade purposes.
- Field introduced in 17.1.9,17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
uuid:
description:
- Uuid of the virtualservice.
vh_domain_name:
description:
- The exact name requested from the client's sni-enabled tls hello domain name field.
- If this is a match, the parent vs will forward the connection to this child vs.
vh_parent_vs_uuid:
description:
- Specifies the virtual service acting as virtual hosting (sni) parent.
vip:
description:
- List of virtual service ips.
- While creating a 'shared vs',please use vsvip_ref to point to the shared entities.
- Field introduced in 17.1.1.
version_added: "2.4"
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
vs_datascripts:
description:
- Datascripts applied on the data traffic of the virtual service.
vsvip_ref:
description:
- Mostly used during the creation of shared vs, this field refers to entities that can be shared across virtual services.
- It is a reference to an object of type vsvip.
- Field introduced in 17.1.1.
version_added: "2.4"
waf_policy_ref:
description:
- Waf policy for the virtual service.
- It is a reference to an object of type wafpolicy.
- Field introduced in 17.2.1.
version_added: "2.5"
weight:
description:
- The quality of service weight to assign to traffic transmitted from this virtual service.
- A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
- Allowed values are 1-128.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL Virtual Service using Pool testpool2
avi_virtualservice:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: newtestvs
state: present
performance_limits:
max_concurrent_connections: 1000
services:
- port: 443
enable_ssl: true
- port: 80
ssl_profile_ref: '/api/sslprofile?name=System-Standard'
application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP'
ssl_key_and_certificate_refs:
- '/api/sslkeyandcertificate?name=System-Default-Cert'
ip_address:
addr: 10.90.131.103
type: V4
pool_ref: '/api/pool?name=testpool2'
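# A second, illustrative example (not in the original docs): a plain HTTP
# virtual service; the controller address and credentials are placeholders.
- name: Create HTTP Virtual Service using Pool testpool2
  avi_virtualservice:
    controller: 10.10.27.90
    username: admin
    password: AviNetworks123!
    name: basichttpvs
    state: present
    services:
      - port: 80
    ip_address:
      addr: 10.90.131.104
      type: V4
    pool_ref: '/api/pool?name=testpool2'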
"""
RETURN = '''
obj:
description: VirtualService (api/virtualservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
active_standby_se_tag=dict(type='str',),
analytics_policy=dict(type='dict',),
analytics_profile_ref=dict(type='str',),
application_profile_ref=dict(type='str',),
auto_allocate_floating_ip=dict(type='bool',),
auto_allocate_ip=dict(type='bool',),
availability_zone=dict(type='str',),
avi_allocated_fip=dict(type='bool',),
avi_allocated_vip=dict(type='bool',),
bulk_sync_kvcache=dict(type='bool',),
client_auth=dict(type='dict',),
close_client_conn_on_config_update=dict(type='bool',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
cloud_type=dict(type='str',),
connections_rate_limit=dict(type='dict',),
content_rewrite=dict(type='dict',),
created_by=dict(type='str',),
delay_fairness=dict(type='bool',),
description=dict(type='str',),
discovered_network_ref=dict(type='list',),
discovered_networks=dict(type='list',),
discovered_subnet=dict(type='list',),
dns_info=dict(type='list',),
dns_policies=dict(type='list',),
east_west_placement=dict(type='bool',),
enable_autogw=dict(type='bool',),
enable_rhi=dict(type='bool',),
enable_rhi_snat=dict(type='bool',),
enabled=dict(type='bool',),
error_page_profile_ref=dict(type='str',),
floating_ip=dict(type='dict',),
floating_subnet_uuid=dict(type='str',),
flow_dist=dict(type='str',),
flow_label_type=dict(type='str',),
fqdn=dict(type='str',),
host_name_xlate=dict(type='str',),
http_policies=dict(type='list',),
ign_pool_net_reach=dict(type='bool',),
ip_address=dict(type='dict',),
ipam_network_subnet=dict(type='dict',),
l4_policies=dict(type='list',),
limit_doser=dict(type='bool',),
max_cps_per_client=dict(type='int',),
microservice_ref=dict(type='str',),
name=dict(type='str', required=True),
network_profile_ref=dict(type='str',),
network_ref=dict(type='str',),
network_security_policy_ref=dict(type='str',),
nsx_securitygroup=dict(type='list',),
performance_limits=dict(type='dict',),
pool_group_ref=dict(type='str',),
pool_ref=dict(type='str',),
port_uuid=dict(type='str',),
remove_listening_port_on_vs_down=dict(type='bool',),
requests_rate_limit=dict(type='dict',),
scaleout_ecmp=dict(type='bool',),
se_group_ref=dict(type='str',),
server_network_profile_ref=dict(type='str',),
service_metadata=dict(type='str',),
service_pool_select=dict(type='list',),
services=dict(type='list',),
sideband_profile=dict(type='dict',),
snat_ip=dict(type='list',),
sp_pool_refs=dict(type='list',),
ssl_key_and_certificate_refs=dict(type='list',),
ssl_profile_ref=dict(type='str',),
ssl_sess_cache_avg_size=dict(type='int',),
static_dns_records=dict(type='list',),
subnet=dict(type='dict',),
subnet_uuid=dict(type='str',),
tenant_ref=dict(type='str',),
traffic_clone_profile_ref=dict(type='str',),
traffic_enabled=dict(type='bool',),
type=dict(type='str',),
url=dict(type='str',),
use_bridge_ip_as_vip=dict(type='bool',),
use_vip_as_snat=dict(type='bool',),
uuid=dict(type='str',),
vh_domain_name=dict(type='list',),
vh_parent_vs_uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
vs_datascripts=dict(type='list',),
vsvip_ref=dict(type='str',),
waf_policy_ref=dict(type='str',),
weight=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'virtualservice',
set([]))
if __name__ == '__main__':
main()
|
perovic/root
|
refs/heads/master
|
tutorials/pyroot/graph.py
|
10
|
#
# To see the output of this macro, click begin_html <a href="gif/graph.gif">here</a>. end_html
#
from ROOT import TCanvas, TGraph
from ROOT import gROOT
from math import sin
from array import array
c1 = TCanvas( 'c1', 'A Simple Graph Example', 200, 10, 700, 500 )
c1.SetFillColor( 42 )
c1.SetGrid()
n = 20
x, y = array( 'd' ), array( 'd' )
for i in range( n ):
x.append( 0.1*i )
y.append( 10*sin( x[i]+0.2 ) )
print ' i %i %f %f ' % (i,x[i],y[i])
gr = TGraph( n, x, y )
gr.SetLineColor( 2 )
gr.SetLineWidth( 4 )
gr.SetMarkerColor( 4 )
gr.SetMarkerStyle( 21 )
gr.SetTitle( 'a simple graph' )
gr.GetXaxis().SetTitle( 'X title' )
gr.GetYaxis().SetTitle( 'Y title' )
gr.Draw( 'ACP' )
# TCanvas.Update() draws the frame, after which one can change it
c1.Update()
c1.GetFrame().SetFillColor( 21 )
c1.GetFrame().SetBorderSize( 12 )
c1.Modified()
c1.Update()
|
evertonstz/py-auto-brightness
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
from pyautobrightness import __version__
setup(
name='pyautobrightness',
version=__version__,
author='Everton Correia',
author_email='evertonjcorreia@gmail.com',
packages=['pyautobrightness'],
keywords=['Brightness', 'Auto', 'Webcam'],
url='https://github.com/evertonstz/py-auto-brightness',
platforms='Unix',
download_url='https://github.com/evertonstz/py-auto-brightness/tarball/'+__version__,
license='GPL',
description='A very simple software to change the screen brightness using a webcam as light sensor.',
entry_points={'console_scripts': ['pyautobrightness = pyautobrightness.pyautobrightnessmain:main']}
)
|
gregvonkuster/bioconda-recipes
|
refs/heads/master
|
recipes/pymot/setup.py
|
57
|
#!/usr/bin/python
from setuptools import setup, find_packages
from os import listdir
pyfiles = [f.replace('.py', '') for f in listdir('.') if f.endswith('.py')]
setup(name='PyMOT',
      version='13.09.2016',
      description='The Multiple Object Tracking (MOT) metrics "multiple object tracking precision" (MOTP) and '
                  '"multiple object tracking accuracy" (MOTA) allow for objective comparison of tracker '
                  'characteristics [0]. The MOTP shows the ability of the tracker to estimate precise object '
                  'positions, independent of its skill at recognizing object configurations, keeping consistent '
                  'trajectories, and so forth. The MOTA accounts for all object configuration errors made by the '
                  'tracker: false positives, misses, mismatches, over all frames.',
      url='https://github.com/Videmo/pymot',
      packages=find_packages(),
      py_modules=pyfiles)
|
osvt/patron_rest
|
refs/heads/master
|
patron_app/views.py
|
1
|
# coding=utf-8
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
import json
import os
import shutil
import uuid
import time
# patron_dir = "C:/etc/patron"
patron_dir = os.path.dirname(os.path.abspath(__file__)) + "/../etc/patron"
command_dir = os.path.dirname(os.path.abspath(__file__)) + "/../etc/commands"
admin_tenant_id = "admin"
def get_403_error():
return "ERROR (Forbidden): Policy doesn't allow this operation to be performed. (HTTP 403) (Request-ID: req-%s)" % uuid.uuid4()
def get_404_error():
return "ERROR (Unsupported): This operation is unsupported. (HTTP 404) (Request-ID: req-%s)" % uuid.uuid4()
def MyHttpResponse(*args, **kwargs):
response = HttpResponse(*args, **kwargs)
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
response["Access-Control-Max-Age"] = "1000"
response["Access-Control-Allow-Headers"] = "*"
return response
def tenants(request):
if request.method != "GET":
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
response_data = []
print "Searching: " + patron_dir + "/custom_policy"
files = os.listdir(patron_dir + "/custom_policy")
files.sort()
i = 0
display_names = {"admin": "Admins(云平台管理员)",
"tenant1": "学校A",
"tenant2": "学校B",
"tenant3": "学校C"}
for f in files:
print f
tmp_tenant = {}
tmp_tenant['id'] = f
tmp_tenant['name'] = display_names[f]
response_data.append(tmp_tenant)
i += 1
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
def models(request, model_name):
if not os.path.exists(patron_dir + "/model/" + model_name):
return MyHttpResponse("The model doesn't exist, model = " + model_name, content_type="text/html")
model_path = patron_dir + "/model/" + model_name
if request.method == 'GET':
print "method = " + request.method + ", file to read = " + model_path
file_object = open(model_path, 'r')
try:
rules = []
while 1:
line = file_object.readline()
if not line:
break
rules.append(line.rstrip("\n").rstrip("\r"))
response_data = rules
# metadata_text = file_object.read()
finally:
file_object.close()
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
else:
print "Unsupported method = " + request.method
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
def metadata(request, tenant_id):
if not os.path.exists(patron_dir + "/custom_policy/" + tenant_id):
return MyHttpResponse("The tenant doesn't exist, tenant = " + tenant_id, content_type="text/html")
metadata_path = patron_dir + "/custom_policy/" + tenant_id + "/metadata.json"
if request.method == 'GET':
print "method = " + request.method + ", file to read = " + metadata_path
file_object = open(metadata_path, 'r')
try:
response_data = json.load(file_object)
# metadata_text = file_object.read()
finally:
file_object.close()
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
elif request.method == 'POST':
print "method = " + request.method + ", file to write = " + metadata_path
file_object = open(metadata_path, 'w')
try:
tmp = json.loads(request.body)
jstr = json.dumps(tmp, ensure_ascii=False, indent=4)
file_object.write(jstr.encode('utf-8'))
finally:
file_object.close()
return MyHttpResponse("{\"status\":\"success\"}", content_type="text/html")
else:
print "Unsupported method = " + request.method
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
def policy(request, tenant_id, policy_name):
if not os.path.exists(patron_dir + "/custom_policy/" + tenant_id):
return MyHttpResponse("The tenant doesn't exist, tenant = " + tenant_id, content_type="text/html")
if os.path.exists(patron_dir + "/custom_policy/" + tenant_id + "/" + policy_name):
policy_path = patron_dir + "/custom_policy/" + tenant_id + "/" + policy_name
elif os.path.exists(patron_dir + "/" + policy_name):
policy_path = patron_dir + "/" + policy_name
else:
return MyHttpResponse("The policy doesn't exist, tenant = " + tenant_id + ", policy = " + policy_name,
content_type="text/html")
if request.method == 'GET':
print "method = " + request.method + ", file to read = " + policy_path
file_object = open(policy_path, 'r')
try:
rules = []
while 1:
line = file_object.readline()
if not line:
break
rules.append(line.rstrip("\n").rstrip("\r"))
response_data = rules
# metadata_text = file_object.read()
finally:
file_object.close()
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
elif request.method == 'POST':
print "method = " + request.method + ", file to write = " + policy_path
file_object = open(policy_path, 'w')
try:
tmp = json.loads(request.body)
rules = ""
for line in tmp:
rules += line + "\n"
rules = rules.rstrip("\n")
file_object.write(rules)
finally:
file_object.close()
return MyHttpResponse("{\"status\":\"success\"}", content_type="text/html")
else:
print "Unsupported method = " + request.method
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
def users(request, tenant_id):
if request.method != "GET":
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
if not os.path.exists(patron_dir + "/custom_policy/" + tenant_id):
return MyHttpResponse("The tenant doesn't exist, tenant = " + tenant_id, content_type="text/html")
userlist_path = patron_dir + "/custom_policy/" + tenant_id + "/" + "users.txt"
print "method = " + request.method + ", file to read = " + userlist_path
    file_object = open(userlist_path, 'r')
    try:
        user_list = file_object.read().split(",")
    finally:
        file_object.close()
    response_data = user_list
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
def get_short_command(command):
word_list = command.split(" ")
if len(word_list) < 2:
return command
else:
return word_list[0] + " " + word_list[1]
def get_action(command):
m = {"nova list": "compute:get_all",
"nova service-list": "compute_extension:services",
"nova boot": "compute:create",
"nova show": "compute:get",
"nova delete": "compute:delete"}
    return m.get(command, "")
def get_object(command):
word_list = command.split(" ")
if len(word_list) < 3:
return ""
else:
return word_list[len(word_list) - 1]
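# Hedged worked example for the three parsers above, using a command from the
# list served by commands() below:
#   get_short_command("nova show vm1") -> "nova show"
#   get_action("nova show")            -> "compute:get"
#   get_object("nova show vm1")        -> "vm1"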
def get_command_output(command):
output_path = command_dir + "/" + command + ".txt"
    try:
        file_object = open(output_path, 'r')
    except IOError:
        return ""
    try:
        return file_object.read()
    finally:
        file_object.close()
def enforce_command(tenant_id, sub, obj, act):
if tenant_id == admin_tenant_id:
res = True
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
if act == "compute_extension:services":
res = False
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
if tenant_id == "tenant3":
res = False
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
if sub == "admin":
res = True
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
policy_path = patron_dir + "/custom_policy/" + tenant_id + "/custom-policy.json"
try:
file_object = open(policy_path, 'r')
    except IOError:
res = True
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
if obj != "":
rule = '"%s": "target_id:%s and user_id:%s"' % (act, obj, sub)
else:
rule = '"%s": "user_id:%s"' % (act, sub)
print "rule = " + rule
    rules = file_object.read()
    file_object.close()
if rule in rules:
res = True
else:
res = False
print "sub = " + sub + ", obj = " + obj + ", act = " + act + ", res = " + str(res)
return res
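# Hedged illustration of the substring match in enforce_command(): for
# sub="user1", obj="vm1", act="compute:get" the function looks for the line
#   "compute:get": "target_id:vm1 and user_id:user1"
# anywhere inside <patron_dir>/custom_policy/<tenant_id>/custom-policy.json.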
def commands(request, tenant_id, user_name):
if request.method != "GET":
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
response_data = ["nova list",
"nova service-list",
"nova boot --flavor m1.nano --image cirros --nic net-id=c4eb995e-748d-4684-a956-10d0ad0e73fd --security-group default vm1",
"nova show vm1",
"nova delete vm1"]
return MyHttpResponse(json.dumps(response_data), content_type="application/json")
def command(request, tenant_id, user_name, command):
if request.method != "GET":
return MyHttpResponse("Unsupported HTTP method: " + request.method, content_type="text/html")
if not os.path.exists(patron_dir + "/custom_policy/" + tenant_id):
return MyHttpResponse("The tenant doesn't exist, tenant = " + tenant_id, content_type="text/html")
userlist_path = patron_dir + "/custom_policy/" + tenant_id + "/" + "users.txt"
print "method = " + request.method + ", file to read = " + userlist_path
    file_object = open(userlist_path, 'r')
    user_list = file_object.read().split(",")
    file_object.close()
if user_name not in user_list:
return MyHttpResponse("The user doesn't exist, tenant = " + tenant_id + ", user = " + user_name, content_type="text/html")
print "method = " + request.method + ", tenant = " + tenant_id + ", user = " + user_name + ", command to run = " + command
time.sleep(1)
sub = user_name
obj = get_object(command)
act = get_action(get_short_command(command))
if not enforce_command(tenant_id, sub, obj, act):
response_data = get_403_error()
return MyHttpResponse(response_data, content_type="text/plain")
response_data = get_command_output(get_short_command(command))
if response_data == "":
response_data = get_404_error()
return MyHttpResponse(response_data, content_type="text/plain")
def reset(request):
return MyHttpResponse("{\"status\":\"%s\"}" % patron_dir, content_type="application/json")
def redirect_handler(request, url_path):
return render(request, url_path + '.html')
def mainpage_handler(request):
return render(request, 'Portal.html')
|
mpillar/codeeval
|
refs/heads/master
|
2-hard/grid-walk/main.py
|
1
|
import collections
def sum_digits(n):
s = 0
while n:
s += n % 10
        n //= 10  # floor division keeps this correct on both Python 2 and 3
return s
def can_visit(p):
value = sum_digits(abs(p[0])) + sum_digits(abs(p[1]))
return value <= 19
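# Quick sanity checks (editorial sketch, not part of the original solution):
# (5, 9) has digit sum 5 + 9 = 14 and is visitable; (59, 79) has digit sum
# 5 + 9 + 7 + 9 = 30 > 19 and is blocked.
assert can_visit((5, 9))
assert not can_visit((59, 79))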
def is_visited(p, visited):
return p in visited
def neighbors(p):
x, y = p
return [(x+1, y), (x, y+1), (x-1, y), (x, y-1)]
def count_accessible_coordinates():
visited = {(0, 0)}
queue = collections.deque([(0, 0)])
while len(queue) > 0:
        p = queue.popleft()  # FIFO pop makes the search genuinely breadth-first
for n in neighbors(p):
if not is_visited(n, visited) and can_visit(n):
visited.add(n)
queue.append(n)
return len(visited)
print(count_accessible_coordinates())
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/Django/tests/modeltests/m2m_signals/tests.py
|
150
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
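    # Hedged usage sketch: a receiver like the one above is connected once
    # per through model, e.g.
    #   models.signals.m2m_changed.connect(
    #       self.m2m_changed_signal_receiver, Car.default_parts.through)
    # which is exactly what the tests below do before exercising add(),
    # remove(), clear() and direct assignment.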
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
# Install a listener on one of the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
# Test the add, remove and clear methods on both sides of the
# many-to-many relation
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
# Now install a listener
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
        # give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# clear all parts of the self.vw
self.vw.default_parts.clear()
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the doors off of cars
self.doors.car_set.clear()
expected_messages.append({
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
expected_messages.append({
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
        self.vw.default_parts = [self.wheelset, self.doors, self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_with_self(self):
expected_messages = []
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
self.alice.friends = [self.bob, self.chuck]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.alice.fans = [self.daisy]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
        self.chuck.idols = [self.alice, self.bob]
expected_messages.append({
'instance': self.chuck,
'action': 'pre_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
|
errordeveloper/fe-devel
|
refs/heads/ver-1.1.0-alpha
|
Native/Test/Python/all-operator-parameters.py
|
3
|
#
# Copyright 2010-2012 Fabric Engine Inc. All rights reserved.
#
import fabric
fabricClient = fabric.createClient()
for i in range( 1, 32 ):
klCode = "operator foo( "
for j in range( 0, i ):
if j > 0: klCode += ", "
klCode += "io Integer v" + str(j)
klCode += " ) { "
for j in range( 0, i ):
klCode += "v" + str(j) + " -= " + str(j+1) + "; "
klCode += " }"
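  # Hedged illustration: for i == 2 the generated KL source reads roughly
  #   operator foo( io Integer v0, io Integer v1 ) { v0 -= 1; v1 -= 2;  }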
op = fabricClient.DependencyGraph.createOperator("foo_"+str(i))
op.setEntryPoint("foo")
op.setSourceCode(klCode)
node = fabricClient.DependencyGraph.createNode("bar_"+str(i))
layout = []
for j in range( 0, i ):
layout.append( "self.p" + str(j) )
node.addMember( "p" + str(j), "Integer" )
node.setData( "p" + str(j), 0, j+1 )
binding = fabricClient.DG.createBinding()
binding.setOperator(op)
binding.setParameterLayout(layout)
node.bindings.append(binding)
node.evaluate()
for j in range( 0, i ):
if node.getData( "p" + str(j), 0 ) != 0:
print( "Parameter " + str(j) + " fails when evaluating " + str(i) + " params" )
fabricClient.close()
|
Idematica/django-oscar
|
refs/heads/master
|
oscar/apps/shipping/migrations/0005_auto.py
|
22
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('address', '0001_initial'),
)
def forwards(self, orm):
# Adding M2M table for field countries on 'OrderAndItemCharges'
db.create_table('shipping_orderanditemcharges_countries', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('orderanditemcharges', models.ForeignKey(orm['shipping.orderanditemcharges'], null=False)),
('country', models.ForeignKey(orm['address.country'], null=False))
))
db.create_unique('shipping_orderanditemcharges_countries', ['orderanditemcharges_id', 'country_id'])
# Adding M2M table for field countries on 'WeightBased'
db.create_table('shipping_weightbased_countries', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('weightbased', models.ForeignKey(orm['shipping.weightbased'], null=False)),
('country', models.ForeignKey(orm['address.country'], null=False))
))
db.create_unique('shipping_weightbased_countries', ['weightbased_id', 'country_id'])
def backwards(self, orm):
# Removing M2M table for field countries on 'OrderAndItemCharges'
db.delete_table('shipping_orderanditemcharges_countries')
# Removing M2M table for field countries on 'WeightBased'
db.delete_table('shipping_weightbased_countries')
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'shipping.orderanditemcharges': {
'Meta': {'object_name': 'OrderAndItemCharges'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['address.Country']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'free_shipping_threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'price_per_item': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'price_per_order': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'shipping.weightband': {
'Meta': {'ordering': "['upper_limit']", 'object_name': 'WeightBand'},
'charge': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bands'", 'to': "orm['shipping.WeightBased']"}),
'upper_limit': ('django.db.models.fields.FloatField', [], {})
},
'shipping.weightbased': {
'Meta': {'object_name': 'WeightBased'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['address.Country']", 'null': 'True', 'blank': 'True'}),
'default_weight': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'upper_charge': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'})
}
}
complete_apps = ['shipping']
|
MillerDix/NEChromeX
|
refs/heads/master
|
flaskTest/venv/lib/python2.7/site-packages/wheel/test/test_basic.py
|
472
|
"""
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
                    assert '.data/scripts/' not in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
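# Hedged example of what test_util() exercises: urlsafe_b64encode(b'*')
# yields b'Kg' (the b'==' padding is stripped) and urlsafe_b64decode(b'Kg')
# restores b'*'.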
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
        context = lambda: list(supp)
        for whl in cand_wheels:  # avoid shadowing the imported wheel module
            whl.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
|
cyx1231st/nova
|
refs/heads/eventually-consistent-host-state-mitaka
|
nova/db/sqlalchemy/migrate_repo/versions/254_add_request_id_in_pci_devices.py
|
81
|
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(engine):
"""Function adds request_id field."""
meta = MetaData(bind=engine)
pci_devices = Table('pci_devices', meta, autoload=True)
shadow_pci_devices = Table('shadow_pci_devices', meta, autoload=True)
request_id = Column('request_id', String(36), nullable=True)
if not hasattr(pci_devices.c, 'request_id'):
pci_devices.create_column(request_id)
if not hasattr(shadow_pci_devices.c, 'request_id'):
shadow_pci_devices.create_column(request_id.copy())
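# Editorial note (hedged): request_id.copy() is used for the shadow table
# because a SQLAlchemy Column instance can only be bound to a single Table;
# reusing the same object for both tables would fail.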
|
aamalev/aioworkers
|
refs/heads/master
|
tests/test_http.py
|
2
|
import pytest
import yarl
from aioworkers.http import _URL as URL
def test_repr():
url = URL('/api/')
assert repr(url) == "URL('/api/')"
def test_div_err():
url = URL('/api/')
with pytest.raises(TypeError):
assert url / 1
@pytest.mark.parametrize('a', [
'/api/',
'/api',
'/api/..',
'http://aioworkers/api/',
'http://aioworkers/api',
])
def test_yarl_compat(a):
assert str(URL(a) / '1') == str(yarl.URL(a) / '1')
|
amith01994/intellij-community
|
refs/heads/master
|
python/testData/paramInfo/TupleAndNamedArg1.py
|
83
|
def f(a, b, c):
pass
f(<arg_c>c=1, *(10, <arg_star>20))
|
benhammadiali/ISETK
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/_exts/configurationblock.py
|
2577
|
#Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
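# Hedged usage sketch (not part of the original file): in an .rst source the
# directive registered below is written as
#
#   .. configuration-block::
#
#       .. code-block:: yaml
#
#           key: value
#
#       .. code-block:: xml
#
#           <key>value</key>
#
# run() then wraps each literal block in a bullet list item labelled with the
# language name taken from the ``formats`` map.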
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
|
npiganeau/odoo
|
refs/heads/master
|
addons/auth_openid/utils.py
|
428
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
KEY_LENGTH = 16
SREG2AX = { # from http://www.axschema.org/types/#sreg
'nickname': 'http://axschema.org/namePerson/friendly',
'email': 'http://axschema.org/contact/email',
'fullname': 'http://axschema.org/namePerson',
'dob': 'http://axschema.org/birthDate',
'gender': 'http://axschema.org/person/gender',
'postcode': 'http://axschema.org/contact/postalCode/home',
'country': 'http://axschema.org/contact/country/home',
'language': 'http://axschema.org/pref/language',
'timezone': 'http://axschema.org/pref/timezone',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AustinRoy7/Pomodoro-timer
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
|
1727
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
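    # Hedged illustration: for a token stream [t1, t2, t3], slider() yields
    # (None, t1, t2), (t1, t2, t3) and (t2, t3, None), i.e. each token with
    # its neighbours.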
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
|
halvertoluke/edx-platform
|
refs/heads/default_branch
|
common/djangoapps/util/testing.py
|
21
|
"""
Utility Mixins for unit tests
"""
import json
import sys
from mock import patch
from django.conf import settings
from django.core.urlresolvers import clear_url_caches, resolve
from django.test import TestCase
from util.db import OuterAtomic, CommitOnSuccessManager
class UrlResetMixin(object):
"""Mixin to reset urls.py before and after a test
Django memoizes the function that reads the urls module (whatever module
urlconf names). The module itself is also stored by python in sys.modules.
To fully reload it, we need to reload the python module, and also clear django's
cache of the parsed urls.
However, the order in which we do this doesn't matter, because neither one will
get reloaded until the next request
Doing this is expensive, so it should only be added to tests that modify settings
that affect the contents of urls.py
"""
def _reset_urls(self, urlconf_modules):
"""Reset `urls.py` for a set of Django apps."""
for urlconf in urlconf_modules:
if urlconf in sys.modules:
reload(sys.modules[urlconf])
clear_url_caches()
# Resolve a URL so that the new urlconf gets loaded
resolve('/')
def setUp(self, *args, **kwargs):
"""Reset Django urls before tests and after tests
If you need to reset `urls.py` from a particular Django app (or apps),
specify these modules in *args.
Examples:
# Reload only the root urls.py
super(MyTestCase, self).setUp()
# Reload urls from my_app
super(MyTestCase, self).setUp("my_app.urls")
# Reload urls from my_app and another_app
super(MyTestCase, self).setUp("my_app.urls", "another_app.urls")
"""
super(UrlResetMixin, self).setUp(**kwargs)
urlconf_modules = [settings.ROOT_URLCONF]
if args:
urlconf_modules.extend(args)
self._reset_urls(urlconf_modules)
self.addCleanup(lambda: self._reset_urls(urlconf_modules))
class EventTestMixin(object):
"""
Generic mixin for verifying that events were emitted during a test.
"""
def setUp(self, tracker):
super(EventTestMixin, self).setUp()
self.tracker = tracker
patcher = patch(self.tracker)
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
def assert_no_events_were_emitted(self):
"""
Ensures no events were emitted since the last event related assertion.
"""
self.assertFalse(self.mock_tracker.emit.called) # pylint: disable=maybe-no-member
def assert_event_emitted(self, event_name, **kwargs):
"""
Verify that an event was emitted with the given parameters.
"""
self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member
event_name,
kwargs
)
def reset_tracker(self):
"""
Reset the mock tracker in order to forget about old events.
"""
self.mock_tracker.reset_mock()
class PatchMediaTypeMixin(object):
"""
Generic mixin for verifying unsupported media type in PATCH
"""
def test_patch_unsupported_media_type(self):
response = self.client.patch(
self.url,
json.dumps({}),
content_type=self.unsupported_media_type
)
self.assertEqual(response.status_code, 415)
def patch_testcase():
"""
Disable commit_on_success decorators for tests in TestCase subclasses.
Since tests in TestCase classes are wrapped in an atomic block, we
cannot use transaction.commit() or transaction.rollback().
https://docs.djangoproject.com/en/1.8/topics/testing/tools/#django.test.TransactionTestCase
"""
def enter_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._enter_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-entering accounting.
"""
CommitOnSuccessManager.ENABLED = False
OuterAtomic.ALLOW_NESTED = True
if not hasattr(OuterAtomic, 'atomic_for_testcase_calls'):
OuterAtomic.atomic_for_testcase_calls = 0
OuterAtomic.atomic_for_testcase_calls += 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
def rollback_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._rollback_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-rollback accounting.
"""
CommitOnSuccessManager.ENABLED = True
OuterAtomic.ALLOW_NESTED = False
OuterAtomic.atomic_for_testcase_calls -= 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
# pylint: disable=protected-access
TestCase._enter_atomics = enter_atomics_wrapper(TestCase._enter_atomics)
TestCase._rollback_atomics = rollback_atomics_wrapper(TestCase._rollback_atomics)
|
NaturalGIS/QGIS
|
refs/heads/master
|
tests/src/python/test_db_manager_spatialite.py
|
15
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the DBManager SPATIALITE plugin
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-10-17'
__copyright__ = 'Copyright 2016, Even Rouault'
import qgis # NOQA
import os
import tempfile
import shutil
from osgeo import gdal, ogr, osr
from qgis.core import QgsDataSourceUri, QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from plugins.db_manager.db_plugins import supportedDbTypes, createDbPlugin
from plugins.db_manager.db_plugins.plugin import TableField
def GDAL_COMPUTE_VERSION(maj, min, rev):
    return maj * 1000000 + min * 10000 + rev * 100
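# Hedged example: GDAL_COMPUTE_VERSION(2, 0, 2) == 2000200, the same integer
# that int(gdal.VersionInfo('VERSION_NUM')) returns for GDAL 2.0.2.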
class TestPyQgsDBManagerSpatialite(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsDBManagerSpatialite.com")
QCoreApplication.setApplicationName("TestPyQgsDBManagerSpatialite")
QgsSettings().clear()
start_app()
cls.basetestpath = tempfile.mkdtemp()
cls.test_spatialite = os.path.join(cls.basetestpath, 'TestPyQgsDBManagerSpatialite.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(cls.test_spatialite)
lyr = ds.CreateLayer('testlayer', geom_type=ogr.wkbLineString, options=['SPATIAL_INDEX=NO'])
cls.supportsAlterFieldDefn = lyr.TestCapability(ogr.OLCAlterFieldDefn) == 1
lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['text_field'] = 'foo'
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 2,3 4)'))
lyr.CreateFeature(f)
f = None
ds = None
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
QgsSettings().clear()
shutil.rmtree(cls.basetestpath, True)
def testSupportedDbTypes(self):
self.assertIn('spatialite', supportedDbTypes())
def testCreateDbPlugin(self):
plugin = createDbPlugin('spatialite')
self.assertIsNotNone(plugin)
def testConnect(self):
connection_name = 'testConnect'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connections = plugin.connections()
self.assertEqual(len(connections), 1)
connection = createDbPlugin('spatialite', connection_name + '_does_not_exist')
connection_succeeded = False
try:
connection.connect()
connection_succeeded = True
        except Exception:
            pass
self.assertFalse(connection_succeeded, 'exception should have been raised')
connection = connections[0]
connection.connect()
connection.reconnect()
connection.remove()
self.assertEqual(len(plugin.connections()), 0)
connection = createDbPlugin('spatialite', connection_name)
connection_succeeded = False
try:
connection.connect()
connection_succeeded = True
        except Exception:
            pass
self.assertFalse(connection_succeeded, 'exception should have been raised')
def testExecuteRegExp(self):
"""This test checks for REGEXP syntax support, which is enabled in Qgis.utils' spatialite_connection()"""
connection_name = 'testListLayer'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
db.connector._execute(None, 'SELECT \'ABC\' REGEXP \'[CBA]\'')
def testListLayer(self):
connection_name = 'testListLayer'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testlayer')
info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
# # GDAL 2.2.0
# expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testlayer (<a href="action:trigger/trigger_insert_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testlayer" AFTER INSERT ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testlayer'; END </td></tr><tr><td>trigger_delete_feature_count_testlayer (<a href="action:trigger/trigger_delete_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testlayer" AFTER DELETE ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testlayer'; END </td></tr></table></div></div>"""
# # GDAL 2.3.0
# expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testlayer (<a href="action:trigger/trigger_insert_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testlayer" AFTER INSERT ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testlayer'); END </td></tr><tr><td>trigger_delete_feature_count_testlayer (<a href="action:trigger/trigger_delete_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testlayer" AFTER DELETE ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testlayer'); END </td></tr></table></div></div>"""
# GDAL 2.3.0
expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">ogc_fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>GEOMETRY </td><td>BLOB </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>VARCHAR </td><td>Y </td><td> </td></tr></table></div></div>"""
self.assertIn(info.toHtml(), [expected_html])
connection.remove()
def testCreateRenameDeleteTable(self):
connection_name = 'testCreateRenameDeleteTable'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteTable.spatialite')
shutil.copy(self.test_spatialite, test_spatialite_new)
uri.setDatabase(test_spatialite_new)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertTrue(table.rename('newName'))
self.assertEqual(table.name, 'newName')
connection.reconnect()
db = connection.database()
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'newName')
fields = []
geom = ['geometry', 'POINT', 4326, 3]
field1 = TableField(table)
field1.name = 'fid'
field1.dataType = 'INTEGER'
field1.notNull = True
field1.primaryKey = True
field2 = TableField(table)
field2.name = 'str_field'
field2.dataType = 'TEXT'
field2.modifier = 20
fields = [field1, field2]
self.assertTrue(db.createVectorTable('newName2', fields, geom))
tables = db.tables()
self.assertEqual(len(tables), 2)
new_table = tables[1]
self.assertEqual(new_table.name, 'newName2')
fields = new_table.fields()
self.assertEqual(len(fields), 2)
# self.assertFalse(new_table.hasSpatialIndex())
# self.assertTrue(new_table.createSpatialIndex())
# self.assertTrue(new_table.hasSpatialIndex())
self.assertTrue(new_table.delete())
tables = db.tables()
self.assertEqual(len(tables), 1)
connection.remove()
def testCreateRenameDeleteFields(self):
if not self.supportsAlterFieldDefn:
return
connection_name = 'testCreateRenameDeleteFields'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteFields.spatialite')
shutil.copy(self.test_spatialite, test_spatialite_new)
uri.setDatabase(test_spatialite_new)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
field_before_count = len(table.fields())
field = TableField(table)
field.name = 'real_field'
field.dataType = 'DOUBLE'
self.assertTrue(table.addField(field))
self.assertEqual(len(table.fields()), field_before_count + 1)
# not supported in spatialite
# self.assertTrue(field.update('real_field2', new_type_str='TEXT (30)', new_not_null=True, new_default_str='foo'))
field = table.fields()[field_before_count]
self.assertEqual(field.name, 'real_field')
self.assertEqual(field.dataType, 'DOUBLE')
# self.assertEqual(field.notNull, 1)
# self.assertEqual(field.default, "'foo'")
# self.assertTrue(table.deleteField(field))
# self.assertEqual(len(table.fields()), field_before_count)
connection.remove()
def testTableDataModel(self):
connection_name = 'testTableDataModel'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testlayer')
model = table.tableDataModel(None)
self.assertEqual(model.rowCount(), 1)
self.assertEqual(model.getData(0, 0), 1) # fid
wkb = model.getData(0, 1)
geometry = ogr.CreateGeometryFromWkb(wkb)
self.assertEqual(geometry.ExportToWkt(), 'LINESTRING (1 2,3 4)')
self.assertEqual(model.getData(0, 2), 'foo')
connection.remove()
# def testRaster(self):
# if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
# return
# connection_name = 'testRaster'
# plugin = createDbPlugin('spatialite')
# uri = QgsDataSourceUri()
# test_spatialite_new = os.path.join(self.basetestpath, 'testRaster.spatialite')
# shutil.copy(self.test_spatialite, test_spatialite_new)
# mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
# mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
# sr = osr.SpatialReference()
# sr.ImportFromEPSG(4326)
# mem_ds.SetProjection(sr.ExportToWkt())
# mem_ds.GetRasterBand(1).Fill(255)
# gdal.GetDriverByName('SQLite').CreateCopy(test_spatialite_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table'])
# mem_ds = None
# uri.setDatabase(test_spatialite_new)
# self.assertTrue(plugin.addConnection(connection_name, uri))
# connection = createDbPlugin('spatialite', connection_name)
# connection.connect()
# db = connection.database()
# self.assertIsNotNone(db)
# tables = db.tables()
# self.assertEqual(len(tables), 2)
# table = None
# for i in range(2):
# if tables[i].name == 'raster_table':
# table = tables[i]
# break
# self.assertIsNotNone(table)
# info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>Unknown (<a href="action:rows/count">find out</a>) </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td> </td></tr><tr><td>Geometry: </td><td>RASTER </td></tr><tr><td>Spatial ref: </td><td>WGS 84 geodetic (4326) </td></tr><tr><td>Extent: </td><td>2.00000, 48.80000 - 2.20000, 49.00000 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">id </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>zoom_level </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>2 </td><td>tile_column </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>3 </td><td>tile_row </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>4 </td><td>tile_data </td><td>BLOB </td><td>N </td><td> </td></tr></table></div></div><div class="section"><h2>Indexes</h2><div><table class="header"><tr><th>Name </th><th>Column(s) </th></tr><tr><td>sqlite_autoindex_raster_table_1 </td><td>zoom_level<br>tile_column<br>tile_row </td></tr></table></div></div>"""
# self.assertEqual(info.toHtml(), expected_html)
# connection.remove()
# def testTwoRaster(self):
# if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
# return
# connection_name = 'testTwoRaster'
# plugin = createDbPlugin('spatialite')
# uri = QgsDataSourceUri()
# test_spatialite_new = os.path.join(self.basetestpath, 'testTwoRaster.spatialite')
# shutil.copy(self.test_spatialite, test_spatialite_new)
# mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
# mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
# sr = osr.SpatialReference()
# sr.ImportFromEPSG(4326)
# mem_ds.SetProjection(sr.ExportToWkt())
# mem_ds.GetRasterBand(1).Fill(255)
# for i in range(2):
# gdal.GetDriverByName('SQLite').CreateCopy(test_spatialite_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table%d' % (i + 1)])
# mem_ds = None
# uri.setDatabase(test_spatialite_new)
# self.assertTrue(plugin.addConnection(connection_name, uri))
# connection = createDbPlugin('spatialite', connection_name)
# connection.connect()
# db = connection.database()
# self.assertIsNotNone(db)
# tables = db.tables()
# self.assertEqual(len(tables), 3)
# table = None
# for i in range(2):
# if tables[i].name.startswith('raster_table'):
# table = tables[i]
# info = table.info()
# info.toHtml()
# connection.remove()
def testNonSpatial(self):
connection_name = 'testnonspatial'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite = os.path.join(self.basetestpath, 'testnonspatial.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(test_spatialite)
lyr = ds.CreateLayer('testnonspatial', geom_type=ogr.wkbNone)
lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['text_field'] = 'foo'
lyr.CreateFeature(f)
f = None
ds = None
uri.setDatabase(test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testnonspatial')
info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
# # GDAL 2.2.0
# expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testnonspatial (<a href="action:trigger/trigger_insert_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testnonspatial" AFTER INSERT ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testnonspatial'; END </td></tr><tr><td>trigger_delete_feature_count_testnonspatial (<a href="action:trigger/trigger_delete_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testnonspatial" AFTER DELETE ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testnonspatial'; END </td></tr></table></div></div>"""
# # GDAL 2.3.0
# expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testnonspatial (<a href="action:trigger/trigger_insert_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testnonspatial" AFTER INSERT ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testnonspatial'); END </td></tr><tr><td>trigger_delete_feature_count_testnonspatial (<a href="action:trigger/trigger_delete_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testnonspatial" AFTER DELETE ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testnonspatial'); END </td></tr></table></div></div>"""
# self.assertIn(info.toHtml(), [expected_html, expected_html_2, expected_html_3], info.toHtml())
connection.remove()
def testAllGeometryTypes(self):
connection_name = 'testAllGeometryTypes'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite = os.path.join(self.basetestpath, 'testAllGeometryTypes.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(test_spatialite)
ds.CreateLayer('testPoint', geom_type=ogr.wkbPoint)
ds.CreateLayer('testLineString', geom_type=ogr.wkbLineString)
ds.CreateLayer('testPolygon', geom_type=ogr.wkbPolygon)
ds.CreateLayer('testMultiPoint', geom_type=ogr.wkbMultiPoint)
ds.CreateLayer('testMultiLineString', geom_type=ogr.wkbMultiLineString)
ds.CreateLayer('testMultiPolygon', geom_type=ogr.wkbMultiPolygon)
ds.CreateLayer('testGeometryCollection', geom_type=ogr.wkbGeometryCollection)
ds.CreateLayer('testCircularString', geom_type=ogr.wkbCircularString)
ds.CreateLayer('testCompoundCurve', geom_type=ogr.wkbCompoundCurve)
ds.CreateLayer('testCurvePolygon', geom_type=ogr.wkbCurvePolygon)
ds.CreateLayer('testMultiCurve', geom_type=ogr.wkbMultiCurve)
ds.CreateLayer('testMultiSurface', geom_type=ogr.wkbMultiSurface)
ds = None
uri.setDatabase(test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
# tables = db.tables()
# for i in range(len(tables)):
# table = tables[i]
# info = table.info()
connection.remove()
if __name__ == '__main__':
unittest.main()
|
javiplx/cobbler-devel
|
refs/heads/cobbler-debian
|
cobbler/item_distro.py
|
3
|
"""
A cobbler distribution. A distribution is a kernel, an initrd, and potentially
some kernel options.
Copyright 2006-2008, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import utils
import item
import weakref
import os
import codes
import time
from cexceptions import *
from utils import _
class Distro(item.Item):
TYPE_NAME = _("distro")
COLLECTION_TYPE = "distro"
def clear(self,is_subobject=False):
"""
Reset this object.
"""
self.name = None
self.uid = ""
self.owners = self.settings.default_ownership
self.kernel = None
self.initrd = None
self.kernel_options = {}
self.kernel_options_post = {}
self.ks_meta = {}
self.arch = 'i386'
self.breed = 'redhat'
self.os_version = ''
self.source_repos = []
self.mgmt_classes = []
self.depth = 0
self.template_files = {}
self.comment = ""
self.tree_build_time = 0
self.redhat_management_key = "<<inherit>>"
self.redhat_management_server = "<<inherit>>"
def make_clone(self):
ds = self.to_datastruct()
cloned = Distro(self.config)
cloned.from_datastruct(ds)
return cloned
def get_parent(self):
"""
Return object next highest up the tree.
NOTE: conceptually there is no need for subdistros
"""
return None
def from_datastruct(self,seed_data):
"""
Modify this object to take on values in seed_data
"""
self.parent = self.load_item(seed_data,'parent')
self.name = self.load_item(seed_data,'name')
self.owners = self.load_item(seed_data,'owners',self.settings.default_ownership)
self.kernel = self.load_item(seed_data,'kernel')
self.initrd = self.load_item(seed_data,'initrd')
self.kernel_options = self.load_item(seed_data,'kernel_options')
self.kernel_options_post = self.load_item(seed_data,'kernel_options_post')
self.ks_meta = self.load_item(seed_data,'ks_meta')
self.arch = self.load_item(seed_data,'arch','i386')
self.breed = self.load_item(seed_data,'breed','redhat')
self.os_version = self.load_item(seed_data,'os_version','')
self.source_repos = self.load_item(seed_data,'source_repos',[])
self.depth = self.load_item(seed_data,'depth',0)
self.mgmt_classes = self.load_item(seed_data,'mgmt_classes',[])
self.template_files = self.load_item(seed_data,'template_files',{})
self.comment = self.load_item(seed_data,'comment')
self.redhat_management_key = self.load_item(seed_data,'redhat_management_key',"<<inherit>>")
self.redhat_management_server = self.load_item(seed_data,'redhat_management_server',"<<inherit>>")
# backwards compatibility enforcement
self.set_arch(self.arch)
if self.kernel_options != "<<inherit>>" and type(self.kernel_options) != dict:
self.set_kernel_options(self.kernel_options)
if self.kernel_options_post != "<<inherit>>" and type(self.kernel_options_post) != dict:
self.set_kernel_options_post(self.kernel_options_post)
if self.ks_meta != "<<inherit>>" and type(self.ks_meta) != dict:
self.set_ksmeta(self.ks_meta)
self.set_mgmt_classes(self.mgmt_classes)
self.set_template_files(self.template_files)
self.set_owners(self.owners)
self.tree_build_time = self.load_item(seed_data, 'tree_build_time', -1)
self.ctime = self.load_item(seed_data, 'ctime', 0)
self.mtime = self.load_item(seed_data, 'mtime', 0)
self.set_tree_build_time(self.tree_build_time)
self.uid = self.load_item(seed_data,'uid','')
if self.uid == '':
self.uid = self.config.generate_uid()
return self
def set_kernel(self,kernel):
"""
Specifies a kernel. The kernel parameter is a full path, a filename
in the configured kernel directory (set in /etc/cobbler.conf) or a
directory path that would contain a selectable kernel. Kernel
naming conventions are checked; see the docs for find_kernel in the
utils module.
"""
if utils.find_kernel(kernel):
self.kernel = kernel
return True
raise CX(_("kernel not found"))
def set_tree_build_time(self, datestamp):
"""
Sets the import time of the distro, for use by action_import.py.
If not imported, this field is not meaningful.
"""
self.tree_build_time = float(datestamp)
return True
def set_breed(self, breed):
return utils.set_breed(self,breed)
def set_os_version(self, os_version):
return utils.set_os_version(self,os_version)
def set_initrd(self,initrd):
"""
Specifies an initrd image. Path search works as in set_kernel.
File must be named appropriately.
"""
if utils.find_initrd(initrd):
self.initrd = initrd
return True
raise CX(_("initrd not found"))
def set_redhat_management_key(self,key):
return utils.set_redhat_management_key(self,key)
def set_redhat_management_server(self,server):
return utils.set_redhat_management_server(self,server)
def set_source_repos(self, repos):
"""
A list of http:// URLs on the cobbler server that point to
yum configuration files that can be used to
install core packages. Used by cobbler import only.
"""
self.source_repos = repos
def set_arch(self,arch):
"""
The field is mainly relevant to PXE provisioning.
Should someone have Itanium machines on a network, having
syslinux (pxelinux.0) be the only option in the config file causes
problems.
Using an alternative distro type allows for dhcpd.conf templating
to "do the right thing" with those systems -- this also relates to
bootloader configuration files which have different syntax for different
distro types (because of the bootloaders).
This field is named "arch" because mainly on Linux, we only care about
the architecture, though if (in the future) new provisioning types
are added, an arch value might be something like "bsd_x86".
Update: (7/2008) this is now used to build fake PXE trees for s390x also
"""
return utils.set_arch(self,arch)
def is_valid(self):
"""
A distro requires that the kernel and initrd be set. All
other variables are optional.
"""
# NOTE: this code does not support inheritable distros at this time.
# this is by design because inheritable distros do not make sense.
if self.name is None:
raise CX(_("name is required"))
if self.kernel is None:
raise CX(_("kernel is required"))
if self.initrd is None:
raise CX(_("initrd is required"))
return True
def to_datastruct(self):
"""
Return a serializable datastructure representation of this object.
"""
return {
'name' : self.name,
'kernel' : self.kernel,
'initrd' : self.initrd,
'kernel_options' : self.kernel_options,
'kernel_options_post' : self.kernel_options_post,
'ks_meta' : self.ks_meta,
'mgmt_classes' : self.mgmt_classes,
'template_files' : self.template_files,
'arch' : self.arch,
'breed' : self.breed,
'os_version' : self.os_version,
'source_repos' : self.source_repos,
'parent' : self.parent,
'depth' : self.depth,
'owners' : self.owners,
'comment' : self.comment,
'tree_build_time' : self.tree_build_time,
'ctime' : self.ctime,
'mtime' : self.mtime,
'uid' : self.uid,
'redhat_management_key' : self.redhat_management_key,
'redhat_management_server' : self.redhat_management_server
}
def printable(self):
"""
Human-readable representation.
"""
kstr = utils.find_kernel(self.kernel)
istr = utils.find_initrd(self.initrd)
buf = _("distro : %s\n") % self.name
buf = buf + _("architecture : %s\n") % self.arch
buf = buf + _("breed : %s\n") % self.breed
buf = buf + _("created : %s\n") % time.ctime(self.ctime)
buf = buf + _("comment : %s\n") % self.comment
buf = buf + _("initrd : %s\n") % istr
buf = buf + _("kernel : %s\n") % kstr
buf = buf + _("kernel options : %s\n") % self.kernel_options
buf = buf + _("ks metadata : %s\n") % self.ks_meta
if self.tree_build_time != -1:
buf = buf + _("tree build time : %s\n") % time.ctime(self.tree_build_time)
else:
buf = buf + _("tree build time : %s\n") % "N/A"
buf = buf + _("modified : %s\n") % time.ctime(self.mtime)
buf = buf + _("mgmt classes : %s\n") % self.mgmt_classes
buf = buf + _("os version : %s\n") % self.os_version
buf = buf + _("owners : %s\n") % self.owners
buf = buf + _("post kernel options : %s\n") % self.kernel_options_post
buf = buf + _("redhat mgmt key : %s\n") % self.redhat_management_key
buf = buf + _("redhat mgmt server : %s\n") % self.redhat_management_server
buf = buf + _("template files : %s\n") % self.template_files
return buf
def remote_methods(self):
return {
'name' : self.set_name,
'kernel' : self.set_kernel,
'initrd' : self.set_initrd,
'kopts' : self.set_kernel_options,
'kopts-post' : self.set_kernel_options_post,
'kopts_post' : self.set_kernel_options_post,
'arch' : self.set_arch,
'ksmeta' : self.set_ksmeta,
'breed' : self.set_breed,
'os-version' : self.set_os_version,
'os_version' : self.set_os_version,
'owners' : self.set_owners,
'mgmt-classes' : self.set_mgmt_classes,
'mgmt_classes' : self.set_mgmt_classes,
'template-files' : self.set_template_files,
'template_files' : self.set_template_files,
'comment' : self.set_comment,
'redhat_management_key' : self.set_redhat_management_key,
'redhat_management_server' : self.set_redhat_management_server
}
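# Hedged usage sketch (added for illustration; not part of the original
# module). Constructing a Distro requires a live cobbler config object, so
# the example is left as comments; 'config' and the file paths below are
# hypothetical placeholders.
#
#     distro = Distro(config)
#     distro.set_name("fedora-x86_64")
#     distro.set_kernel("/var/www/cobbler/ks_mirror/fedora/vmlinuz")
#     distro.set_initrd("/var/www/cobbler/ks_mirror/fedora/initrd.img")
#     distro.set_arch("x86_64")
#     assert distro.is_valid()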
|
AMeng/django-single-model-admin
|
refs/heads/master
|
test/app/models.py
|
1
|
from django.db import models
class TestModel(models.Model):
field = models.CharField(max_length=25)
|
mignev/startappsync
|
refs/heads/master
|
gitconfig/file.py
|
1
|
# -*- coding: utf-8 -*-
# file.py -- Safe access to git files
# Copyright (C) 2010 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) a later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Safe access to git files."""
import errno
import os
import tempfile
import io
def ensure_dir_exists(dirname):
"""Ensure a directory exists, creating if necessary."""
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def fancy_rename(oldname, newname):
"""Rename file with temporary backup file to rollback if rename fails"""
if not os.path.exists(newname):
try:
os.rename(oldname, newname)
except OSError:
raise
return
# destination file exists
try:
(fd, tmpfile) = tempfile.mkstemp(".tmp", prefix=oldname+".", dir=".")
os.close(fd)
os.remove(tmpfile)
except OSError:
# either file could not be created (e.g. permission problem)
# or could not be deleted (e.g. rude virus scanner)
raise
try:
os.rename(newname, tmpfile)
except OSError:
raise # no rename occurred
try:
os.rename(oldname, newname)
except OSError:
os.rename(tmpfile, newname)
raise
os.remove(tmpfile)
def GitFile(filename, mode='rb', bufsize=-1):
"""Create a file object that obeys the git file locking protocol.
:return: a builtin file object or a _GitFile object
:note: See _GitFile for a description of the file locking protocol.
Only read-only and write-only (binary) modes are supported; r+, w+, and a
are not. To read and write from the same file, you can take advantage of
the fact that opening a file for write does not actually open the file you
request.
"""
if 'a' in mode:
raise IOError('append mode not supported for Git files')
if '+' in mode:
raise IOError('read/write mode not supported for Git files')
if 'b' not in mode:
raise IOError('text mode not supported for Git files')
if 'w' in mode:
return _GitFile(filename, mode, bufsize)
else:
return io.open(filename, mode, bufsize)
class _GitFile(object):
"""File that follows the git locking protocol for writes.
All writes to a file foo will be written into foo.lock in the same
directory, and the lockfile will be renamed to overwrite the original file
on close.
:note: You *must* call close() or abort() on a _GitFile for the lock to be
released. Typically this will happen in a finally block.
"""
PROXY_PROPERTIES = set(['closed', 'encoding', 'errors', 'mode', 'name',
'newlines', 'softspace'])
PROXY_METHODS = ('__iter__', 'flush', 'fileno', 'isatty', 'read',
'readline', 'readlines', 'seek', 'tell',
'truncate', 'write', 'writelines')
def __init__(self, filename, mode, bufsize):
self._filename = filename
self._lockfilename = '%s.lock' % self._filename
fd = os.open(self._lockfilename,
os.O_RDWR | os.O_CREAT | os.O_EXCL | getattr(os, "O_BINARY", 0))
self._file = os.fdopen(fd, mode, bufsize)
self._closed = False
for method in self.PROXY_METHODS:
setattr(self, method, getattr(self._file, method))
def abort(self):
"""Close and discard the lockfile without overwriting the target.
If the file is already closed, this is a no-op.
"""
if self._closed:
return
self._file.close()
try:
os.remove(self._lockfilename)
self._closed = True
except OSError as e:
# The file may have been removed already, which is ok.
if e.errno != errno.ENOENT:
raise
self._closed = True
def close(self):
"""Close this file, saving the lockfile over the original.
:note: If this method fails, it will attempt to delete the lockfile.
However, it is not guaranteed to do so (e.g. if a filesystem becomes
suddenly read-only), which will prevent future writes to this file
until the lockfile is removed manually.
:raises OSError: if the original file could not be overwritten. The lock
file is still closed, so further attempts to write to the same file
object will raise ValueError.
"""
if self._closed:
return
self._file.close()
try:
try:
os.rename(self._lockfilename, self._filename)
except OSError as e:
# Windows versions prior to Vista don't support atomic renames
if e.errno != errno.EEXIST:
raise
fancy_rename(self._lockfilename, self._filename)
finally:
self.abort()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __getattr__(self, name):
"""Proxy property calls to the underlying file."""
if name in self.PROXY_PROPERTIES:
return getattr(self._file, name)
raise AttributeError(name)
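# Hedged usage sketch (added for illustration; not part of the original
# module). Writes through GitFile so the data only replaces the target file
# when close() succeeds; abort() would discard the '.lock' file instead.
if __name__ == '__main__':
    demo_path = os.path.join(tempfile.mkdtemp(), 'demo.txt')
    f = GitFile(demo_path, 'wb')
    try:
        f.write(b'hello\n')
    finally:
        f.close()  # renames demo_path + '.lock' over demo_path
    with io.open(demo_path, 'rb') as check:
        assert check.read() == b'hello\n'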
|
julien78910/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ku6.py
|
147
|
from __future__ import unicode_literals
from .common import InfoExtractor
class Ku6IE(InfoExtractor):
_VALID_URL = r'http://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html'
_TEST = {
'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html',
'md5': '01203549b9efbb45f4b87d55bdea1ed1',
'info_dict': {
'id': 'JG-8yS14xzBr4bCn1pu0xw',
'ext': 'f4v',
'title': 'techniques test',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
jsonData = self._download_json(dataUrl, video_id)
downloadUrl = jsonData['data']['f']
return {
'id': video_id,
'title': title,
'url': downloadUrl
}
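# Hedged note (illustrative; not part of the original extractor): the class
# is not instantiated directly. Within youtube-dl, extraction is normally
# driven through a YoutubeDL instance, roughly:
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info(url, download=False)  # url: a v.ku6.com link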
|
ahmadRagheb/goldenHR
|
refs/heads/master
|
erpnext/selling/doctype/sales_order/test_sales_order.py
|
6
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, add_days
import frappe.permissions
import unittest
from erpnext.stock.doctype.item.test_item import get_total_projected_qty
from erpnext.selling.doctype.sales_order.sales_order \
import make_material_request, make_delivery_note, make_sales_invoice, WarehouseRequired
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
from frappe.tests.test_permissions import set_user_permission_doctypes
class TestSalesOrder(unittest.TestCase):
def tearDown(self):
frappe.set_user("Administrator")
for role in ("Stock User", "Sales User"):
set_user_permission_doctypes(doctypes="Sales Order", role=role,
apply_user_permissions=0, user_permission_doctypes=None)
def test_make_material_request(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_material_request, so.name)
so.submit()
mr = make_material_request(so.name)
self.assertEquals(mr.material_request_type, "Purchase")
self.assertEquals(len(mr.get("items")), len(so.get("items")))
def test_make_delivery_note(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_delivery_note, so.name)
so.submit()
dn = make_delivery_note(so.name)
self.assertEquals(dn.doctype, "Delivery Note")
self.assertEquals(len(dn.get("items")), len(so.get("items")))
def test_make_sales_invoice(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_sales_invoice, so.name)
so.submit()
si = make_sales_invoice(so.name)
self.assertEquals(len(si.get("items")), len(so.get("items")))
self.assertEquals(len(si.get("items")), 1)
si.insert()
si.submit()
si1 = make_sales_invoice(so.name)
self.assertEquals(len(si1.get("items")), 0)
def test_update_qty(self):
so = make_sales_order()
create_dn_against_so(so.name, 6)
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 6)
# Check delivered_qty after make_sales_invoice without update_stock checked
si1 = make_sales_invoice(so.name)
si1.get("items")[0].qty = 6
si1.insert()
si1.submit()
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 6)
# Check delivered_qty after make_sales_invoice with update_stock checked
si2 = make_sales_invoice(so.name)
si2.set("update_stock", 1)
si2.get("items")[0].qty = 3
si2.insert()
si2.submit()
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 9)
def test_reserved_qty_for_partial_delivery(self):
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
existing_reserved_qty = get_reserved_qty()
so = make_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
dn = create_dn_against_so(so.name)
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 5)
# close so
so.load_from_db()
so.update_status("Closed")
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
# unclose so
so.load_from_db()
so.update_status('Draft')
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 5)
dn.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
# cancel
so.load_from_db()
so.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
def test_reserved_qty_for_over_delivery(self):
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
# set over-delivery tolerance
frappe.db.set_value('Item', "_Test Item", 'tolerance', 50)
existing_reserved_qty = get_reserved_qty()
so = make_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
dn = create_dn_against_so(so.name, 15)
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc_before_cancel = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc_before_cancel.total_projected_qty)
dn.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc_after_cancel = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc_after_cancel.total_projected_qty)
def test_reserved_qty_for_over_delivery_via_sales_invoice(self):
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
# set over-delivery tolerance
frappe.db.set_value('Item', "_Test Item", 'tolerance', 50)
existing_reserved_qty = get_reserved_qty()
so = make_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
si = make_sales_invoice(so.name)
si.update_stock = 1
si.get("items")[0].qty = 12
si.insert()
si.submit()
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
so.load_from_db()
self.assertEqual(so.get("items")[0].delivered_qty, 12)
self.assertEqual(so.per_delivered, 100)
si.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
so.load_from_db()
self.assertEqual(so.get("items")[0].delivered_qty, 0)
self.assertEqual(so.per_delivered, 0)
def test_reserved_qty_for_partial_delivery_with_packing_list(self):
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
make_stock_entry(item="_Test Item Home Desktop 100", target="_Test Warehouse - _TC", qty=10, rate=100)
existing_reserved_qty_item1 = get_reserved_qty("_Test Item")
existing_reserved_qty_item2 = get_reserved_qty("_Test Item Home Desktop 100")
so = make_sales_order(item_code="_Test Product Bundle Item")
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
dn = create_dn_against_so(so.name)
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 25)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 10)
# close so
so.load_from_db()
so.update_status("Closed")
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"), existing_reserved_qty_item2)
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
# unclose so
so.load_from_db()
so.update_status('Draft')
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 25)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 10)
dn.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
so.load_from_db()
so.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"), existing_reserved_qty_item2)
def test_reserved_qty_for_over_delivery_with_packing_list(self):
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
make_stock_entry(item="_Test Item Home Desktop 100", target="_Test Warehouse - _TC", qty=10, rate=100)
# set over-delivery tolerance
frappe.db.set_value('Item', "_Test Product Bundle Item", 'tolerance', 50)
existing_reserved_qty_item1 = get_reserved_qty("_Test Item")
existing_reserved_qty_item2 = get_reserved_qty("_Test Item Home Desktop 100")
so = make_sales_order(item_code="_Test Product Bundle Item")
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
dn = create_dn_against_so(so.name, 15)
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2)
dn.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
def test_warehouse_user(self):
for role in ("Stock User", "Sales User"):
set_user_permission_doctypes(doctypes="Sales Order", role=role,
apply_user_permissions=1, user_permission_doctypes=["Warehouse"])
frappe.permissions.add_user_permission("Warehouse", "_Test Warehouse 1 - _TC", "test@example.com")
frappe.permissions.add_user_permission("Warehouse", "_Test Warehouse 2 - _TC1", "test2@example.com")
frappe.permissions.add_user_permission("Company", "_Test Company 1", "test2@example.com")
test_user = frappe.get_doc("User", "test@example.com")
test_user.add_roles("Sales User", "Stock User")
test_user.remove_roles("Sales Manager")
test_user_2 = frappe.get_doc("User", "test2@example.com")
test_user_2.add_roles("Sales User", "Stock User")
test_user_2.remove_roles("Sales Manager")
frappe.set_user("test@example.com")
so = make_sales_order(company="_Test Company 1",
warehouse="_Test Warehouse 2 - _TC1", do_not_save=True)
so.conversion_rate = 0.02
so.plc_conversion_rate = 0.02
self.assertRaises(frappe.PermissionError, so.insert)
frappe.set_user("test2@example.com")
so.insert()
frappe.set_user("Administrator")
frappe.permissions.remove_user_permission("Warehouse", "_Test Warehouse 1 - _TC", "test@example.com")
frappe.permissions.remove_user_permission("Warehouse", "_Test Warehouse 2 - _TC1", "test2@example.com")
frappe.permissions.remove_user_permission("Company", "_Test Company 1", "test2@example.com")
def test_block_delivery_note_against_cancelled_sales_order(self):
so = make_sales_order()
dn = make_delivery_note(so.name)
dn.insert()
so.cancel()
self.assertRaises(frappe.CancelledLinkError, dn.submit)
def test_service_type_product_bundle(self):
from erpnext.stock.doctype.item.test_item import make_item
from erpnext.selling.doctype.product_bundle.test_product_bundle import make_product_bundle
make_item("_Test Service Product Bundle", {"is_stock_item": 0})
make_item("_Test Service Product Bundle Item 1", {"is_stock_item": 0})
make_item("_Test Service Product Bundle Item 2", {"is_stock_item": 0})
make_product_bundle("_Test Service Product Bundle",
["_Test Service Product Bundle Item 1", "_Test Service Product Bundle Item 2"])
so = make_sales_order(item_code = "_Test Service Product Bundle", warehouse=None)
self.assertTrue("_Test Service Product Bundle Item 1" in [d.item_code for d in so.packed_items])
self.assertTrue("_Test Service Product Bundle Item 2" in [d.item_code for d in so.packed_items])
def test_mix_type_product_bundle(self):
from erpnext.stock.doctype.item.test_item import make_item
from erpnext.selling.doctype.product_bundle.test_product_bundle import make_product_bundle
make_item("_Test Mix Product Bundle", {"is_stock_item": 0})
make_item("_Test Mix Product Bundle Item 1", {"is_stock_item": 1})
make_item("_Test Mix Product Bundle Item 2", {"is_stock_item": 0})
make_product_bundle("_Test Mix Product Bundle",
["_Test Mix Product Bundle Item 1", "_Test Mix Product Bundle Item 2"])
self.assertRaises(WarehouseRequired, make_sales_order, item_code = "_Test Mix Product Bundle", warehouse="")
def test_auto_insert_price(self):
from erpnext.stock.doctype.item.test_item import make_item
make_item("_Test Item for Auto Price List", {"is_stock_item": 0})
frappe.db.set_value("Stock Settings", None, "auto_insert_price_list_rate_if_missing", 1)
item_price = frappe.db.get_value("Item Price", {"price_list": "_Test Price List",
"item_code": "_Test Item for Auto Price List"})
if item_price:
frappe.delete_doc("Item Price", item_price)
make_sales_order(item_code = "_Test Item for Auto Price List", selling_price_list="_Test Price List", rate=100)
self.assertEquals(frappe.db.get_value("Item Price",
{"price_list": "_Test Price List", "item_code": "_Test Item for Auto Price List"}, "price_list_rate"), 100)
# do not update price list
frappe.db.set_value("Stock Settings", None, "auto_insert_price_list_rate_if_missing", 0)
item_price = frappe.db.get_value("Item Price", {"price_list": "_Test Price List",
"item_code": "_Test Item for Auto Price List"})
if item_price:
frappe.delete_doc("Item Price", item_price)
make_sales_order(item_code = "_Test Item for Auto Price List", selling_price_list="_Test Price List", rate=100)
self.assertEquals(frappe.db.get_value("Item Price",
{"price_list": "_Test Price List", "item_code": "_Test Item for Auto Price List"}, "price_list_rate"), None)
frappe.db.set_value("Stock Settings", None, "auto_insert_price_list_rate_if_missing", 1)
def test_drop_shipping(self):
from erpnext.selling.doctype.sales_order.sales_order import make_purchase_order_for_drop_shipment
from erpnext.stock.doctype.item.test_item import make_item
from erpnext.buying.doctype.purchase_order.purchase_order import update_status
make_stock_entry(target="_Test Warehouse - _TC", qty=10, rate=100)
po_item = make_item("_Test Item for Drop Shipping", {"is_stock_item": 1, "delivered_by_supplier": 1,
'default_supplier': '_Test Supplier',
"expense_account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC"
})
dn_item = make_item("_Test Regular Item", {"is_stock_item": 1, "expense_account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC"})
so_items = [
{
"item_code": po_item.item_code,
"warehouse": "",
"qty": 2,
"rate": 400,
"delivered_by_supplier": 1,
"supplier": '_Test Supplier'
},
{
"item_code": dn_item.item_code,
"warehouse": "_Test Warehouse - _TC",
"qty": 2,
"rate": 300,
"conversion_factor": 1.0
}
]
if frappe.db.get_value("Item", "_Test Regular Item", "is_stock_item")==1:
make_stock_entry(item="_Test Regular Item", target="_Test Warehouse - _TC", qty=10, rate=100)
# set up existing qty from bin
bin = frappe.get_all("Bin", filters={"item_code": po_item.item_code, "warehouse": "_Test Warehouse - _TC"},
fields=["ordered_qty", "reserved_qty"])
existing_ordered_qty = bin[0].ordered_qty if bin else 0.0
existing_reserved_qty = bin[0].reserved_qty if bin else 0.0
bin = frappe.get_all("Bin", filters={"item_code": dn_item.item_code,
"warehouse": "_Test Warehouse - _TC"}, fields=["reserved_qty"])
existing_reserved_qty_for_dn_item = bin[0].reserved_qty if bin else 0.0
#create so, po and partial dn
so = make_sales_order(item_list=so_items, do_not_submit=True)
so.submit()
po = make_purchase_order_for_drop_shipment(so.name, '_Test Supplier')
po.submit()
dn = create_dn_against_so(so.name, delivered_qty=1)
self.assertEquals(so.customer, po.customer)
self.assertEquals(po.items[0].sales_order, so.name)
self.assertEquals(po.items[0].item_code, po_item.item_code)
self.assertEquals(dn.items[0].item_code, dn_item.item_code)
#test ordered_qty and reserved_qty
bin = frappe.get_all("Bin", filters={"item_code": po_item.item_code, "warehouse": "_Test Warehouse - _TC"},
fields=["ordered_qty", "reserved_qty"])
ordered_qty = bin[0].ordered_qty if bin else 0.0
reserved_qty = bin[0].reserved_qty if bin else 0.0
self.assertEquals(abs(flt(ordered_qty)), existing_ordered_qty)
self.assertEquals(abs(flt(reserved_qty)), existing_reserved_qty)
reserved_qty = frappe.db.get_value("Bin",
{"item_code": dn_item.item_code, "warehouse": "_Test Warehouse - _TC"}, "reserved_qty")
self.assertEquals(abs(flt(reserved_qty)), existing_reserved_qty_for_dn_item + 1)
#test po_item length
self.assertEquals(len(po.items), 1)
#test per_delivered status
update_status("Delivered", po.name)
self.assertEquals(flt(frappe.db.get_value("Sales Order", so.name, "per_delivered"), 2), 75.00)
#test reserved qty after complete delivery
dn = create_dn_against_so(so.name, delivered_qty=1)
reserved_qty = frappe.db.get_value("Bin",
{"item_code": dn_item.item_code, "warehouse": "_Test Warehouse - _TC"}, "reserved_qty")
self.assertEquals(abs(flt(reserved_qty)), existing_reserved_qty_for_dn_item)
#test after closing so
so.db_set('status', "Closed")
so.update_reserved_qty()
bin = frappe.get_all("Bin", filters={"item_code": po_item.item_code, "warehouse": "_Test Warehouse - _TC"},
fields=["ordered_qty", "reserved_qty"])
ordered_qty = bin[0].ordered_qty if bin else 0.0
reserved_qty = bin[0].reserved_qty if bin else 0.0
self.assertEquals(abs(flt(ordered_qty)), existing_ordered_qty)
self.assertEquals(abs(flt(reserved_qty)), existing_reserved_qty)
reserved_qty = frappe.db.get_value("Bin",
{"item_code": dn_item.item_code, "warehouse": "_Test Warehouse - _TC"}, "reserved_qty")
self.assertEquals(abs(flt(reserved_qty)), existing_reserved_qty_for_dn_item)
def test_total_projected_qty_against_sales_order(self):
so = make_sales_order(item = '_Test Item')
total_projected_qty = get_total_projected_qty('_Test Item')
item_doc = frappe.get_doc('Item', '_Test Item')
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
def test_reserved_qty_for_closing_so(self):
bin = frappe.get_all("Bin", filters={"item_code": "_Test Item", "warehouse": "_Test Warehouse - _TC"},
fields=["reserved_qty"])
existing_reserved_qty = bin[0].reserved_qty if bin else 0.0
so = make_sales_order(item_code="_Test Item", qty=1)
self.assertEquals(get_reserved_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"), existing_reserved_qty+1)
so.update_status("Closed")
self.assertEquals(get_reserved_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"), existing_reserved_qty)
def test_create_so_with_margin(self):
so = make_sales_order(item_code="_Test Item", qty=1, do_not_submit=True)
so.items[0].price_list_rate = price_list_rate = 100
so.items[0].margin_type = 'Percentage'
so.items[0].margin_rate_or_amount = 25
so.insert()
new_so = frappe.copy_doc(so)
new_so.save(ignore_permissions=True)
self.assertEquals(new_so.get("items")[0].rate, flt((price_list_rate*25)/100 + price_list_rate))
new_so.items[0].margin_rate_or_amount = 25
new_so.submit()
self.assertEquals(new_so.get("items")[0].rate, flt((price_list_rate*25)/100 + price_list_rate))
def make_sales_order(**args):
so = frappe.new_doc("Sales Order")
args = frappe._dict(args)
if args.transaction_date:
so.transaction_date = args.transaction_date
so.company = args.company or "_Test Company"
so.customer = args.customer or "_Test Customer"
so.currency = args.currency or "INR"
if args.selling_price_list:
so.selling_price_list = args.selling_price_list
if "warehouse" not in args:
args.warehouse = "_Test Warehouse - _TC"
if args.item_list:
for item in args.item_list:
so.append("items", item)
else:
so.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse,
"qty": args.qty or 10,
"uom": args.uom or None,
"rate": args.rate or 100
})
so.delivery_date = add_days(so.transaction_date, 10)
if not args.do_not_save:
so.insert()
if not args.do_not_submit:
so.submit()
return so
def create_dn_against_so(so, delivered_qty=0):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
dn = make_delivery_note(so)
dn.get("items")[0].qty = delivered_qty or 5
dn.insert()
dn.submit()
return dn
def get_reserved_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"):
return flt(frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
"reserved_qty"))
test_dependencies = ["Currency Exchange"]
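# Hedged usage sketch (illustrative; running it requires a configured
# frappe/erpnext test site, so it is left as comments). The factory accepts
# the keyword arguments consumed above, e.g.:
#
#     so = make_sales_order(qty=5, rate=250, do_not_submit=True)
#     so.submit()
#     dn = create_dn_against_so(so.name, delivered_qty=5)
#     assert get_reserved_qty() >= 0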
|
shipEZ/flaskApp
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py
|
515
|
from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers):
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param headers: Headers to verify.
:type headers: `httplib.HTTPMessage`.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
type(headers)))
defects = getattr(headers, 'defects', None)
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
if get_payload: # Platform-specific: Python 3.
unparsed_data = get_payload()
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
"""
Checks whether the request that produced this response was a HEAD request.
Handles the quirks of AppEngine.
:param response: Response to check.
:type response: :class:`httplib.HTTPResponse`
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method = response._method
if isinstance(method, int): # Platform-specific: Appengine
return method == 3
return method.upper() == 'HEAD'
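# Hedged usage sketch (illustrative; the relative imports above mean this
# module cannot run standalone, so the example is left as comments):
#
#     import io
#     buf = io.BytesIO(b'payload')
#     assert not is_fp_closed(buf)   # .closed is False while open
#     buf.close()
#     assert is_fp_closed(buf)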
|
terbolous/SickRage
|
refs/heads/master
|
lib/unidecode/x065.py
|
252
|
data = (
'Pan ', # 0x00
'Yang ', # 0x01
'Lei ', # 0x02
'Sa ', # 0x03
'Shu ', # 0x04
'Zan ', # 0x05
'Nian ', # 0x06
'Xian ', # 0x07
'Jun ', # 0x08
'Huo ', # 0x09
'Li ', # 0x0a
'La ', # 0x0b
'Han ', # 0x0c
'Ying ', # 0x0d
'Lu ', # 0x0e
'Long ', # 0x0f
'Qian ', # 0x10
'Qian ', # 0x11
'Zan ', # 0x12
'Qian ', # 0x13
'Lan ', # 0x14
'San ', # 0x15
'Ying ', # 0x16
'Mei ', # 0x17
'Rang ', # 0x18
'Chan ', # 0x19
'[?] ', # 0x1a
'Cuan ', # 0x1b
'Xi ', # 0x1c
'She ', # 0x1d
'Luo ', # 0x1e
'Jun ', # 0x1f
'Mi ', # 0x20
'Li ', # 0x21
'Zan ', # 0x22
'Luan ', # 0x23
'Tan ', # 0x24
'Zuan ', # 0x25
'Li ', # 0x26
'Dian ', # 0x27
'Wa ', # 0x28
'Dang ', # 0x29
'Jiao ', # 0x2a
'Jue ', # 0x2b
'Lan ', # 0x2c
'Li ', # 0x2d
'Nang ', # 0x2e
'Zhi ', # 0x2f
'Gui ', # 0x30
'Gui ', # 0x31
'Qi ', # 0x32
'Xin ', # 0x33
'Pu ', # 0x34
'Sui ', # 0x35
'Shou ', # 0x36
'Kao ', # 0x37
'You ', # 0x38
'Gai ', # 0x39
'Yi ', # 0x3a
'Gong ', # 0x3b
'Gan ', # 0x3c
'Ban ', # 0x3d
'Fang ', # 0x3e
'Zheng ', # 0x3f
'Bo ', # 0x40
'Dian ', # 0x41
'Kou ', # 0x42
'Min ', # 0x43
'Wu ', # 0x44
'Gu ', # 0x45
'He ', # 0x46
'Ce ', # 0x47
'Xiao ', # 0x48
'Mi ', # 0x49
'Chu ', # 0x4a
'Ge ', # 0x4b
'Di ', # 0x4c
'Xu ', # 0x4d
'Jiao ', # 0x4e
'Min ', # 0x4f
'Chen ', # 0x50
'Jiu ', # 0x51
'Zhen ', # 0x52
'Duo ', # 0x53
'Yu ', # 0x54
'Chi ', # 0x55
'Ao ', # 0x56
'Bai ', # 0x57
'Xu ', # 0x58
'Jiao ', # 0x59
'Duo ', # 0x5a
'Lian ', # 0x5b
'Nie ', # 0x5c
'Bi ', # 0x5d
'Chang ', # 0x5e
'Dian ', # 0x5f
'Duo ', # 0x60
'Yi ', # 0x61
'Gan ', # 0x62
'San ', # 0x63
'Ke ', # 0x64
'Yan ', # 0x65
'Dun ', # 0x66
'Qi ', # 0x67
'Dou ', # 0x68
'Xiao ', # 0x69
'Duo ', # 0x6a
'Jiao ', # 0x6b
'Jing ', # 0x6c
'Yang ', # 0x6d
'Xia ', # 0x6e
'Min ', # 0x6f
'Shu ', # 0x70
'Ai ', # 0x71
'Qiao ', # 0x72
'Ai ', # 0x73
'Zheng ', # 0x74
'Di ', # 0x75
'Zhen ', # 0x76
'Fu ', # 0x77
'Shu ', # 0x78
'Liao ', # 0x79
'Qu ', # 0x7a
'Xiong ', # 0x7b
'Xi ', # 0x7c
'Jiao ', # 0x7d
'Sen ', # 0x7e
'Jiao ', # 0x7f
'Zhuo ', # 0x80
'Yi ', # 0x81
'Lian ', # 0x82
'Bi ', # 0x83
'Li ', # 0x84
'Xiao ', # 0x85
'Xiao ', # 0x86
'Wen ', # 0x87
'Xue ', # 0x88
'Qi ', # 0x89
'Qi ', # 0x8a
'Zhai ', # 0x8b
'Bin ', # 0x8c
'Jue ', # 0x8d
'Zhai ', # 0x8e
'[?] ', # 0x8f
'Fei ', # 0x90
'Ban ', # 0x91
'Ban ', # 0x92
'Lan ', # 0x93
'Yu ', # 0x94
'Lan ', # 0x95
'Wei ', # 0x96
'Dou ', # 0x97
'Sheng ', # 0x98
'Liao ', # 0x99
'Jia ', # 0x9a
'Hu ', # 0x9b
'Xie ', # 0x9c
'Jia ', # 0x9d
'Yu ', # 0x9e
'Zhen ', # 0x9f
'Jiao ', # 0xa0
'Wo ', # 0xa1
'Tou ', # 0xa2
'Chu ', # 0xa3
'Jin ', # 0xa4
'Chi ', # 0xa5
'Yin ', # 0xa6
'Fu ', # 0xa7
'Qiang ', # 0xa8
'Zhan ', # 0xa9
'Qu ', # 0xaa
'Zhuo ', # 0xab
'Zhan ', # 0xac
'Duan ', # 0xad
'Zhuo ', # 0xae
'Si ', # 0xaf
'Xin ', # 0xb0
'Zhuo ', # 0xb1
'Zhuo ', # 0xb2
'Qin ', # 0xb3
'Lin ', # 0xb4
'Zhuo ', # 0xb5
'Chu ', # 0xb6
'Duan ', # 0xb7
'Zhu ', # 0xb8
'Fang ', # 0xb9
'Xie ', # 0xba
'Hang ', # 0xbb
'Yu ', # 0xbc
'Shi ', # 0xbd
'Pei ', # 0xbe
'You ', # 0xbf
'Mye ', # 0xc0
'Pang ', # 0xc1
'Qi ', # 0xc2
'Zhan ', # 0xc3
'Mao ', # 0xc4
'Lu ', # 0xc5
'Pei ', # 0xc6
'Pi ', # 0xc7
'Liu ', # 0xc8
'Fu ', # 0xc9
'Fang ', # 0xca
'Xuan ', # 0xcb
'Jing ', # 0xcc
'Jing ', # 0xcd
'Ni ', # 0xce
'Zu ', # 0xcf
'Zhao ', # 0xd0
'Yi ', # 0xd1
'Liu ', # 0xd2
'Shao ', # 0xd3
'Jian ', # 0xd4
'Es ', # 0xd5
'Yi ', # 0xd6
'Qi ', # 0xd7
'Zhi ', # 0xd8
'Fan ', # 0xd9
'Piao ', # 0xda
'Fan ', # 0xdb
'Zhan ', # 0xdc
'Guai ', # 0xdd
'Sui ', # 0xde
'Yu ', # 0xdf
'Wu ', # 0xe0
'Ji ', # 0xe1
'Ji ', # 0xe2
'Ji ', # 0xe3
'Huo ', # 0xe4
'Ri ', # 0xe5
'Dan ', # 0xe6
'Jiu ', # 0xe7
'Zhi ', # 0xe8
'Zao ', # 0xe9
'Xie ', # 0xea
'Tiao ', # 0xeb
'Xun ', # 0xec
'Xu ', # 0xed
'Xu ', # 0xee
'Xu ', # 0xef
'Gan ', # 0xf0
'Han ', # 0xf1
'Tai ', # 0xf2
'Di ', # 0xf3
'Xu ', # 0xf4
'Chan ', # 0xf5
'Shi ', # 0xf6
'Kuang ', # 0xf7
'Yang ', # 0xf8
'Shi ', # 0xf9
'Wang ', # 0xfa
'Min ', # 0xfb
'Min ', # 0xfc
'Tun ', # 0xfd
'Chun ', # 0xfe
'Wu ', # 0xff
)
|
levlaz/circleci.py
|
refs/heads/master
|
circleci/version.py
|
1
|
"""
circleci.version
~~~~~~~~~~~~~~~~
This module provides some helper functions to set the version in various places.
.. versionadded:: 1.2.0
"""
VERSION = "2.0.0"
"""Current version of circleci.py."""
def get_short_version():
"""
Format "short" version in the form of X.Y
:returns: short version
"""
ver = VERSION.split(".")
short_version = "{0}.{1}".format(ver[0], ver[1])
return short_version
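# Minimal sketch (added for illustration; not part of the original module),
# guarded so that importing circleci.version stays side-effect free.
if __name__ == "__main__":
    # With VERSION = "2.0.0" the short form drops the patch component.
    assert get_short_version() == "2.0"
    print(get_short_version())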
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_stat.py
|
24
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_stat
version_added: "1.7"
short_description: Get information about Windows files
description:
- Returns information about a Windows file.
- For non-Windows targets, use the M(stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
required: yes
get_md5:
description:
- Whether to return the checksum of the file. Between Ansible 1.9
and 2.2 this is no longer an MD5, but a SHA1 instead. As of Ansible
2.3 this is back to an MD5. Will return None if host is unable to
use specified algorithm.
- The default of this option changed from C(yes) to C(no) in Ansible 2.5
and will be removed altogether in Ansible 2.9.
- Use C(get_checksum=true) with C(checksum_algorithm=md5) to return an
md5 hash under the C(checksum) return value.
type: bool
default: 'no'
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
type: bool
default: 'yes'
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if
the host is unable to use specified algorithm.
default: sha1
choices: [ md5, sha1, sha256, sha384, sha512 ]
version_added: "2.3"
notes:
- For non-Windows targets, use the M(stat) module instead.
author:
- Chris Church (@cchurch)
'''
EXAMPLES = r'''
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
- name: Obtain information about a folder
win_stat:
path: C:\bar
register: folder_info
- name: Get MD5 checksum of a file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
- name: Get SHA1 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
- name: Get SHA256 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
'''
RETURN = r'''
changed:
description: Whether anything was changed
returned: always
type: boolean
sample: True
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: Attributes of the file at path in raw form
returned: success, path exists
type: string
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified
returned: success, path exists, path is a file, get_checksum == True and
the specified checksum_algorithm is supported
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: The create time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
exists:
description: If the path exists or not
returned: success
type: boolean
sample: True
extension:
description: The extension of the file at path
returned: success, path exists, path is a file
type: string
sample: ".ps1"
filename:
description: The name of the file (without path)
returned: success, path exists, path is a file
type: string
sample: foo.ini
hlnk_targets:
description: List of other files pointing to the same file (hard links), excludes the current file
returned: success, path exists
type: list
sample:
- C:\temp\file.txt
- C:\Windows\update.log
isarchive:
description: If the path is ready for archiving or not
returned: success, path exists
type: boolean
sample: True
isdir:
description: If the path is a directory or not
returned: success, path exists
type: boolean
sample: True
ishidden:
description: If the path is hidden or not
returned: success, path exists
type: boolean
sample: True
isjunction:
description: If the path is a junction point or not
returned: success, path exists
type: boolean
sample: True
islnk:
description: If the path is a symbolic link or not
returned: success, path exists
type: boolean
sample: True
isreadonly:
description: If the path is read only or not
returned: success, path exists
type: boolean
sample: True
isreg:
description: If the path is a regular file
returned: success, path exists
type: boolean
sample: True
isshared:
description: If the path is shared or not
returned: success, path exists
type: boolean
sample: True
lastaccesstime:
description: The last access time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: The last modification time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: Target of the symlink normalized for the remote filesystem
returned: success, path exists and the path is a symbolic link or junction point
type: string
sample: C:\temp\link
lnk_target:
description: Target of the symlink. Note that relative paths remain relative
returned: success, path exists and the path is a symbolic link or junction point
type: string
sample: ..\link
md5:
description: The MD5 checksum of a file (Between Ansible 1.9 and 2.2 this was returned as a SHA1 hash), will be removed in 2.9
returned: success, path exists, path is a file, get_md5 == True
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
nlink:
description: Number of links to the file (hard links)
returned: success, path exists
type: int
sample: 1
owner:
description: The owner of the file
returned: success, path exists
type: string
sample: BUILTIN\Administrators
path:
description: The full absolute path to the file
returned: success, path exists, file exists
type: string
sample: C:\foo.ini
sharename:
description: The name of the share if the folder is shared
returned: success, path exists, file is a directory and isshared == True
type: string
sample: file-share
size:
description: The size in bytes of a file or folder
returned: success, path exists, file is not a link
type: int
sample: 1024
'''
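# A follow-up usage sketch (hypothetical task; shows consuming a registered
# result via the RETURN structure above, using the file_info variable from
# the EXAMPLES section):
#
# - name: Fail when the file is missing
#   fail:
#     msg: C:\foo.ini was not found
#   when: not file_info.stat.exists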
|
SCgeeker/OpenSesame
|
refs/heads/master
|
plugins/joystick/_libjoystick/basejoystick.py
|
3
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
class basejoystick(object):
"""
desc: |
If you insert the `joystick` plugin at the start of your experiment, a
`joystick` object automatically becomes part of the experiment object
and can be accessed from within an inline_script item as `exp.joystick`.
__Function list:__
%--
toc:
mindepth: 2
maxdepth: 2
--%
%--
constant:
arg_joybuttonlist: |
A list of buttons that are accepted or `None` to accept all
buttons.
arg_timeout: |
A timeout value in milliseconds or `None` for no timeout.
--%
"""
def __init__(self, experiment, device=0, joybuttonlist=None, timeout=None):
"""
desc:
Initializes the joystick object.
arguments:
experiment:
desc: An OpenSesame experiment.
type: experiment
keywords:
device:
desc: The joystick device number.
type: int
joybuttonlist:
desc: "%arg_joybuttonlist"
type: [list, NoneType]
timeout:
desc: "%arg_timeout"
type: [int, float, NoneType]
"""
raise NotImplementedError()
def set_joybuttonlist(self, joybuttonlist=None):
"""
desc:
Sets a list of accepted buttons.
keywords:
joybuttonlist:
desc: "%arg_joybuttonlist"
type: [list, NoneType]
"""
if joybuttonlist is None or joybuttonlist == []:
self._joybuttonlist = None
else:
self._joybuttonlist = []
for joybutton in joybuttonlist:
self._joybuttonlist.append(joybutton)
def set_timeout(self, timeout=None):
"""
desc:
Sets a timeout.
keywords:
timeout:
desc: "%arg_timeout"
type: [int, float, NoneType]
"""
self.timeout = timeout
def get_joybutton(self, joybuttonlist=None, timeout=None):
"""
desc:
Collects joystick button input.
keywords:
joybuttonlist:
desc: A list of buttons that are accepted or `None` to use the
default joybuttonlist.
type: [list, NoneType]
timeout:
desc: A timeout value in milliseconds or `None` to use default
timeout.
type: [int, float, NoneType]
returns:
desc: A (joybutton, timestamp) tuple. The joybutton is `None` if a
timeout occurs.
type: tuple
"""
raise NotImplementedError()
def get_joyaxes(self, timeout=None):
"""
desc:
Waits for joystick axes movement.
keywords:
timeout:
desc: A timeout value in milliseconds or `None` to use default
timeout.
type: [int, float, NoneType]
returns:
desc: A (position, timestamp) tuple. The position is None if a
timeout occurs.
type: tuple
"""
raise NotImplementedError()
def get_joyballs(self, timeout=None):
"""
desc:
Waits for joystick trackball movement.
keywords:
timeout:
desc: A timeout value in milliseconds or `None` to use default
timeout.
type: [int, float, NoneType]
returns:
desc: A (position, timestamp) tuple. The position is `None` if a
timeout occurs.
type: tuple
"""
raise NotImplementedError()
def get_joyhats(self, timeout=None):
"""
desc:
Waits for joystick hat movement.
keywords:
timeout:
desc: A timeout value in milliseconds or `None` to use default
timeout.
type: [int, float, NoneType]
returns:
desc: A (position, timestamp) tuple. The position is `None` if a
timeout occurs.
type: tuple
"""
raise NotImplementedError()
def get_joyinput(self, joybuttonlist=None, timeout=None):
"""
desc:
Waits for any joystick input (buttons, axes, hats or balls).
keywords:
joybuttonlist:
desc: A list of buttons that are accepted or `None` to use the
default joybuttonlist.
type: [list, NoneType]
timeout:
desc: A timeout value in milliseconds or `None` to use default
timeout.
type: [int, float, NoneType]
returns:
desc: A (event, value, timestamp) tuple. The value is `None` if a
timeout occurs.
type: tuple
"""
raise NotImplementedError()
def input_options(self):
"""
desc:
Generates a list with the number of available buttons, axes, balls
and hats.
returns:
desc: |
A list with number of inputs as: [buttons, axes, balls,
hats].
type: list
"""
raise NotImplementedError()
def flush(self):
"""
desc:
Clears all pending input, not limited to the joystick.
returns:
desc: True if joyinput was pending (i.e., if there was something
to flush) and False otherwise.
type: bool
"""
raise NotImplementedError()
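# A minimal usage sketch (assumes the joystick plugin was added at the start
# of the experiment, so a concrete subclass of basejoystick is available as
# exp.joystick, as described in the class docstring):
#
#   button, timestamp = exp.joystick.get_joybutton(timeout=3000)
#   if button is None:
#       print('Response timed out')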
|
huongttlan/statsmodels
|
refs/heads/master
|
statsmodels/examples/ex_arch_canada.py
|
34
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 24 07:31:47 2011
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.sandbox.stats.diagnostic as dia
canada_raw = '''\
405.36646642737 929.610513893698 7.52999999999884 386.136109062605
404.639833965913 929.803984550587 7.69999999999709 388.135759111711
403.814883043744 930.318387567177 7.47000000000116 390.540112911955
404.215773188006 931.427687420772 7.2699999999968 393.963817246136
405.046713585284 932.662005594273 7.37000000000262 396.764690917547
404.416738673847 933.550939726636 7.12999999999738 400.021701616327
402.81912737043 933.531526191785 7.40000000000146 400.751498688807
401.977334663103 933.076879439814 8.33000000000175 405.733473658807
402.089724946428 932.12375320915 8.83000000000175 409.05038628366
401.306688373207 930.635939140315 10.429999999993 411.398377747425
401.630171263522 929.097059933419 12.1999999999971 413.019421511595
401.56375463175 928.563335601161 12.7700000000041 415.166962884156
402.815698906973 929.069380060201 12.429999999993 414.662070678749
403.142107624713 930.265516098198 12.2299999999959 415.731936138368
403.078619166324 931.677031559203 11.6999999999971 416.231468866173
403.718785733801 932.138967575148 11.1999999999971 418.14392690728
404.866799027579 932.276686471608 11.2700000000041 419.735231229658
405.636186735378 932.832783118083 11.4700000000012 420.484186198549
405.136285378794 933.733419116009 11.3000000000029 420.930881402259
406.024639922986 934.177206176622 11.1699999999983 422.112404525291
406.412269729241 934.592839827856 11 423.627805811063
406.300932644569 935.606709830033 10.6300000000047 423.988686751336
406.335351723382 936.511085968336 10.2700000000041 424.190212657915
406.773695329549 937.420090112655 10.1999999999971 426.127043353785
405.152547649247 938.415921627889 9.66999999999825 426.857794216679
404.929830809648 938.999170021426 9.60000000000582 426.745717993024
404.576546350926 939.235354789206 9.60000000000582 426.885793656802
404.199492630983 939.679504234357 9.5 428.840253264144
405.94985619596 940.249674139969 9.5 430.122322107039
405.82209202516 941.435818685214 9.02999999999884 430.230679154048
406.446282537108 942.29809597644 8.69999999999709 430.392994893689
407.051247525876 943.532223256403 8.13000000000466 432.028420083791
407.946023990985 944.34896981513 7.87000000000262 433.388625934544
408.179584663105 944.821488789039 7.66999999999825 433.964091817787
408.599812740441 945.067136927327 7.80000000000291 434.484384354647
409.090560656008 945.80672616174 7.7300000000032 436.156879277168
408.704215141145 946.869661504613 7.56999999999971 438.265143944308
408.980275213206 946.876612143542 7.56999999999971 438.763587343863
408.328690037174 947.249692256472 7.33000000000175 439.949811558539
407.885696563307 947.651276093962 7.56999999999971 441.835856392131
407.260532233258 948.183970741596 7.62999999999738 443.176872656863
406.775150765526 948.349239264364 7.59999999999854 444.359199033223
406.179413590339 948.032170661406 8.16999999999825 444.523614807208
405.439793348166 947.106483115935 9.19999999999709 446.969404642587
403.279970790458 946.079554231134 10.1699999999983 450.158586973168
403.364855995771 946.183811678692 10.3300000000017 451.546427290378
403.380680430043 946.22579516585 10.3999999999942 452.298351499968
404.003182812546 945.997783938785 10.3699999999953 453.120066578834
404.47739841708 945.518279080208 10.6000000000058 453.999145996277
404.786782762866 945.351397570438 11 454.955176222477
405.271003921828 945.291785517556 11.3999999999942 455.482381155116
405.382993140508 945.400785900878 11.7299999999959 456.100929020225
405.156416006566 945.905809840959 11.070000000007 457.202696739531
406.470043094757 945.90347041344 11.6699999999983 457.388589594786
406.229308967752 946.319028746014 11.4700000000012 457.779898919191
406.726483850871 946.579621275764 11.3000000000029 457.553538085846
408.578504884277 946.780032223884 10.9700000000012 458.80240271533
409.67671010704 947.628284240641 10.6300000000047 459.05640335985
410.385763295936 948.622057553611 10.1000000000058 459.15782324686
410.539523677181 949.399183241404 9.66999999999825 459.703720275789
410.445258303139 949.948137966398 9.52999999999884 459.703720275789
410.625605270832 949.794494142446 9.47000000000116 460.025814162716
410.867239714014 949.953380175189 9.5 461.025722503696
411.235917829196 950.250239444989 9.27000000000407 461.30391443673
410.663655285725 950.538030883093 9.5 461.4030814421
410.808508412624 950.787128498243 9.42999999999302 462.927726133156
412.115961520089 950.869528648471 9.69999999999709 464.688777934061
412.999407129539 950.928132469716 9.89999999999418 465.071700094375
412.955056755303 951.845722481401 9.42999999999302 464.285125295526
412.82413309368 952.6004761952 9.30000000000291 464.034426099541
413.048874899 953.597552755418 8.86999999999534 463.453479461824
413.611017876145 954.143388344158 8.77000000000407 465.071700094375
413.604781916778 954.542593332134 8.60000000000582 466.088867474481
412.968388225217 955.263136106029 8.33000000000175 466.617120754625
412.265886525002 956.056052852469 8.16999999999825 465.747796561181
412.910594097915 956.79658640007 8.02999999999884 465.899527268299
413.829416419695 957.386480451857 7.90000000000146 466.409925351738
414.22415210314 958.06341570725 7.87000000000262 466.955244491812
415.1677707968 958.716592187518 7.52999999999884 467.628081344681
415.701580225863 959.488142422254 6.93000000000029 467.70256230891
416.867407108435 960.362493080892 6.80000000000291 469.134788222928
417.610399060359 960.783379042937 6.69999999999709 469.336419672322
418.002980476361 961.029029939624 6.93000000000029 470.011666329664
417.266680178544 961.765709811429 6.87000000000262 469.647234439539'''
canada = np.array(canada_raw.split(), float).reshape(-1,4)
k = 2
resarch2 = dia.acorr_lm((canada[:,k]-canada[:,k].mean())**2, maxlag=2, autolag=None, store=1)
print(resarch2)
resarch5 = dia.acorr_lm(canada[:,k]**2, maxlag=12, autolag=None, store=1)
ss = '''\
ARCH LM-test; Null hypothesis: no ARCH effects
Chi-squared = %(chi)-8.4f df = %(df)-4d p-value = %(pval)8.4g
'''
resarch = resarch5
print()
print(ss % dict(chi=resarch[2], df=resarch[-1].resols.df_model, pval=resarch[3]))
#R:FinTS: ArchTest(as.vector(Canada[,3]), lag=5)
'''
ARCH LM-test; Null hypothesis: no ARCH effects
data: as.vector(Canada[, 3])
Chi-squared = 78.878, df = 5, p-value = 1.443e-15
'''
#from ss above
'''
ARCH LM-test; Null hypothesis: no ARCH effects
Chi-squared = 78.849 df = 5 p-value = 1.461e-15
'''
#k=2
#R
'''
ARCH LM-test; Null hypothesis: no ARCH effects
data: as.vector(Canada[, 4])
Chi-squared = 74.6028, df = 5, p-value = 1.121e-14
'''
#mine
'''
ARCH LM-test; Null hypothesis: no ARCH effects
Chi-squared = 74.6028 df = 5 p-value = 1.126e-14
'''
'''
> ArchTest(as.vector(Canada[,4]), lag=12)
ARCH LM-test; Null hypothesis: no ARCH effects
data: as.vector(Canada[, 4])
Chi-squared = 69.6359, df = 12, p-value = 3.747e-10
'''
#mine:
'''
ARCH LM-test; Null hypothesis: no ARCH effects
Chi-squared = 69.6359 df = 12 p-value = 3.747e-10
'''
|
kalxas/geonode
|
refs/heads/master
|
geonode/br/tests/__init__.py
|
6
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.br.tests.test_restore import * # noqa aside
from geonode.br.tests.test_restore_helpers import * # noqa aside
from geonode.br.tests.test_backup import * # noqa aside
|
rohitw1991/smartfrappe
|
refs/heads/develop
|
frappe/boot.py
|
31
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
bootstrap client session
"""
import frappe
import frappe.defaults
import frappe.widgets.page
from frappe.utils import get_gravatar
def get_bootinfo():
"""build and return boot info"""
frappe.set_user_lang(frappe.session.user)
bootinfo = frappe._dict()
hooks = frappe.get_hooks()
doclist = []
# user
get_user(bootinfo)
# system info
bootinfo['sysdefaults'] = frappe.defaults.get_defaults()
bootinfo['server_date'] = frappe.utils.nowdate()
if frappe.session['user'] != 'Guest':
bootinfo['user_info'] = get_fullnames()
bootinfo['sid'] = frappe.session['sid']
# home page
bootinfo.modules = {}
for app in frappe.get_installed_apps():
try:
bootinfo.modules.update(frappe.get_attr(app + ".config.desktop.get_data")() or {})
except ImportError:
pass
except AttributeError:
pass
bootinfo.module_app = frappe.local.module_app
bootinfo.hidden_modules = frappe.db.get_global("hidden_modules")
bootinfo.doctype_icons = dict(frappe.db.sql("""select name, icon from
tabDocType where ifnull(icon,'')!=''"""))
bootinfo.doctype_icons.update(dict(frappe.db.sql("""select name, icon from
tabPage where ifnull(icon,'')!=''""")))
add_home_page(bootinfo, doclist)
add_allowed_pages(bootinfo)
load_translations(bootinfo)
add_timezone_info(bootinfo)
load_conf_settings(bootinfo)
load_print(bootinfo, doclist)
# ipinfo
if frappe.session['data'].get('ipinfo'):
bootinfo['ipinfo'] = frappe.session['data']['ipinfo']
# add docs
bootinfo['docs'] = doclist
for method in hooks.boot_session or []:
frappe.get_attr(method)(bootinfo)
if bootinfo.lang:
bootinfo.lang = unicode(bootinfo.lang)
bootinfo.error_report_email = frappe.get_hooks("error_report_email")
return bootinfo
def load_conf_settings(bootinfo):
from frappe import conf
for key in ['developer_mode']:
if key in conf: bootinfo[key] = conf.get(key)
def add_allowed_pages(bootinfo):
roles = frappe.get_roles()
bootinfo.page_info = {}
for p in frappe.db.sql("""select distinct
tabPage.name, tabPage.modified, tabPage.title
from `tabPage Role`, `tabPage`
where `tabPage Role`.role in (%s)
and `tabPage Role`.parent = `tabPage`.name""" % ', '.join(['%s']*len(roles)),
roles, as_dict=True):
bootinfo.page_info[p.name] = {"modified":p.modified, "title":p.title}
# pages where role is not set are also allowed
for p in frappe.db.sql("""select name, modified, title
from `tabPage` where
(select count(*) from `tabPage Role`
where `tabPage Role`.parent=tabPage.name) = 0""", as_dict=1):
bootinfo.page_info[p.name] = {"modified":p.modified, "title":p.title}
def load_translations(bootinfo):
if frappe.local.lang != 'en':
bootinfo["__messages"] = frappe.get_lang_dict("boot")
bootinfo["lang"] = frappe.lang
def get_fullnames():
"""map of user fullnames"""
ret = frappe.db.sql("""select name,
concat(ifnull(first_name, ''),
if(ifnull(last_name, '')!='', ' ', ''), ifnull(last_name, '')) as fullname,
user_image as image, gender, email
from tabUser where ifnull(enabled, 0)=1""", as_dict=1)
d = {}
for r in ret:
if not r.image:
r.image = get_gravatar()
d[r.name] = r
return d
def get_startup_js():
startup_js = []
for method in frappe.get_hooks().startup_js or []:
startup_js.append(frappe.get_attr(method)() or "")
return "\n".join(startup_js)
def get_user(bootinfo):
"""get user info"""
bootinfo.user = frappe.user.load_user()
def add_home_page(bootinfo, docs):
"""load home page"""
if frappe.session.user=="Guest":
return
home_page = frappe.db.get_default("desktop:home_page")
try:
page = frappe.widgets.page.get(home_page)
except (frappe.DoesNotExistError, frappe.PermissionError):
frappe.message_log.pop()
page = frappe.widgets.page.get('desktop')
bootinfo['home_page'] = page.name
docs.append(page)
def add_timezone_info(bootinfo):
user = bootinfo.user.get("time_zone")
system = bootinfo.sysdefaults.get("time_zone")
if user and user != system:
import frappe.utils.momentjs
bootinfo.timezone_info = {"zones":{}, "rules":{}, "links":{}}
frappe.utils.momentjs.update(user, bootinfo.timezone_info)
frappe.utils.momentjs.update(system, bootinfo.timezone_info)
def load_print(bootinfo, doclist):
print_settings = frappe.db.get_singles_dict("Print Settings")
print_settings.doctype = ":Print Settings"
doclist.append(print_settings)
load_print_css(bootinfo, print_settings)
def load_print_css(bootinfo, print_settings):
bootinfo.print_css = frappe.get_attr("frappe.templates.pages.print.get_print_style")(print_settings.print_style or "Modern")
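# A sketch of a boot_session hook as consumed by get_bootinfo above
# (hypothetical dotted path "myapp.startup.boot_session", registered under
# boot_session in an app's hooks.py; each hook receives the bootinfo dict
# and may mutate it):
#
#   def boot_session(bootinfo):
#       bootinfo.my_custom_key = "value"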
|
signed/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyProtectedMemberInspection/trueNegative.py
|
83
|
__author__ = 'ktisha'
class A:
def __init__(self):
self._a = 1
def foo(self):
self.b = 1
class B(A):
def __init__(self):
A.__init__(self)
self.b = self._a
|
asreimer/lmfit2
|
refs/heads/master
|
python/dmap.py
|
1
|
import os
import struct
import time
import gc
import numpy as np
import sys
DMAP = 0
CHAR = 1
SHORT = 2
INT = 3
FLOAT = 4
DOUBLE = 8
STRING = 9
LONG = 10
UCHAR = 16
USHORT = 17
UINT = 18
ULONG = 19
DMAP_DATA_KEYS=[0,1,2,3,4,8,9,10,16,17,18,19]
LOGGING = False
class EmptyFileError(Exception):
"""Raised if the dmap file is empty or corrupted
"""
pass
class DmapDataError(Exception):
"""Raised if there is an error in parsing of data
"""
pass
class RawDmapScaler(object):
"""Holds all the same data that the original C dmap scaler struct holds +
some additional type format identifiers
"""
def __init__(self,name,dmap_type,data_type_fmt,mode,data):
self.dmap_type = dmap_type
self.name = name
self.mode = mode
self.data = data
self.data_type_fmt = data_type_fmt
def get_type(self):
"""Returns the DMAP type of the scaler
:returns: dmap_type
"""
return self.dmap_type
def get_name(self):
"""Returns the name of the scaler
:returns: name
"""
return self.name
def get_mode(self):
"""Returns the mode of the scaler
:returns: mode
"""
return self.mode
def get_data(self):
"""Returns the scaler data
:returns: data
"""
return self.data
def get_datatype_fmt(self):
"""Returns the string format identifier of the scaler that
corresponds to the DMAP type
:returns: data_type_fmt
"""
return self.data_type_fmt
def set_type(self,data_type):
"""Sets the DMAP type of the scaler
:param data_type: DMAP type of the scaler
"""
self.dmap_type = data_type
def set_name(self,name):
"""Sets the name of the scaler
:param name: scaler name
"""
self.name = name
def set_mode(self,mode):
"""Sets the mode of the scaler
:param mode: scaler mode
"""
self.mode = mode
def set_data(self,data):
"""Sets the data of the scaler
:param data: data for the scaler to contain
"""
self.data = data
def set_datatype_fmt(self,fmt):
"""Sets the string format identifier of the scaler that
corresponds to the DMAP type of the scaler
:param fmt: DMAP type string format of the scaler
"""
self.data_type_fmt = fmt
class RawDmapArray(object):
"""Holds all the same data that the original C dmap array struct holds +
some additional type information
"""
def __init__(self,name,dmap_type,data_type_fmt,mode,dimension,arr_dimensions,data):
self.dmap_type = dmap_type
self.name = name
self.mode = mode
self.dimension = dimension
self.arr_dimensions = arr_dimensions
self.data = data
self.data_type_fmt = data_type_fmt
def get_type(self):
"""Returns the DMAP type of the array
:returns: dmap_type
"""
return self.dmap_type
def get_name(self):
"""Returns the name of the array
:returns: name
"""
return self.name
def get_mode(self):
"""Returns the mode of the array
:returns: mode
"""
return self.mode
def get_dimension(self):
"""Returns the number of dimensions in the array
:returns: dimension
"""
return self.dimension
def get_arr_dimensions(self):
"""Returns a list of array dimensions
:returns: arr_dimensions
"""
return self.arr_dimensions
def get_data(self):
"""Returns the array data
:returns: data
"""
return self.data
def get_datatype_fmt(self):
"""Returns the string format identifier of the scaler that
corresponds to the DMAP type
:returns: data_type_fmt
"""
return self.data_type_fmt
def set_type(self,data_type):
"""Sets the DMAP type of the array
:param data_type: DMAP type of the array
"""
self.dmap_type = data_type
def set_name(self,name):
"""Sets the name of the array
:param name: name of the array
"""
self.name = name
def set_mode(self,mode):
"""Sets the mode of the array
:param mode: the mode of the array
"""
self.mode = mode
def set_dimension(self,dimension):
"""Sets the number of array dimensions
:param dimension: total array dimensions
"""
self.dimension = dimension
def set_arr_dimensions(self,arr_dimensions):
"""Sets the list of dimensions for the array
:param arr_dimensions: list of dimensions for the array
"""
self.arr_dimensions = arr_dimensions
def set_data(self,data):
"""Sets the array data
:param data: the data associated with the array
"""
self.data = data
def set_datatype_fmt(self,fmt):
"""Sets the DMAP type string format identifier of the array
:param fmt: the string format identifier
"""
self.data_type_fmt = fmt
class RawDmapRecord(object):
"""Contains the arrays and scalers associated with a dmap record.
"""
def __init__(self):
self.num_scalers = 0
self.num_arrays = 0
self.scalers = []
self.arrays = []
def set_num_scalers(self,num_scalers):
"""Sets the number of scalers in this DMAP record
:param num_scalers: number of scalers
"""
self.num_scalers = num_scalers
def set_num_arrays(self,num_arrays):
"""Sets the number of arrays in this DMAP record
:param num_arrays: number of arrays
"""
self.num_arrays = num_arrays
def add_scaler(self,new_scaler):
"""Adds a new scaler to the DMAP record
:param new_scaler: new RawDmapScaler to add
"""
self.scalers.append(new_scaler)
self.num_scalers = self.num_scalers + 1
def set_scalers(self,scalers):
"""Sets the DMAP scaler list to a new list
:param scalers: new list of scalers
"""
self.scalers = scalers
self.num_scalers = len(scalers)
def add_array(self,new_array):
"""Adds a new array to the DMAP record
:param new_array: new RawDmapArray to add
"""
self.arrays.append(new_array)
self.num_arrays = self.num_arrays + 1
def set_arrays(self,arrays):
"""Sets the DMAP array list to a new list
:param arrays: new list of arrays
"""
self.arrays = arrays
self.num_arrays = len(arrays)
def get_num_scalers(self):
"""Returns the number of scalers in the DMAP record
:returns: num_scalers
"""
return self.num_scalers
def get_num_arrays(self):
"""Returns the number of arrays in the DMAP record
:returns: num_arrays
"""
return self.num_arrays
def get_scalers(self):
"""Returns the list of scalers in the DMAP record
:returns: scalers
"""
return self.scalers
def get_arrays(self):
"""Returns the list of arrays in the DMAP record
:returns: arrays
"""
return self.arrays
class RawDmapRead(object):
"""Contains members and methods relating to parsing files into raw Dmap objects.
Takes in a file path or byte buffer to decode. By default it opens a file,
but it can optionally use a stream such as one from a real-time socket connection
"""
def __init__(self,dmap_data,stream=False):
self.cursor = 0
self.dmap_records = []
#parses the whole file/stream into a byte array
if stream is False:
with open(dmap_data,'rb') as f:
self.dmap_bytearr = bytearray(f.read())
if os.stat(dmap_data).st_size == 0:
raise EmptyFileError("File is empty")
else:
if len(dmap_data) == 0:
message = "Stream contains no data!"
raise EmptyFileError(message)
self.dmap_bytearr = bytearray(dmap_data)
self.test_initial_data_integrity()
#parse bytes until end of byte array
pr = self.parse_record
add_rec = self.dmap_records.append
end_byte = len(self.dmap_bytearr)
counter = 0
while self.cursor < end_byte:
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("TOP LEVEL LOOP: iteration {0}\n".format(counter))
new_record = pr()
add_rec(new_record)
counter = counter + 1
#print(self.cursor,len(self.dmap_bytearr))
if (self.cursor > end_byte):
message = "Bytes attempted {0} does not match the size of file {1}".format(self.cursor,end_byte)
raise DmapDataError(message)
def test_initial_data_integrity(self):
"""Quickly parses the data to add up data sizes and determine if the records are intact.
There still may be errors, but this is a quick initial check
"""
end_byte = len(self.dmap_bytearr)
size_total = 0
while self.cursor < end_byte:
code = self.read_data('i')
size = self.read_data('i')
#print(code,size,end_byte)
if size <= 0:
message = """INITIAL INTEGRITY: Initial integrity check shows size <= 0.
Data is likely corrupted"""
raise DmapDataError(message)
elif size > end_byte:
message = """INITIAL INTEGRITY: Initial integrity check shows
total sizes mismatch buffer size. Data is likely corrupted"""
raise DmapDataError(message)
size_total = size_total + size
if size_total > end_byte:
message = """INTIAL INTEGRITY: Initial integrity check shows record size mismatch.
Data is likely corrupted"""
raise DmapDataError(message)
self.cursor = self.cursor + size - 2 * self.get_num_bytes('i')
#print (end_byte,size_total)
if size_total != end_byte:
#print(size_total,end_byte)
message = """INITIAL INTEGRITY: Initial integrity check shows total size < buffer size.
Data is likely corrupted"""
raise DmapDataError(message)
self.cursor = 0
def parse_record(self):
"""Parses a single dmap record from the buffer
"""
bytes_already_read = self.cursor
code = self.read_data('i')
size = self.read_data('i')
#print(code,size,self.cursor,len(self.dmap_bytearr))
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE RECORD: code {0} size {1}\n".format(code,size))
#adding 8 bytes because code+size are part of the record.
if size > (len(self.dmap_bytearr) - self.cursor + 2 * self.get_num_bytes('i')):
message = "PARSE RECORD: Integrity check shows record size bigger than remaining buffer. Data is likely corrupted"
raise DmapDataError(message)
elif size <= 0:
message = "PARSE RECORD: Integrity check shows record size <= 0. Data is likely corrupted"
raise DmapDataError(message)
num_scalers = self.read_data('i')
num_arrays = self.read_data('i')
#print("num scalers",num_scalers,"num arrays",num_arrays)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE RECORD: num_scalers {0} num_arrays {1}\n".format(num_scalers,num_arrays))
if(num_scalers <= 0):
message = "PARSE RECORD: Number of scalers is 0 or negative."
raise DmapDataError(message)
elif(num_arrays <= 0):
message = "PARSE RECORD: Number of arrays is 0 or negative."
raise DmapDataError(message)
elif (num_scalers + num_arrays) > size:
message = "PARSE RECORD: Invalid number of record elements. Array or scaler field is likely corrupted."
raise DmapDataError(message)
dm_rec = RawDmapRecord()
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE RECORD: processing scalers\n")
scalers = [self.parse_scaler() for sc in range(0,num_scalers)]
dm_rec.set_scalers(scalers)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE RECORD: processing arrays\n")
arrays = [self.parse_array(size) for ar in range(0,num_arrays)]
dm_rec.set_arrays(arrays)
if (self.cursor - bytes_already_read) != size:
message = "PARSE RECORD: Bytes read {0} does not match the records size field {1}".format(self.cursor-bytes_already_read,size)
raise DmapDataError(message)
return dm_rec
def parse_scaler(self):
"""Parses a new dmap scaler from bytearray
:returns: new RawDmapScaler with parsed data
"""
mode = 6
name = self.read_data('s')
#print("name",name)
data_type = self.read_data('c')
#print("datatype",data_type)
if data_type not in DMAP_DATA_KEYS:
message = "PARSE_SCALER: Data type is corrupted. Record is likely corrupted"
raise DmapDataError(message)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE SCALER: name {0} data_type {1}\n".format(name,data_type))
data_type_fmt = self.convert_datatype_to_fmt(data_type)
if data_type_fmt != DMAP:
data = self.read_data(data_type_fmt)
#print("data",data)
else:
data = self.parse_record()
return RawDmapScaler(name,data_type,data_type_fmt,mode,data)
def parse_array(self,record_size):
"""Parses a new dmap array from bytearray
:returns: new RawDmapArray with parsed data
"""
mode = 7
name = self.read_data('s')
#print("name",name)
data_type = self.read_data('c')
#print("datatype",data_type)
if data_type not in DMAP_DATA_KEYS:
message = "PARSE_ARRAY: Data type is corrupted. Record is likely corrupted"
raise DmapDataError(message)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE ARRAY: name {0} data_type {1}\n".format(name,data_type))
data_type_fmt = self.convert_datatype_to_fmt(data_type)
array_dimension = self.read_data('i')
if array_dimension > record_size:
message = """PARSE_ARRAY: Parsed # of array dimensions are larger than
record size. Record is likely corrupted"""
raise DmapDataError(message)
elif array_dimension <= 0:
message = """PARSE ARRAY: Parsed # of array dimensions are zero or
negative. Record is likely corrupted"""
raise DmapDataError(message)
dimensions = [self.read_data('i') for i in range(0,array_dimension)]
if not dimensions:
message = "PARSE ARRAY: Array dimensions could not be parsed."
raise DmapDataError(message)
elif sum(x <= 0 for x in dimensions) > 0 and name != "slist": # slist is exception
message = """PARSE ARRAY: Array dimension is zero or negative.
Record is likely corrupted"""
raise DmapDataError(message)
for x in dimensions:
if x >= record_size:
message = "PARSE_ARRAY: Array dimension exceeds record size."
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE ARRAY: dimensions {0}\n".format(dimensions))
#total_elements = reduce(lambda x,y: x*y,dimensions)
total_elements = 1
for dim in dimensions:
total_elements = total_elements * dim
if total_elements > record_size:
message = """PARSE_ARRAY: Total array elements > record size."""
raise DmapDataError(message)
elif total_elements * self.get_num_bytes(data_type_fmt) > record_size:
message = "PARSE ARRAY: Array size exceeds record size. Data is likely corrupted"
raise DmapDataError(message)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("PARSE ARRAY: total elements {0} size {1}\n".format(total_elements,self.get_num_bytes(data_type_fmt)))
#parsing an array of strings requires a different method. Numpy can't
#parse strings or dmaps into arrays the way it can for other types because it doesn't
#know the sizes. They have to be manually read the slow way. Because chars
#are encoded as hex literals, they have to be read one at a time to make sense.
if data_type_fmt == 's' or data_type_fmt == 'c' or data_type_fmt == DMAP:
data_array = np.array(self.build_n_dimension_list(dimensions,data_type_fmt))
else:
data_array = self.read_numerical_array(data_type_fmt,dimensions,total_elements)
return RawDmapArray(name,data_type,data_type_fmt,mode,array_dimension,dimensions,data_array)
def build_n_dimension_list(self,dim,data_type_fmt):
"""This is used to build a list of multiple dimensions without knowing
them ahead of time. This method is used to manually parse arrays from a dmap
:param dim: list of dimensions
:param data_type_fmt: string format identifier of the DMAP data type
:returns: n dimensional list of data parsed from buffer
"""
dim_data = []
dimension = dim.pop()
if not dim:
dim_data = [self.read_data(data_type_fmt) for i in range(0,dimension)]
##print("data",data)
else:
dim_data = [self.build_n_dimension_list(list(dim),data_type_fmt) for i in range(0,dimension)]
return dim_data
def read_data(self,data_type_fmt):
"""Reads an individual data type from the buffer
Given a format identifier, a number of bytes are read from the buffer
and turned into the correct data type
:param data_type_fmt: a string format identifier for the DMAP data type
:returns: parsed data
"""
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("READ DATA: cursor {0} bytelen {1}\n".format(self.cursor,len(self.dmap_bytearr)))
if self.cursor >= len(self.dmap_bytearr):
message = "READ DATA: Cursor extends out of buffer. Data is likely corrupted"
raise DmapDataError(message)
if len(self.dmap_bytearr) - self.cursor < self.get_num_bytes(data_type_fmt):
message = "READ DATA: Byte offsets into buffer are not properly aligned. Data is likely corrupted"
raise DmapDataError(message)
if data_type_fmt == DMAP:
return self.parse_record()
elif data_type_fmt == 'c':
data = self.dmap_bytearr[self.cursor]
#print (data,data_type)
self.cursor = self.cursor + self.get_num_bytes(data_type_fmt)
elif data_type_fmt != 's':
data = struct.unpack_from(data_type_fmt,buffer(self.dmap_bytearr),self.cursor)
#print(data,data_type)
self.cursor = self.cursor + self.get_num_bytes(data_type_fmt)
else:
byte_counter = 0
while self.dmap_bytearr[self.cursor + byte_counter] != 0:
#print(self.dmap_bytearr[self.cursor + byte_counter])
byte_counter = byte_counter + 1
if self.cursor + byte_counter >= len(self.dmap_bytearr):
message = "READ DATA: String is improperly terminated. Dmap record is corrupted"
raise DmapDataError(message)
char_count = '{0}s'.format(byte_counter)
data = struct.unpack_from(char_count,buffer(self.dmap_bytearr),self.cursor)
self.cursor = self.cursor + byte_counter + 1
if data_type_fmt == 'c':
return data
else:
return data[0] #struct.unpack returns a tuple. [0] is the actual data
def read_numerical_array(self,data_type_fmt,dimensions,total_elements):
"""Reads a numerical array from bytearray using numpy
Instead of reading array elements one by one, this method uses numpy to read an
entire section of the buffer into a numpy array and then reshapes it to the correct
dimensions. This method is preferred due to a massive performance increase
:param data_type_fmt: a string format identifier for the DMAP data type
:param dimensions: a list of each array dimension
:param total_elements: total elements in the array
:returns: parsed numpy array in the correct shape
"""
#print(dimensions,total_elements)
start = self.cursor
end = self.cursor+total_elements*self.get_num_bytes(data_type_fmt)
if end > len(self.dmap_bytearr):
message = "READ_NUMERICAL_ARRAY: Array end point extends past length of buffer"
raise DmapDataError(message)
buf = self.dmap_bytearr[self.cursor:self.cursor+total_elements*self.get_num_bytes(data_type_fmt)]
try:
array = np.frombuffer(buf,dtype=data_type_fmt)
except ValueError:
message = "READ_NUMERICAL_ARRAY: Array buffer is not a multiple of the data size. Likely due to corrupted array parameters in the record"
raise DmapDataError(message)
if len(dimensions) > 1:
array = array.reshape(tuple(dimensions[::-1])) #reshape expects a tuple and dimensions reversed from what is parsed
self.cursor = self.cursor + total_elements * self.get_num_bytes(data_type_fmt)
if LOGGING == True:
with open("logfile.txt",'a') as f:
f.write("READ NUMERICAL ARRAY: Successfully read array\n")
return array
def get_num_bytes(self,data_type_fmt):
"""Returns the number of bytes associated with each type
:param data_type_fmt: a string format identifier for the DMAP data type
:returns: size in bytes of the data type
"""
return {
'c' : 1,
'B' : 1,
'h' : 2,
'H' : 2,
'i' : 4,
'I' : 4,
'q' : 8,
'Q' : 8,
'f' : 4,
'd' : 8,
}.get(data_type_fmt,0)
def convert_datatype_to_fmt(self,data_type):
"""Converts a parsed data type header field from the dmap record to
a data type character format
:param data_type: DMAP data type from parsed record
:returns: a string format identifier for the DMAP data type
"""
return {
CHAR : 'c',
SHORT : 'h',
INT : 'i',
FLOAT : 'f',
DOUBLE : 'd',
STRING : 's',
LONG : 'q',
UCHAR : 'B' ,
USHORT : 'H' ,
UINT : 'I',
ULONG : 'Q',
}.get(data_type,DMAP)
def get_records(self):
"""Returns the list of parsed DMAP records
:returns: dmap_records
"""
return self.dmap_records
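# A minimal usage sketch for RawDmapRead (hypothetical file name; parses a
# dmap file and inspects the first parsed record):
#
#   reader = RawDmapRead('example.rawacf')
#   first_record = reader.get_records()[0]
#   print(first_record.get_num_scalers(), first_record.get_num_arrays())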
class RawDmapWrite(object):
"""Contains members and methods relating to encoding dictionaries into a raw
dmap buffer.
The ud_types are used to override the default types for writing. Useful
if you want to write a number as a char instead of an int, for example.
"""
def __init__(self, data_dicts,file_path,ud_types={}):
super(RawDmapWrite, self).__init__()
self.data_dict = data_dicts
self.records = []
self.ud_types = ud_types
self.dmap_bytearr = bytearray()
for dd in data_dicts:
self.data_dict_to_dmap_rec(dd)
for rc in self.records:
self.dmap_record_to_bytes(rc)
#print(self.dmap_bytearr)
with open(file_path,'wb') as f:
f.write(self.dmap_bytearr)
def data_dict_to_dmap_rec(self,data_dict):
""" This method converts a data dictionary to a dmap record.
The user defined dictionary specifies if any default types are to be
overridden with your own type. This functions runs through each key/val
element of the dictionary and creates a RawDmapArray or RawDmapScaler
and adds them to a RawDmapRecord. Any python lists are converted to
numpy arrays for fast and efficient conversion to bytes
:param data_dict: a dictionary of data to encode
"""
record = RawDmapRecord()
for k,v in data_dict.iteritems():
if k in self.ud_types:
data_type_fmt = self.ud_types[k]
else:
data_type_fmt = self.find_datatype_fmt(v)
if data_type_fmt == '':
"""TODO: handle recursive dmap writing"""
pass
if isinstance(v, (list, np.ndarray)):
mode = 7
if isinstance(v,list):
if data_type_fmt == 'c':
data = np.asarray([chr(x) for x in v],dtype='c')
elif data_type_fmt == 's':
data = np.asarray(v,dtype=object)
else:
data = np.asarray(v,dtype=data_type_fmt)
if isinstance(v,np.ndarray):
if data_type_fmt == 'c' and v.dtype != 'S1':
data = np.asarray([chr(x) for x in v],dtype='c')
# elif data_type_fmt == 's':
# data = np.asarray(v,dtype=object)
else:
data = np.asarray(v,dtype=data_type_fmt)
dmap_type = self.convert_fmt_to_dmap_type(data_type_fmt)
#dimensions need to be reversed to match what dmap expects
arr_dimensions = data.shape[::-1]
dimension = len(arr_dimensions)
array = RawDmapArray(k,dmap_type,data_type_fmt,mode,dimension,arr_dimensions,data)
record.add_array(array)
else:
dmap_type = self.convert_fmt_to_dmap_type(data_type_fmt)
mode = 6
scaler = RawDmapScaler(k,dmap_type,data_type_fmt,mode,v)
record.add_scaler(scaler)
self.records.append(record)
def find_datatype_fmt(self,data):
"""Input could be an array of any dimensions so will recurse until
fundamental type is found
:param data: data for which to find its type format
:returns: a string format identifier for the python data type
"""
if isinstance(data,np.ndarray) or isinstance(data,list):
return self.find_datatype_fmt(data[0])
else:
return self.type_to_fmt(data)
def dmap_record_to_bytes(self,record):
"""This method converts a dmap record to the byte format that is written to file.
Format is code,length of record,number of scalers,number of arrays, followed by
the scalers and then the arrays
:param record: a RawDmapRecord
"""
scalers = record.get_scalers()
arrays = record.get_arrays()
code = 65537
num_scalers = record.get_num_scalers()
num_arrays = record.get_num_arrays()
byte_arr = bytearray()
for sc in scalers:
byte_arr.extend(self.dmap_scaler_to_bytes(sc))
for ar in arrays:
byte_arr.extend(self.dmap_array_to_bytes(ar))
# + 16 for length,code,num scalers, and num arrays fields
length = len(byte_arr) + 16
#print(length)
code_bytes = struct.pack('i',code)
length_bytes = struct.pack('i',length)
num_scalers_bytes = struct.pack('i',num_scalers)
num_arrays_bytes = struct.pack('i',num_arrays)
self.dmap_bytearr.extend(code_bytes)
self.dmap_bytearr.extend(length_bytes)
self.dmap_bytearr.extend(num_scalers_bytes)
self.dmap_bytearr.extend(num_arrays_bytes)
self.dmap_bytearr.extend(byte_arr)
def dmap_scaler_to_bytes(self,scaler):
"""This method converts a RawDmapScaler to the byte format written out.
The bytes are written as a name, then type, then data
:param scaler: a RawDmapScaler
:returns: total bytes the scaler will take up
"""
name = "{0}\0".format(scaler.get_name())
struct_fmt = '{0}s'.format(len(name))
name_bytes = struct.pack(struct_fmt,name)
dmap_type_bytes = struct.pack('c',chr(scaler.get_type()))
data_type_fmt = scaler.get_datatype_fmt()
if data_type_fmt == 's':
data = "{0}\0".format(scaler.get_data())
struct_fmt = '{0}s'.format(len(data))
data_bytes = struct.pack(struct_fmt,data)
#data_bytes = scaler.get_data().encode('utf-8') + chr(0)
elif data_type_fmt == 'c':
data_bytes = chr(scaler.get_data())
else:
data_bytes = struct.pack(data_type_fmt,scaler.get_data())
total_bytes = name_bytes + dmap_type_bytes + data_bytes
return total_bytes
def dmap_array_to_bytes(self,array):
"""This method converts a RawDmapArray to the byte format to be written out.
The format is name,then type, number of dimensions, dimensions, array data.
:param array: a RawDmapArray
:returns: total bytes the array will take up
"""
name = "{0}\0".format(array.get_name())
struct_fmt = '{0}s'.format(len(name))
name_bytes = struct.pack(struct_fmt,name)
dmap_type_bytes = struct.pack('c',chr(array.get_type()))
data_type_fmt = array.get_datatype_fmt()
dimension_bytes = struct.pack('i',array.get_dimension())
arr_dimensions_bytes = bytes()
for dim in array.get_arr_dimensions():
arr_dimensions_bytes = arr_dimensions_bytes + struct.pack('i',dim)
data_bytes = array.get_data().tostring()
total_bytes = name_bytes + dmap_type_bytes + dimension_bytes + arr_dimensions_bytes + data_bytes
return total_bytes
def type_to_fmt(self,data):
"""Finds data types and converts them to a format specifier for struct or numpy
packing methods
:param data: data for which to find type
:returns: a string format identifier for the python data type
"""
if isinstance(data,int):
return 'i'
elif isinstance(data,str):
return 's'
elif isinstance(data,float):
return 'f'
elif isinstance(data,np.float32):
return 'f'
elif isinstance(data,np.float64):
return 'd'
elif isinstance(data,np.char):
return 'c'
elif isinstance(data,np.int8):
return 'c'
elif isinstance(data,np.int16):
return 'h'
elif isinstance(data,np.int32):
return 'i'
elif isinstance(data,np.int64):
return 'q'
elif isinstance(data,np.uint8):
return 'B'
elif isinstance(data,np.uint16):
return 'H'
elif isinstance(data,np.uint32):
return 'I'
elif isinstance(data,np.uint64):
return 'Q'
else:
return ''
def convert_fmt_to_dmap_type(self,fmt):
"""Converts a format specifier to a dmap type to be written as part of buffer
:param fmt: a string format identifier for the DMAP data type
:returns: DMAP type
"""
return {
'c' : CHAR,
'h' : SHORT,
'i' : INT,
'f' : FLOAT,
'd' : DOUBLE,
's' : STRING,
'q' : LONG,
'B' : UCHAR,
'H' : USHORT,
'I' : UINT,
'Q' : ULONG,
}.get(fmt,None)
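# A minimal sketch of the ud_types override described in the class docstring
# (hypothetical record contents and output path; forces the 'stid' field to
# be written as a DMAP short instead of the default int):
#
#   recs = [{'stid': 65, 'combf': 'example'}]
#   RawDmapWrite(recs, 'out.dmap', ud_types={'stid': 'h'})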
def dicts_to_file(data_dicts,file_path,file_type=''):
"""This function abstracts the type overrides for the main SuperDARN
file types. These dictionaries write out the types to be compatible
with C DMAP reading
:param data_dicts: python dictionaries to write out
:param file_path: path for which to write the data to file
:param file_type: type of SuperDARN file the data corresponds to
"""
rawacf_types = {
'radar.revision.major' : 'c',
'radar.revision.minor' : 'c',
'origin.code' : 'c',
'origin.time' : 's',
'origin.command' : 's',
'cp' : 'h',
'stid' : 'h',
'time.yr' : 'h',
'time.mo' : 'h',
'time.dy' : 'h',
'time.hr' : 'h',
'time.mt' : 'h',
'time.sc' : 'h',
'time.us' : 'i',
'txpow' : 'h',
'nave' : 'h',
'atten' : 'h',
'lagfr' : 'h',
'smsep' : 'h',
'ercod' : 'h',
'stat.agc' : 'h',
'stat.lopwr' : 'h',
'noise.search' : 'f',
'noise.mean' : 'f',
'channel' : 'h',
'bmnum' : 'h',
'bmazm' : 'f',
'scan' : 'h',
'offset' : 'h',
'rxrise' : 'h',
'intt.sc' : 'h',
'intt.us' : 'i',
'txpl' : 'h',
'mpinc' : 'h',
'mppul' : 'h',
'mplgs' : 'h',
'nrang' : 'h',
'frang' : 'h',
'rsep' : 'h',
'xcf' : 'h',
'tfreq' : 'h',
'mxpwr' : 'i',
'lvmax' : 'i',
'rawacf.revision.major' : 'i',
'rawacf.revision.minor' : 'i',
'combf' : 's',
'thr' : 'f',
'ptab' : 'h',
'ltab' : 'h',
'slist' : 'h',
'pwr0' : 'f',
'acfd' : 'f',
'xcfd' : 'f',
}
mapfile_types = {
'start.year' : 'h',
'start.month' : 'h',
'start.day' : 'h',
'start.hour' : 'h',
'start.minute' : 'h',
'start.second' : 'd',
'end.year' : 'h',
'end.month' : 'h',
'end.day' : 'h',
'end.hour' : 'h',
'end.minute' : 'h',
'end.second' : 'd',
'map.major.revision' : 'h',
'map.minor.revision' : 'h',
'source' : 's',
'doping.level' : 'h',
'model.wt' : 'h',
'error.wt' : 'h',
'IMF.flag' : 'h',
'IMF.delay' : 'h',
'IMF.Bx' : 'd',
'IMF.By' : 'd',
'IMF.Bz' : 'd',
'model.angle' : 's',
'model.level' : 's',
'hemisphere' : 'h',
'fit.order' : 'h',
'latmin' : 'f',
'chi.sqr' : 'd',
'chi.sqr.dat' : 'd',
'rms.err' : 'd',
'lon.shft' : 'f',
'lat.shft' : 'f',
'mlt.start' : 'd',
'mlt.end' : 'd',
'mlt.av' : 'd',
'pot.drop' : 'd',
'pot.drop.err' : 'd',
'pot.max' : 'd',
'pot.max.err' : 'd',
'pot.min' : 'd',
'pot.min.err' : 'd',
'stid' : 'h',
'channel' : 'h',
'nvec' : 'h',
'freq' : 'f',
'major.revision' : 'h',
'minor.revision' : 'h',
'program.id' : 'h',
'noise.mean' : 'f',
'noise.sd' : 'f',
'gsct' : 'h',
'v.min' : 'f',
'v.max' : 'f',
'p.min' : 'f',
'p.max' : 'f',
'w.min' : 'f',
'w.max' : 'f',
've.min' : 'f',
've.max' : 'f',
'vector.mlat' : 'f',
'vector.mlon' : 'f',
'vector.kvect' : 'f',
'vector.stid' : 'h',
'vector.channel' : 'h',
'vector.index' : 'i',
'vector.vel.median' : 'f',
'vector.vel.sd' : 'f',
'N' : 'd',
'N+1' : 'd',
'N+2' : 'd',
'N+3' : 'd',
'model.mlat' : 'f',
'model.mlon' : 'f',
'model.kvect' : 'f',
'model.vel.median' : 'f',
'boundary.mlat' : 'f',
'boundary.mlon' : 'f',
}
fitacf_types = {
'radar.revision.major' : 'c',
'radar.revision.minor' : 'c',
'origin.code' : 'c',
'origin.time' : 's',
'origin.command' : 's',
'cp' : 'h',
'stid' : 'h',
'time.yr' : 'h',
'time.mo' : 'h',
'time.dy' : 'h',
'time.hr' : 'h',
'time.mt' : 'h',
'time.sc' : 'h',
'time.us' : 'i',
'txpow' : 'h',
'nave' : 'h',
'atten' : 'h',
'lagfr' : 'h',
'smsep' : 'h',
'ercod' : 'h',
'stat.agc' : 'h',
'stat.lopwr' : 'h',
'noise.search' : 'f',
'noise.mean' : 'f',
'channel' : 'h',
'bmnum' : 'h',
'bmazm' : 'f',
'scan' : 'h',
'offset' : 'h',
'rxrise' : 'h',
'intt.sc' : 'h',
'intt.us' : 'i',
'txpl' : 'h',
'mpinc' : 'h',
'mppul' : 'h',
'mplgs' : 'h',
'nrang' : 'h',
'frang' : 'h',
'rsep' : 'h',
'xcf' : 'h',
'tfreq' : 'h',
'mxpwr' : 'i',
'lvmax' : 'i',
'fitacf.revision.major' : 'i',
'fitacf.revision.minor' : 'i',
'combf' : 's',
'noise.sky' : 'f',
'noise.lag0' : 'f',
'noise.vel' : 'f',
'ptab' : 'h',
'ltab' : 'h',
'pwr0' : 'f',
'slist' : 'h',
'nlag' : 'h',
'qflg' : 'c',
'gflg' : 'c',
'p_l' : 'f',
'p_l_e' : 'f',
'p_s' : 'f',
'p_s_e' : 'f',
'v' : 'f',
'v_e' : 'f',
'w_l' : 'f',
'w_l_e' : 'f',
'w_s' : 'f',
'w_s_e' : 'f',
'sd_l' : 'f',
'sd_s' : 'f',
'sd_phi' : 'f',
'x_qflg' : 'c',
'x_gflg' : 'c',
'x_p_l' : 'f',
'x_p_l_e' : 'f',
'x_p_s' : 'f',
'x_p_s_e' : 'f',
'x_v' : 'f',
'x_v_e' : 'f',
'x_w_l' : 'f',
'x_w_l_e' : 'f',
'x_w_s' : 'f',
'x_w_s_e' : 'f',
'phi0' : 'f',
'phi0_e' : 'f',
'elv' : 'f',
'elv_low' : 'f',
'elv_high' : 'f',
'x_sd_l' : 'f',
'x_sd_s' : 'f',
'x_sd_phi' : 'f',
}
iqdat_types = {
'radar.revision.major' : 'c',
'radar.revision.minor' : 'c',
'origin.code' : 'c',
'origin.time' : 's',
'origin.command' : 's',
'cp' : 'h',
'stid' : 'h',
'time.yr' : 'h',
'time.mo' : 'h',
'time.dy' : 'h',
'time.hr' : 'h',
'time.mt' : 'h',
'time.sc' : 'h',
'time.us' : 'i',
'txpow' : 'h',
'nave' : 'h',
'atten' : 'h',
'lagfr' : 'h',
'smsep' : 'h',
'ercod' : 'h',
'stat.agc' : 'h',
'stat.lopwr' : 'h',
'noise.search' : 'f',
'noise.mean' : 'f',
'channel' : 'h',
'bmnum' : 'h',
'bmazm' : 'f',
'scan' : 'h',
'offset' : 'h',
'rxrise' : 'h',
'intt.sc' : 'h',
'intt.us' : 'i',
'txpl' : 'h',
'mpinc' : 'h',
'mppul' : 'h',
'mplgs' : 'h',
'nrang' : 'h',
'frang' : 'h',
'rsep' : 'h',
'xcf' : 'h',
'tfreq' : 'h',
'mxpwr' : 'i',
'lvmax' : 'i',
'iqdata.revision.major' : 'i',
'iqdata.revision.minor' : 'i',
'combf' : 's',
'seqnum' : 'i',
'chnnum' : 'i',
'smpnum' : 'i',
'skpnum' : 'i',
'ptab' : 'h',
'ltab' : 'h',
'tsc' : 'i',
'tus' : 'i',
'tatten' : 'h',
'tnoise' : 'f',
'toff' : 'i',
'tsze' : 'i',
'data' : 'h',
}
ud_types = {
'iqdat' : iqdat_types,
'fitacf' : fitacf_types,
'rawacf' : rawacf_types,
'map' : mapfile_types
}.get(file_type,None)
if ud_types is None:
raise ValueError("Incorrect or missing file type")
for dd in data_dicts:
for k,v in dd.iteritems():
if k not in ud_types:
message = "DICTS_TO_FILE: A supplied dictionary contains extra field {0}".format(k)
raise DmapDataError(message)
for k,v in ud_types.iteritems():
if k not in dd:
message = "DICTS_TO_FILE: Supplied dictionary is missing field {0}".format(k)
raise DmapDataError(message)
wr = RawDmapWrite(data_dicts,file_path,ud_types)
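# A usage sketch (hypothetical paths; the records would typically come from
# parse_dmap_format_from_file below, then be re-encoded with the rawacf
# type overrides):
#
#   records = parse_dmap_format_from_file('in.rawacf')
#   dicts_to_file(records, 'out.rawacf', file_type='rawacf')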
def parse_dmap_format_from_file(filepath,raw_dmap=False):
"""Creates a new dmap object from file and then formats the data results
into a nice list of dictionaries
:param filepath: file path to get DMAP data from
:param raw_dmap: a flag signalling to return the RawDmapRead object
instead of data dictionaries
:returns: list of data dictionaries
"""
dm = RawDmapRead(filepath)
if raw_dmap == True:
return dm
else:
records = dm.get_records()
data_list = [dmap_rec_to_dict(rec) for rec in records]
return data_list
def parse_dmap_format_from_stream(stream,raw_dmap=False):
"""Creates a new dmap object from a stream and then formats the data results
into a nice list of dictionaries
:param stream: buffer of raw bytes to convert
:param raw_dmap: a flag signalling to return the RawDmapRead object
instead of data dictionaries
:returns: list of data dictionaries
"""
dm = RawDmapRead(stream,stream=True)
if raw_dmap == True:
return dm
else:
records = dm.get_records()
data_list = [dmap_rec_to_dict(rec) for rec in records]
return data_list
def dmap_rec_to_dict(rec):
"""Converts the dmap record data to a easy to use dictionary
:param rec: a RawDmapRecord
:returns: a dictionary of all data contained in the record
"""
parsed_items = {}
scalers = rec.get_scalers()
arrays = rec.get_arrays()
merged_lists = scalers + arrays
record_dict = {ml.get_name():ml.get_data() for ml in merged_lists}
return record_dict
if __name__ == '__main__':
dm = RawDmapRead('20101211.0047.24.cve.rawacf')
#records = parse_dmap_format_from_file('testfiles/20150831.0000.03.bks.rawacf')
#print(records[5])
#records = parse_dmap_format('20150831.0000.03.bks_corrupt.rawacf')
#wr = RawDmapWrite(records,"testing.acf")
#records = parse_dmap_format_from_file('testing.acf')
#wr = RawDmapWrite(records,"testing.acf")
#dicts_to_rawacf(records,'testing.acf')
#records = parse_dmap_format_from_file('testing.acf')
#print(records[0])
#gc.collect()
#records = parse_dmap_format_from_file('20131004.0401.00.rkn.fitacf')
# print(records[0])
# gc.collect()
# print(len(gc.get_objects()))
#while(True):
#time.sleep(1)
#records = parse_dmap_format_from_file('20150831.0000.03.bks.rawacf')
|
Secheron/compassion-modules
|
refs/heads/master
|
sponsorship_tracking/model/__init__.py
|
2
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: David Coninckx <david@coninckx.com>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from . import contracts
from . import project_compassion
|
marcel-dancak/QGIS
|
refs/heads/master
|
python/plugins/processing/tests/Grass7AlgorithmsImageryTest.py
|
12
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Grass7AlgorithmsImageryTest.py
------------------------------
Date : May 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : mederic dot ribreux at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = ':%H$'
import AlgorithmsTestBase
import nose2
import shutil
from qgis.testing import (
start_app,
unittest
)
from processing.algs.grass7.Grass7Utils import Grass7Utils
class TestGrass7AlgorithmsImageryTest(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest):
@classmethod
def setUpClass(cls):
start_app()
from processing.core.Processing import Processing
Processing.initialize()
cls.cleanup_paths = []
assert Grass7Utils.installedVersion()
@classmethod
def tearDownClass(cls):
from processing.core.Processing import Processing
Processing.deinitialize()
for path in cls.cleanup_paths:
shutil.rmtree(path)
def test_definition_file(self):
return 'grass7_algorithms_imagery_tests.yaml'
if __name__ == '__main__':
nose2.main()
|
EmreAtes/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/argp-standalone/package.py
|
5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ArgpStandalone(AutotoolsPackage):
"""Standalone version of the argp interface from glibc for parsing
unix-style arguments. """
homepage = "https://www.lysator.liu.se/~nisse/misc"
url = "https://www.lysator.liu.se/~nisse/misc/argp-standalone-1.3.tar.gz"
version('1.3', '720704bac078d067111b32444e24ba69')
# Homebrew (https://github.com/Homebrew/homebrew-core) patches
# argp-standalone to work on Darwin; the patchfile below was taken
# from
# https://raw.githubusercontent.com/Homebrew/formula-patches/b5f0ad3/argp-standalone/patch-argp-fmtstream.h
    patch('argp-fmtstream.h.patch', level=0, when='platform=darwin',
          working_dir='.')
def install(self, spec, prefix):
make('install')
make('check')
mkdirp(self.spec.prefix.lib)
install('libargp.a', join_path(self.spec.prefix.lib, 'libargp.a'))
mkdirp(self.spec.prefix.include)
install('argp.h', join_path(self.spec.prefix.include, 'argp.h'))
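# Usage sketch (standard Spack CLI, not specific to this package copy):
#   spack install argp-standalone      # builds 1.3; the Darwin patch above
#                                      # is applied only when platform=darwin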
|
johnttaylor/Outcast
|
refs/heads/master
|
templates/top/tools/test.py
|
1
|
#!/usr/bin/env python3
"""
Package specific script that 'tests' the package content as part of the
publish process.
Usage: test <pkgroot>
Returns: If all of the test(s) are successful, then the script returns '0'; else
a non-zero value is returned.
"""
import sys
import os
import subprocess
#------------------------------------------------------------------------------
#
# This section can be deleted if NOT using the Outcast Chuck tool.
# This method assumes that the file top/publish_test.lst exists and it
# contains the list of test(s) to execute.
#
def _use_outcast():
# Begin
if ( len(sys.argv) < 2 ):
exit( "ERROR: Missing <pkgroot> command argument" )
# Build arguments for the bob-the-builder
testlist = os.path.join( sys.argv[1], "top", "publish_test.lst" )
path = os.path.join( sys.argv[1], "tests" )
cmd = "chuck.py -v --path {} --file {}".format(path, testlist)
# Invoke the build
print("EXECUTING: " + cmd)
p = subprocess.Popen( cmd, shell=True )
r = p.communicate()
if ( p.returncode != 0 ):
exit( "ERROR: Test(s) failed" )
#------------------------------------------------------------------------------
# Uncomment out the following line if using the Outcast Chuck test tool
#_use_outcast()
# Delete this line if using the Outcast default namespace algorithm
print("Testing....")
|
rizumu/django
|
refs/heads/master
|
tests/gis_tests/gis_migrations/test_commands.py
|
276
|
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import connection
from django.test import TransactionTestCase, skipUnlessDBFeature
@skipUnlessDBFeature("gis_enabled")
class MigrateTests(TransactionTestCase):
"""
    Tests running the migrate command in GeoDjango.
"""
available_apps = ["gis_tests.gis_migrations"]
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def test_migrate_gis(self):
"""
        Tests basic usage of the migrate command when a model uses GeoDjango
        fields. Regression test for ticket #22001:
        https://code.djangoproject.com/ticket/22001
        It's also used to showcase an error in migrations where SpatiaLite is
        enabled and geo tables are renamed, resulting in a unique constraint
        failure on geometry_columns. Regression test for ticket #23030:
        https://code.djangoproject.com/ticket/23030
"""
# Make sure the right tables exist
self.assertTableExists("gis_migrations_neighborhood")
self.assertTableExists("gis_migrations_household")
self.assertTableExists("gis_migrations_family")
if connection.features.supports_raster:
self.assertTableExists("gis_migrations_heatmap")
# Unmigrate everything
call_command("migrate", "gis_migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("gis_migrations_neighborhood")
self.assertTableNotExists("gis_migrations_household")
self.assertTableNotExists("gis_migrations_family")
if connection.features.supports_raster:
self.assertTableNotExists("gis_migrations_heatmap")
# Even geometry columns metadata
try:
GeoColumn = connection.ops.geometry_columns()
except NotImplementedError:
# Not all GIS backends have geometry columns model
pass
else:
self.assertEqual(
GeoColumn.objects.filter(
**{'%s__in' % GeoColumn.table_name_col(): ["gis_neighborhood", "gis_household"]}
).count(),
0)
# Revert the "unmigration"
call_command("migrate", "gis_migrations", verbosity=0)
|
Filechaser/sickbeard_mp4_automator
|
refs/heads/master
|
autoprocess/autoProcessTV.py
|
3
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
try:
from urllib.request import FancyURLopener
from urllib.parse import urlencode
except ImportError:
from urllib import FancyURLopener
from urllib import urlencode
import os.path
import logging
class AuthURLOpener(FancyURLopener):
def __init__(self, user, pw):
self.username = user
self.password = pw
self.numTries = 0
FancyURLopener.__init__(self)
def prompt_user_passwd(self, host, realm):
if self.numTries == 0:
self.numTries = 1
return (self.username, self.password)
else:
return ('', '')
def openit(self, url):
self.numTries = 0
return FancyURLopener.open(self, url)
def processEpisode(dirName, settings, nzbName=None, logger=None):
# Setup logging
if logger:
log = logger
else:
log = logging.getLogger(__name__)
host = settings.Sickbeard['host']
port = settings.Sickbeard['port']
username = settings.Sickbeard['user']
password = settings.Sickbeard['pass']
    try:
        ssl = int(settings.Sickbeard['ssl'])
    except (KeyError, TypeError, ValueError):
        ssl = 0
    try:
        web_root = settings.Sickbeard['web_root']
    except KeyError:
        web_root = ""
params = {}
params['quiet'] = 1
params['dir'] = dirName
if nzbName is not None:
params['nzbName'] = nzbName
myOpener = AuthURLOpener(username, password)
if ssl:
protocol = "https://"
else:
protocol = "http://"
url = protocol + host + ":" + port + web_root + "/home/postprocess/processEpisode?" + urlencode(params)
log.debug('Host: %s.' % host)
log.debug('Port: %s.' % port)
log.debug('Username: %s.' % username)
log.debug('Password: %s.' % password)
log.debug('Protocol: %s.' % protocol)
log.debug('Web Root: %s.' % web_root)
log.debug('URL: %s.' % url)
log.info("Opening URL: %s." % url)
try:
urlObj = myOpener.openit(url)
except IOError:
log.exception("Unable to open URL")
sys.exit(1)
result = urlObj.readlines()
lastline = None
for line in result:
if line:
log.debug(line.strip())
lastline = line.strip()
if lastline:
log.info(lastline)
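# A minimal invocation sketch (hedged: _FakeSettings below is a hypothetical
# stand-in for the real sickbeard_mp4_automator settings object; processEpisode
# only needs its Sickbeard dict):
if __name__ == '__main__':
    class _FakeSettings(object):
        Sickbeard = {'host': 'localhost', 'port': '8081', 'user': '',
                     'pass': '', 'ssl': '0', 'web_root': ''}
    # Uncomment to post-process a downloaded directory via Sick Beard:
    # processEpisode(sys.argv[1], _FakeSettings())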
|
morreene/tradenews
|
refs/heads/master
|
venv/Lib/site-packages/cffi/backend_ctypes.py
|
14
|
import ctypes, ctypes.util, operator, sys
from . import model
if sys.version_info < (3,):
bytechr = chr
else:
unicode = str
long = int
xrange = range
bytechr = lambda num: bytes([num])
class CTypesType(type):
pass
class CTypesData(object):
__metaclass__ = CTypesType
__slots__ = ['__weakref__']
__name__ = '<cdata>'
def __init__(self, *args):
raise TypeError("cannot instantiate %r" % (self.__class__,))
@classmethod
def _newp(cls, init):
raise TypeError("expected a pointer or array ctype, got '%s'"
% (cls._get_c_name(),))
@staticmethod
def _to_ctypes(value):
raise TypeError
@classmethod
def _arg_to_ctypes(cls, *value):
try:
ctype = cls._ctype
except AttributeError:
raise TypeError("cannot create an instance of %r" % (cls,))
if value:
res = cls._to_ctypes(*value)
if not isinstance(res, ctype):
res = cls._ctype(res)
else:
res = cls._ctype()
return res
@classmethod
def _create_ctype_obj(cls, init):
if init is None:
return cls._arg_to_ctypes()
else:
return cls._arg_to_ctypes(init)
@staticmethod
def _from_ctypes(ctypes_value):
raise TypeError
@classmethod
def _get_c_name(cls, replace_with=''):
return cls._reftypename.replace(' &', replace_with)
@classmethod
def _fix_class(cls):
cls.__name__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__module__ = 'ffi'
def _get_own_repr(self):
raise NotImplementedError
def _addr_repr(self, address):
if address == 0:
return 'NULL'
else:
if address < 0:
address += 1 << (8*ctypes.sizeof(ctypes.c_void_p))
return '0x%x' % address
def __repr__(self, c_name=None):
own = self._get_own_repr()
return '<cdata %r %s>' % (c_name or self._get_c_name(), own)
def _convert_to_address(self, BClass):
if BClass is None:
raise TypeError("cannot convert %r to an address" % (
self._get_c_name(),))
else:
raise TypeError("cannot convert %r to %r" % (
self._get_c_name(), BClass._get_c_name()))
@classmethod
def _get_size(cls):
return ctypes.sizeof(cls._ctype)
def _get_size_of_instance(self):
return ctypes.sizeof(self._ctype)
@classmethod
def _cast_from(cls, source):
raise TypeError("cannot cast to %r" % (cls._get_c_name(),))
def _cast_to_integer(self):
return self._convert_to_address(None)
@classmethod
def _alignment(cls):
return ctypes.alignment(cls._ctype)
def __iter__(self):
raise TypeError("cdata %r does not support iteration" % (
self._get_c_name()),)
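    # Comparison support: two pointer-like cdatas compare by address, plain
    # primitives compare by value, and mixing a pointer with a non-cdata
    # operand returns NotImplemented so Python falls back to the other side.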
def _make_cmp(name):
cmpfunc = getattr(operator, name)
def cmp(self, other):
v_is_ptr = not isinstance(self, CTypesGenericPrimitive)
w_is_ptr = (isinstance(other, CTypesData) and
not isinstance(other, CTypesGenericPrimitive))
if v_is_ptr and w_is_ptr:
return cmpfunc(self._convert_to_address(None),
other._convert_to_address(None))
elif v_is_ptr or w_is_ptr:
return NotImplemented
else:
if isinstance(self, CTypesGenericPrimitive):
self = self._value
if isinstance(other, CTypesGenericPrimitive):
other = other._value
return cmpfunc(self, other)
cmp.func_name = name
return cmp
__eq__ = _make_cmp('__eq__')
__ne__ = _make_cmp('__ne__')
__lt__ = _make_cmp('__lt__')
__le__ = _make_cmp('__le__')
__gt__ = _make_cmp('__gt__')
__ge__ = _make_cmp('__ge__')
def __hash__(self):
return hash(self._convert_to_address(None))
def _to_string(self, maxlen):
raise TypeError("string(): %r" % (self,))
class CTypesGenericPrimitive(CTypesData):
__slots__ = []
def __hash__(self):
return hash(self._value)
def _get_own_repr(self):
return repr(self._from_ctypes(self._value))
class CTypesGenericArray(CTypesData):
__slots__ = []
@classmethod
def _newp(cls, init):
return cls(init)
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
class CTypesGenericPtr(CTypesData):
__slots__ = ['_address', '_as_ctype_ptr']
_automatic_casts = False
kind = "pointer"
@classmethod
def _newp(cls, init):
return cls(init)
@classmethod
def _cast_from(cls, source):
if source is None:
address = 0
elif isinstance(source, CTypesData):
address = source._cast_to_integer()
elif isinstance(source, (int, long)):
address = source
else:
raise TypeError("bad type for cast to %r: %r" %
(cls, type(source).__name__))
return cls._new_pointer_at(address)
@classmethod
def _new_pointer_at(cls, address):
self = cls.__new__(cls)
self._address = address
self._as_ctype_ptr = ctypes.cast(address, cls._ctype)
return self
def _get_own_repr(self):
try:
return self._addr_repr(self._address)
except AttributeError:
return '???'
def _cast_to_integer(self):
return self._address
def __nonzero__(self):
return bool(self._address)
__bool__ = __nonzero__
@classmethod
def _to_ctypes(cls, value):
if not isinstance(value, CTypesData):
raise TypeError("unexpected %s object" % type(value).__name__)
address = value._convert_to_address(cls)
return ctypes.cast(address, cls._ctype)
@classmethod
def _from_ctypes(cls, ctypes_ptr):
address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0
return cls._new_pointer_at(address)
@classmethod
def _initialize(cls, ctypes_ptr, value):
if value:
ctypes_ptr.contents = cls._to_ctypes(value).contents
def _convert_to_address(self, BClass):
if (BClass in (self.__class__, None) or BClass._automatic_casts
or self._automatic_casts):
return self._address
else:
return CTypesData._convert_to_address(self, BClass)
class CTypesBaseStructOrUnion(CTypesData):
__slots__ = ['_blob']
@classmethod
def _create_ctype_obj(cls, init):
# may be overridden
raise TypeError("cannot instantiate opaque type %s" % (cls,))
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
@classmethod
def _offsetof(cls, fieldname):
return getattr(cls._ctype, fieldname).offset
def _convert_to_address(self, BClass):
if getattr(BClass, '_BItem', None) is self.__class__:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@classmethod
def _from_ctypes(cls, ctypes_struct_or_union):
self = cls.__new__(cls)
self._blob = ctypes_struct_or_union
return self
@classmethod
def _to_ctypes(cls, value):
return value._blob
def __repr__(self, c_name=None):
return CTypesData.__repr__(self, c_name or self._get_c_name(' &'))
class CTypesBackend(object):
PRIMITIVE_TYPES = {
'char': ctypes.c_char,
'short': ctypes.c_short,
'int': ctypes.c_int,
'long': ctypes.c_long,
'long long': ctypes.c_longlong,
'signed char': ctypes.c_byte,
'unsigned char': ctypes.c_ubyte,
'unsigned short': ctypes.c_ushort,
'unsigned int': ctypes.c_uint,
'unsigned long': ctypes.c_ulong,
'unsigned long long': ctypes.c_ulonglong,
'float': ctypes.c_float,
'double': ctypes.c_double,
'_Bool': ctypes.c_bool,
}
for _name in ['unsigned long long', 'unsigned long',
'unsigned int', 'unsigned short', 'unsigned char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name]
for _name in ['long long', 'long', 'int', 'short', 'signed char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name]
PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name]
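    # Illustration (platform-dependent, not part of the cffi API): on a
    # typical LP64 Linux build ctypes.sizeof(ctypes.c_long) == 8, so the
    # loops above alias 'int64_t'/'intptr_t'/'ptrdiff_t' to ctypes.c_long
    # and 'int32_t' to ctypes.c_int.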
def __init__(self):
self.RTLD_LAZY = 0 # not supported anyway by ctypes
self.RTLD_NOW = 0
self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL
self.RTLD_LOCAL = ctypes.RTLD_LOCAL
def set_ffi(self, ffi):
self.ffi = ffi
def _get_types(self):
return CTypesData, CTypesType
def load_library(self, path, flags=0):
cdll = ctypes.CDLL(path, flags)
return CTypesLibrary(self, cdll)
def new_void_type(self):
class CTypesVoid(CTypesData):
__slots__ = []
_reftypename = 'void &'
@staticmethod
def _from_ctypes(novalue):
return None
@staticmethod
def _to_ctypes(novalue):
if novalue is not None:
raise TypeError("None expected, got %s object" %
(type(novalue).__name__,))
return None
CTypesVoid._fix_class()
return CTypesVoid
def new_primitive_type(self, name):
if name == 'wchar_t':
raise NotImplementedError(name)
ctype = self.PRIMITIVE_TYPES[name]
if name == 'char':
kind = 'char'
elif name in ('float', 'double'):
kind = 'float'
else:
if name in ('signed char', 'unsigned char'):
kind = 'byte'
elif name == '_Bool':
kind = 'bool'
else:
kind = 'int'
is_signed = (ctype(-1).value == -1)
#
def _cast_source_to_int(source):
if isinstance(source, (int, long, float)):
source = int(source)
elif isinstance(source, CTypesData):
source = source._cast_to_integer()
elif isinstance(source, bytes):
source = ord(source)
elif source is None:
source = 0
else:
raise TypeError("bad type for cast to %r: %r" %
(CTypesPrimitive, type(source).__name__))
return source
#
kind1 = kind
class CTypesPrimitive(CTypesGenericPrimitive):
__slots__ = ['_value']
_ctype = ctype
_reftypename = '%s &' % name
kind = kind1
def __init__(self, value):
self._value = value
@staticmethod
def _create_ctype_obj(init):
if init is None:
return ctype()
return ctype(CTypesPrimitive._to_ctypes(init))
if kind == 'int' or kind == 'byte':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = ctype(source).value # cast within range
return cls(source)
def __int__(self):
return self._value
if kind == 'bool':
@classmethod
def _cast_from(cls, source):
if not isinstance(source, (int, long, float)):
source = _cast_source_to_int(source)
return cls(bool(source))
def __int__(self):
return self._value
if kind == 'char':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = bytechr(source & 0xFF)
return cls(source)
def __int__(self):
return ord(self._value)
if kind == 'float':
@classmethod
def _cast_from(cls, source):
if isinstance(source, float):
pass
elif isinstance(source, CTypesGenericPrimitive):
if hasattr(source, '__float__'):
source = float(source)
else:
source = int(source)
else:
source = _cast_source_to_int(source)
source = ctype(source).value # fix precision
return cls(source)
def __int__(self):
return int(self._value)
def __float__(self):
return self._value
_cast_to_integer = __int__
if kind == 'int' or kind == 'byte' or kind == 'bool':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long)):
if isinstance(x, CTypesData):
x = int(x)
else:
raise TypeError("integer expected, got %s" %
type(x).__name__)
if ctype(x).value != x:
if not is_signed and x < 0:
raise OverflowError("%s: negative integer" % name)
else:
raise OverflowError("%s: integer out of bounds"
% name)
return x
if kind == 'char':
@staticmethod
def _to_ctypes(x):
if isinstance(x, bytes) and len(x) == 1:
return x
if isinstance(x, CTypesPrimitive): # <CData <char>>
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
def __nonzero__(self):
return ord(self._value) != 0
else:
def __nonzero__(self):
return self._value != 0
__bool__ = __nonzero__
if kind == 'float':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long, float, CTypesData)):
raise TypeError("float expected, got %s" %
type(x).__name__)
return ctype(x).value
@staticmethod
def _from_ctypes(value):
return getattr(value, 'value', value)
@staticmethod
def _initialize(blob, init):
blob.value = CTypesPrimitive._to_ctypes(init)
if kind == 'char':
def _to_string(self, maxlen):
return self._value
if kind == 'byte':
def _to_string(self, maxlen):
return chr(self._value & 0xff)
#
CTypesPrimitive._fix_class()
return CTypesPrimitive
def new_pointer_type(self, BItem):
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'charp'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'bytep'
elif BItem is getbtype(model.void_type):
kind = 'voidp'
else:
kind = 'generic'
#
class CTypesPtr(CTypesGenericPtr):
__slots__ = ['_own']
if kind == 'charp':
__slots__ += ['__as_strbuf']
_BItem = BItem
if hasattr(BItem, '_ctype'):
_ctype = ctypes.POINTER(BItem._ctype)
_bitem_size = ctypes.sizeof(BItem._ctype)
else:
_ctype = ctypes.c_void_p
if issubclass(BItem, CTypesGenericArray):
_reftypename = BItem._get_c_name('(* &)')
else:
_reftypename = BItem._get_c_name(' * &')
def __init__(self, init):
ctypeobj = BItem._create_ctype_obj(init)
if kind == 'charp':
self.__as_strbuf = ctypes.create_string_buffer(
ctypeobj.value + b'\x00')
self._as_ctype_ptr = ctypes.cast(
self.__as_strbuf, self._ctype)
else:
self._as_ctype_ptr = ctypes.pointer(ctypeobj)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own = True
def __add__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address +
other * self._bitem_size)
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address -
other * self._bitem_size)
elif type(self) is type(other):
return (self._address - other._address) // self._bitem_size
else:
return NotImplemented
def __getitem__(self, index):
if getattr(self, '_own', False) and index != 0:
raise IndexError
return BItem._from_ctypes(self._as_ctype_ptr[index])
def __setitem__(self, index, value):
self._as_ctype_ptr[index] = BItem._to_ctypes(value)
if kind == 'charp' or kind == 'voidp':
@classmethod
def _arg_to_ctypes(cls, *value):
if value and isinstance(value[0], bytes):
return ctypes.c_char_p(value[0])
else:
return super(CTypesPtr, cls)._arg_to_ctypes(*value)
if kind == 'charp' or kind == 'bytep':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = sys.maxsize
p = ctypes.cast(self._as_ctype_ptr,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (
ctypes.sizeof(self._as_ctype_ptr.contents),)
return super(CTypesPtr, self)._get_own_repr()
#
if (BItem is self.ffi._get_cached_btype(model.void_type) or
BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))):
CTypesPtr._automatic_casts = True
#
CTypesPtr._fix_class()
return CTypesPtr
def new_array_type(self, CTypesPtr, length):
if length is None:
brackets = ' &[]'
else:
brackets = ' &[%d]' % length
BItem = CTypesPtr._BItem
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'char'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'byte'
else:
kind = 'generic'
#
class CTypesArray(CTypesGenericArray):
__slots__ = ['_blob', '_own']
if length is not None:
_ctype = BItem._ctype * length
else:
__slots__.append('_ctype')
_reftypename = BItem._get_c_name(brackets)
_declared_length = length
_CTPtr = CTypesPtr
def __init__(self, init):
if length is None:
if isinstance(init, (int, long)):
len1 = init
init = None
elif kind == 'char' and isinstance(init, bytes):
len1 = len(init) + 1 # extra null
else:
init = tuple(init)
len1 = len(init)
self._ctype = BItem._ctype * len1
self._blob = self._ctype()
self._own = True
if init is not None:
self._initialize(self._blob, init)
@staticmethod
def _initialize(blob, init):
if isinstance(init, bytes):
init = [init[i:i+1] for i in range(len(init))]
else:
init = tuple(init)
if len(init) > len(blob):
raise IndexError("too many initializers")
addr = ctypes.cast(blob, ctypes.c_void_p).value
PTR = ctypes.POINTER(BItem._ctype)
itemsize = ctypes.sizeof(BItem._ctype)
for i, value in enumerate(init):
p = ctypes.cast(addr + i * itemsize, PTR)
BItem._initialize(p.contents, value)
def __len__(self):
return len(self._blob)
def __getitem__(self, index):
if not (0 <= index < len(self._blob)):
raise IndexError
return BItem._from_ctypes(self._blob[index])
def __setitem__(self, index, value):
if not (0 <= index < len(self._blob)):
raise IndexError
self._blob[index] = BItem._to_ctypes(value)
if kind == 'char' or kind == 'byte':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = len(self._blob)
p = ctypes.cast(self._blob,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (ctypes.sizeof(self._blob),)
return super(CTypesArray, self)._get_own_repr()
def _convert_to_address(self, BClass):
if BClass in (CTypesPtr, None) or BClass._automatic_casts:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@staticmethod
def _from_ctypes(ctypes_array):
self = CTypesArray.__new__(CTypesArray)
self._blob = ctypes_array
return self
@staticmethod
def _arg_to_ctypes(value):
return CTypesPtr._arg_to_ctypes(value)
def __add__(self, other):
if isinstance(other, (int, long)):
return CTypesPtr._new_pointer_at(
ctypes.addressof(self._blob) +
other * ctypes.sizeof(BItem._ctype))
else:
return NotImplemented
@classmethod
def _cast_from(cls, source):
raise NotImplementedError("casting to %r" % (
cls._get_c_name(),))
#
CTypesArray._fix_class()
return CTypesArray
def _new_struct_or_union(self, kind, name, base_ctypes_class):
#
class struct_or_union(base_ctypes_class):
pass
struct_or_union.__name__ = '%s_%s' % (kind, name)
kind1 = kind
#
class CTypesStructOrUnion(CTypesBaseStructOrUnion):
__slots__ = ['_blob']
_ctype = struct_or_union
_reftypename = '%s &' % (name,)
_kind = kind = kind1
#
CTypesStructOrUnion._fix_class()
return CTypesStructOrUnion
def new_struct_type(self, name):
return self._new_struct_or_union('struct', name, ctypes.Structure)
def new_union_type(self, name):
return self._new_struct_or_union('union', name, ctypes.Union)
def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp,
totalsize=-1, totalalignment=-1, sflags=0):
if totalsize >= 0 or totalalignment >= 0:
raise NotImplementedError("the ctypes backend of CFFI does not support "
"structures completed by verify(); please "
"compile and install the _cffi_backend module.")
struct_or_union = CTypesStructOrUnion._ctype
fnames = [fname for (fname, BField, bitsize) in fields]
btypes = [BField for (fname, BField, bitsize) in fields]
bitfields = [bitsize for (fname, BField, bitsize) in fields]
#
bfield_types = {}
cfields = []
for (fname, BField, bitsize) in fields:
if bitsize < 0:
cfields.append((fname, BField._ctype))
bfield_types[fname] = BField
else:
cfields.append((fname, BField._ctype, bitsize))
bfield_types[fname] = Ellipsis
if sflags & 8:
struct_or_union._pack_ = 1
struct_or_union._fields_ = cfields
CTypesStructOrUnion._bfield_types = bfield_types
#
@staticmethod
def _create_ctype_obj(init):
result = struct_or_union()
if init is not None:
initialize(result, init)
return result
CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj
#
def initialize(blob, init):
if is_union:
if len(init) > 1:
raise ValueError("union initializer: %d items given, but "
"only one supported (use a dict if needed)"
% (len(init),))
if not isinstance(init, dict):
if isinstance(init, (bytes, unicode)):
raise TypeError("union initializer: got a str")
init = tuple(init)
if len(init) > len(fnames):
raise ValueError("too many values for %s initializer" %
CTypesStructOrUnion._get_c_name())
init = dict(zip(fnames, init))
addr = ctypes.addressof(blob)
for fname, value in init.items():
BField, bitsize = name2fieldtype[fname]
assert bitsize < 0, \
"not implemented: initializer with bit fields"
offset = CTypesStructOrUnion._offsetof(fname)
PTR = ctypes.POINTER(BField._ctype)
p = ctypes.cast(addr + offset, PTR)
BField._initialize(p.contents, value)
is_union = CTypesStructOrUnion._kind == 'union'
name2fieldtype = dict(zip(fnames, zip(btypes, bitfields)))
#
for fname, BField, bitsize in fields:
if fname == '':
raise NotImplementedError("nested anonymous structs/unions")
if hasattr(CTypesStructOrUnion, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
if bitsize < 0:
def getter(self, fname=fname, BField=BField,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BField._from_ctypes(p.contents)
def setter(self, value, fname=fname, BField=BField):
setattr(self._blob, fname, BField._to_ctypes(value))
#
if issubclass(BField, CTypesGenericArray):
setter = None
if BField._declared_length == 0:
def getter(self, fname=fname, BFieldPtr=BField._CTPtr,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BFieldPtr._from_ctypes(p)
#
else:
def getter(self, fname=fname, BField=BField):
return BField._from_ctypes(getattr(self._blob, fname))
def setter(self, value, fname=fname, BField=BField):
# xxx obscure workaround
value = BField._to_ctypes(value)
oldvalue = getattr(self._blob, fname)
setattr(self._blob, fname, value)
if value != getattr(self._blob, fname):
setattr(self._blob, fname, oldvalue)
raise OverflowError("value too large for bitfield")
setattr(CTypesStructOrUnion, fname, property(getter, setter))
#
CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp))
for fname in fnames:
if hasattr(CTypesPtr, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
def getter(self, fname=fname):
return getattr(self[0], fname)
def setter(self, value, fname=fname):
setattr(self[0], fname, value)
setattr(CTypesPtr, fname, property(getter, setter))
def new_function_type(self, BArgs, BResult, has_varargs):
nameargs = [BArg._get_c_name() for BArg in BArgs]
if has_varargs:
nameargs.append('...')
nameargs = ', '.join(nameargs)
#
class CTypesFunctionPtr(CTypesGenericPtr):
__slots__ = ['_own_callback', '_name']
_ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None),
*[BArg._ctype for BArg in BArgs],
use_errno=True)
_reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,))
def __init__(self, init, error=None):
# create a callback to the Python callable init()
import traceback
assert not has_varargs, "varargs not supported for callbacks"
if getattr(BResult, '_ctype', None) is not None:
error = BResult._from_ctypes(
BResult._create_ctype_obj(error))
else:
error = None
def callback(*args):
args2 = []
for arg, BArg in zip(args, BArgs):
args2.append(BArg._from_ctypes(arg))
try:
res2 = init(*args2)
res2 = BResult._to_ctypes(res2)
except:
traceback.print_exc()
res2 = error
if issubclass(BResult, CTypesGenericPtr):
if res2:
res2 = ctypes.cast(res2, ctypes.c_void_p).value
# .value: http://bugs.python.org/issue1574593
else:
res2 = None
#print repr(res2)
return res2
if issubclass(BResult, CTypesGenericPtr):
# The only pointers callbacks can return are void*s:
# http://bugs.python.org/issue5710
callback_ctype = ctypes.CFUNCTYPE(
ctypes.c_void_p,
*[BArg._ctype for BArg in BArgs],
use_errno=True)
else:
callback_ctype = CTypesFunctionPtr._ctype
self._as_ctype_ptr = callback_ctype(callback)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own_callback = init
@staticmethod
def _initialize(ctypes_ptr, value):
if value:
raise NotImplementedError("ctypes backend: not supported: "
"initializers for function pointers")
def __repr__(self):
c_name = getattr(self, '_name', None)
if c_name:
i = self._reftypename.index('(* &)')
if self._reftypename[i-1] not in ' )*':
c_name = ' ' + c_name
c_name = self._reftypename.replace('(* &)', c_name)
return CTypesData.__repr__(self, c_name)
def _get_own_repr(self):
if getattr(self, '_own_callback', None) is not None:
return 'calling %r' % (self._own_callback,)
return super(CTypesFunctionPtr, self)._get_own_repr()
def __call__(self, *args):
if has_varargs:
assert len(args) >= len(BArgs)
extraargs = args[len(BArgs):]
args = args[:len(BArgs)]
else:
assert len(args) == len(BArgs)
ctypes_args = []
for arg, BArg in zip(args, BArgs):
ctypes_args.append(BArg._arg_to_ctypes(arg))
if has_varargs:
for i, arg in enumerate(extraargs):
if arg is None:
ctypes_args.append(ctypes.c_void_p(0)) # NULL
continue
if not isinstance(arg, CTypesData):
raise TypeError(
"argument %d passed in the variadic part "
"needs to be a cdata object (got %s)" %
(1 + len(BArgs) + i, type(arg).__name__))
ctypes_args.append(arg._arg_to_ctypes(arg))
result = self._as_ctype_ptr(*ctypes_args)
return BResult._from_ctypes(result)
#
CTypesFunctionPtr._fix_class()
return CTypesFunctionPtr
def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
assert isinstance(name, str)
reverse_mapping = dict(zip(reversed(enumvalues),
reversed(enumerators)))
#
class CTypesEnum(CTypesInt):
__slots__ = []
_reftypename = '%s &' % name
def _get_own_repr(self):
value = self._value
try:
return '%d: %s' % (value, reverse_mapping[value])
except KeyError:
return str(value)
def _to_string(self, maxlen):
value = self._value
try:
return reverse_mapping[value]
except KeyError:
return str(value)
#
CTypesEnum._fix_class()
return CTypesEnum
def get_errno(self):
return ctypes.get_errno()
def set_errno(self, value):
ctypes.set_errno(value)
def string(self, b, maxlen=-1):
return b._to_string(maxlen)
def buffer(self, bptr, size=-1):
raise NotImplementedError("buffer() with ctypes backend")
def sizeof(self, cdata_or_BType):
if isinstance(cdata_or_BType, CTypesData):
return cdata_or_BType._get_size_of_instance()
else:
assert issubclass(cdata_or_BType, CTypesData)
return cdata_or_BType._get_size()
def alignof(self, BType):
assert issubclass(BType, CTypesData)
return BType._alignment()
def newp(self, BType, source):
if not issubclass(BType, CTypesData):
raise TypeError
return BType._newp(source)
def cast(self, BType, source):
return BType._cast_from(source)
def callback(self, BType, source, error, onerror):
assert onerror is None # XXX not implemented
return BType(source, error)
_weakref_cache_ref = None
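    # ffi.gc() emulation: gcp() returns a fresh cast of the cdata and keeps a
    # weakref from that cast to (original cdata, destructor); when the cast is
    # garbage-collected, remove() fires and runs the destructor. Passing
    # destructor=None detaches a previously registered destructor.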
def gcp(self, cdata, destructor, size=0):
if self._weakref_cache_ref is None:
import weakref
class MyRef(weakref.ref):
def __eq__(self, other):
myref = self()
return self is other or (
myref is not None and myref is other())
def __ne__(self, other):
return not (self == other)
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(self())
return self._hash
self._weakref_cache_ref = {}, MyRef
weak_cache, MyRef = self._weakref_cache_ref
if destructor is None:
try:
del weak_cache[MyRef(cdata)]
except KeyError:
raise TypeError("Can remove destructor only on a object "
"previously returned by ffi.gc()")
return None
def remove(k):
cdata, destructor = weak_cache.pop(k, (None, None))
if destructor is not None:
destructor(cdata)
new_cdata = self.cast(self.typeof(cdata), cdata)
assert new_cdata is not cdata
weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor)
return new_cdata
typeof = type
def getcname(self, BType, replace_with):
return BType._get_c_name(replace_with)
def typeoffsetof(self, BType, fieldname, num=0):
if isinstance(fieldname, str):
if num == 0 and issubclass(BType, CTypesGenericPtr):
BType = BType._BItem
if not issubclass(BType, CTypesBaseStructOrUnion):
raise TypeError("expected a struct or union ctype")
BField = BType._bfield_types[fieldname]
if BField is Ellipsis:
raise TypeError("not supported for bitfields")
return (BField, BType._offsetof(fieldname))
elif isinstance(fieldname, (int, long)):
if issubclass(BType, CTypesGenericArray):
BType = BType._CTPtr
if not issubclass(BType, CTypesGenericPtr):
raise TypeError("expected an array or ptr ctype")
BItem = BType._BItem
offset = BItem._get_size() * fieldname
if offset > sys.maxsize:
raise OverflowError
return (BItem, offset)
else:
raise TypeError(type(fieldname))
def rawaddressof(self, BTypePtr, cdata, offset=None):
if isinstance(cdata, CTypesBaseStructOrUnion):
ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
elif isinstance(cdata, CTypesGenericPtr):
if offset is None or not issubclass(type(cdata)._BItem,
CTypesBaseStructOrUnion):
raise TypeError("unexpected cdata type")
ptr = type(cdata)._to_ctypes(cdata)
elif isinstance(cdata, CTypesGenericArray):
ptr = type(cdata)._to_ctypes(cdata)
else:
raise TypeError("expected a <cdata 'struct-or-union'>")
if offset:
ptr = ctypes.cast(
ctypes.c_void_p(
ctypes.cast(ptr, ctypes.c_void_p).value + offset),
type(ptr))
return BTypePtr._from_ctypes(ptr)
class CTypesLibrary(object):
def __init__(self, backend, cdll):
self.backend = backend
self.cdll = cdll
def load_function(self, BType, name):
c_func = getattr(self.cdll, name)
funcobj = BType._from_ctypes(c_func)
funcobj._name = name
return funcobj
def read_variable(self, BType, name):
try:
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
except AttributeError as e:
raise NotImplementedError(e)
return BType._from_ctypes(ctypes_obj)
def write_variable(self, BType, name, value):
new_ctypes_obj = BType._to_ctypes(value)
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
ctypes.memmove(ctypes.addressof(ctypes_obj),
ctypes.addressof(new_ctypes_obj),
ctypes.sizeof(BType._ctype))
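# A minimal direct-use sketch (hedged: cffi normally drives this backend
# through its ffi object, so direct use is unusual; primitive types shown
# here do not require set_ffi() to have been called):
#   backend = CTypesBackend()
#   BInt = backend.new_primitive_type('int')
#   x = backend.cast(BInt, 42)        # <cdata 'int'> wrapping a ctypes int
#   assert int(x) == 42 and backend.sizeof(x) == ctypes.sizeof(ctypes.c_int)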
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/lib/Lib/distutils/command/bdist.py
|
81
|
"""distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist.py 37828 2004-11-10 22:23:15Z loewis $"
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats ():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats=[]
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist (Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',
#'bdist_sdux', 'bdist_pkgtool'
)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = { 'posix': 'gztar',
'java': 'gztar',
'nt': 'zip',
'os2': 'zip', }
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip',
#'pkgtool', 'sdux'
]
# And the real information.
format_command = { 'rpm': ('bdist_rpm', "RPM distribution"),
'zip': ('bdist_dumb', "ZIP file"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
#'pkgtool': ('bdist_pkgtool',
# "Solaris pkgtool distribution"),
#'sdux': ('bdist_sdux', "HP-UX swinstall depot"),
}
def initialize_options (self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
# initialize_options()
def finalize_options (self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
self.plat_name = get_platform()
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create built distributions " + \
"on platform %s" % os.name
if self.dist_dir is None:
self.dist_dir = "dist"
# finalize_options()
def run (self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError, "invalid format '%s'" % format
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
# run()
# class bdist
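# Usage sketch (standard distutils invocation, independent of this copy):
#   python setup.py bdist --formats=gztar,zip
# reinitializes and runs bdist_dumb once per requested format, per the
# format_command table above.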
|
jessstrap/servotk
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_wsh.py
|
465
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
raise handshake.AbortedByUserException(
"Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_complex.py
|
72
|
import unittest
from test import support
from random import random
from math import atan2, isnan, copysign
import operator
INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
class ComplexTest(unittest.TestCase):
def assertAlmostEqual(self, a, b):
if isinstance(a, complex):
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a.real, b)
unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
else:
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a, b.real)
unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close\""""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
def assertFloatsAreIdentical(self, x, y):
"""assert that floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if isnan(x) or isnan(y):
if isnan(x) and isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif copysign(1.0, x) == copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close\""""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
def test_truediv(self):
simple_real = [float(i) for i in range(-5, 6)]
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
for x in simple_complex:
for y in simple_complex:
self.check_div(x, y)
# A naive complex division algorithm (such as in 2.0) is very prone to
# nonsense errors for these (overflows and underflows).
self.check_div(complex(1e200, 1e200), 1+0j)
self.check_div(complex(1e-200, 1e-200), 1+0j)
# Just for fun.
for i in range(100):
self.check_div(complex(random(), random()),
complex(random(), random()))
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
# FIXME: The following currently crashes on Alpha
# self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
def test_floordiv(self):
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 1.5+0j)
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 0+0j)
def test_richcompare(self):
self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
for i in range(1, 100):
f = i / 100.0
self.assertIs(complex.__eq__(f+0j, f), True)
self.assertIs(complex.__ne__(f+0j, f), False)
self.assertIs(complex.__eq__(complex(f, f), f), False)
self.assertIs(complex.__ne__(complex(f, f), f), True)
self.assertIs(complex.__lt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__le__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__gt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__ge__(1+1j, 2+2j), NotImplemented)
self.assertRaises(TypeError, operator.lt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.le, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.gt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.ge, 1+1j, 2+2j)
self.assertIs(operator.eq(1+1j, 1+1j), True)
self.assertIs(operator.eq(1+1j, 2+2j), False)
self.assertIs(operator.ne(1+1j, 1+1j), False)
self.assertIs(operator.ne(1+1j, 2+2j), True)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
def test_mod(self):
# % is no longer supported on complex numbers
self.assertRaises(TypeError, (1+1j).__mod__, 0+0j)
self.assertRaises(TypeError, lambda: (3.33+4.43j) % 0)
self.assertRaises(TypeError, (1+1j).__mod__, 4.3j)
def test_divmod(self):
self.assertRaises(TypeError, divmod, 1+1j, 1+0j)
self.assertRaises(TypeError, divmod, 1+1j, 0+0j)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
def test_boolcontext(self):
for i in range(100):
self.assertTrue(complex(random() + 1e-6, random() + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
class OS:
def __init__(self, value): self.value = value
def __complex__(self): return self.value
class NS(object):
def __init__(self, value): self.value = value
def __complex__(self): return self.value
self.assertEqual(complex(OS(1+10j)), 1+10j)
self.assertEqual(complex(NS(1+10j)), 1+10j)
self.assertRaises(TypeError, complex, OS(None))
self.assertRaises(TypeError, complex, NS(None))
self.assertRaises(TypeError, complex, {})
self.assertRaises(TypeError, complex, NS(1.5))
self.assertRaises(TypeError, complex, NS(1))
self.assertAlmostEqual(complex("1+10j"), 1+10j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10.0), 10+0j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10+0j), 10+0j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10.0), 1+10j)
self.assertAlmostEqual(complex(3.14+0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14), 3.14+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0j, 3.14), 3.14j)
self.assertAlmostEqual(complex(0.0, 3.14), 3.14j)
self.assertAlmostEqual(complex("1"), 1+0j)
self.assertAlmostEqual(complex("1j"), 1j)
self.assertAlmostEqual(complex(), 0)
self.assertAlmostEqual(complex("-1"), -1)
self.assertAlmostEqual(complex("+1"), +1)
self.assertAlmostEqual(complex("(1+2j)"), 1+2j)
self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j)
self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j)
self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j)
self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j)
self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j)
self.assertAlmostEqual(complex("J"), 1j)
self.assertAlmostEqual(complex("( j )"), 1j)
self.assertAlmostEqual(complex("+J"), 1j)
self.assertAlmostEqual(complex("( -j)"), -1j)
self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j)
self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j)
self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j)
class complex2(complex): pass
self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j)
self.assertAlmostEqual(complex(real=17, imag=23), 17+23j)
self.assertAlmostEqual(complex(real=17+23j), 17+23j)
self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j)
self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j)
# check that the sign of a zero in the real or imaginary part
# is preserved when constructing from two floats. (These checks
# are harmless on systems without support for signed zeros.)
def split_zeros(x):
"""Function that produces different results for 0. and -0."""
return atan2(x, -1.)
self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.))
self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.))
self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.))
self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.))
c = 3.14 + 1j
self.assertTrue(complex(c) is c)
del c
self.assertRaises(TypeError, complex, "1", "1")
self.assertRaises(TypeError, complex, 1, "1")
# SF bug 543840: complex(string) accepts strings with \0
# Fixed in 2.3.
self.assertRaises(ValueError, complex, '1+1j\0j')
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, float, 5+3j)
self.assertRaises(ValueError, complex, "")
self.assertRaises(TypeError, complex, None)
self.assertRaisesRegex(TypeError, "not 'NoneType'", complex, None)
self.assertRaises(ValueError, complex, "\0")
self.assertRaises(ValueError, complex, "3\09")
self.assertRaises(TypeError, complex, "1", "2")
self.assertRaises(TypeError, complex, "1", 42)
self.assertRaises(TypeError, complex, 1, "2")
self.assertRaises(ValueError, complex, "1+")
self.assertRaises(ValueError, complex, "1+1j+1j")
self.assertRaises(ValueError, complex, "--")
self.assertRaises(ValueError, complex, "(1+2j")
self.assertRaises(ValueError, complex, "1+2j)")
self.assertRaises(ValueError, complex, "1+(2j)")
self.assertRaises(ValueError, complex, "(1+2j)123")
self.assertRaises(ValueError, complex, "x")
self.assertRaises(ValueError, complex, "1j+2")
self.assertRaises(ValueError, complex, "1e1ej")
self.assertRaises(ValueError, complex, "1e++1ej")
self.assertRaises(ValueError, complex, ")1+2j(")
# the following three are accepted by Python 2.6
self.assertRaises(ValueError, complex, "1..1j")
self.assertRaises(ValueError, complex, "1.11.1j")
self.assertRaises(ValueError, complex, "1e1.1j")
# check that complex accepts long unicode strings
self.assertEqual(type(complex("1"*500)), complex)
# check whitespace processing
self.assertEqual(complex('\N{EM SPACE}(\N{EN SPACE}1+1j ) '), 1+1j)
class EvilExc(Exception):
pass
class evilcomplex:
def __complex__(self):
raise EvilExc
self.assertRaises(EvilExc, complex, evilcomplex())
class float2:
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertAlmostEqual(complex(float2(42.)), 42)
self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j)
self.assertRaises(TypeError, complex, float2(None))
class complex0(complex):
"""Test usage of __complex__() when inheriting from 'complex'"""
def __complex__(self):
return 42j
class complex1(complex):
"""Test usage of __complex__() with a __new__() method"""
def __new__(self, value=0j):
return complex.__new__(self, 2*value)
def __complex__(self):
return self
class complex2(complex):
"""Make sure that __complex__() calls fail if anything other than a
complex is returned"""
def __complex__(self):
return None
self.assertAlmostEqual(complex(complex0(1j)), 42j)
self.assertAlmostEqual(complex(complex1(1j)), 2j)
self.assertRaises(TypeError, complex, complex2(1j))
def test_hash(self):
for x in range(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating point
self.assertEqual(hash(x), hash(complex(x, 0.)))
def test_abs(self):
nums = [complex(x/3., y/7.) for x in range(-9,9) for y in range(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
def test_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(1+6j, '(1+6j)')
test(1-6j, '(1-6j)')
test(-(1+0j), '(-1+-0j)', test_fn=self.assertNotEqual)
test(complex(1., INF), "(1+infj)")
test(complex(1., -INF), "(1-infj)")
test(complex(INF, 1), "(inf+1j)")
test(complex(-INF, INF), "(-inf+infj)")
test(complex(NAN, 1), "(nan+1j)")
test(complex(1, NAN), "(1+nanj)")
test(complex(NAN, NAN), "(nan+nanj)")
test(complex(0, INF), "infj")
test(complex(0, -INF), "-infj")
test(complex(0, NAN), "nanj")
self.assertEqual(1-6j,complex(repr(1-6j)))
self.assertEqual(1+6j,complex(repr(1+6j)))
self.assertEqual(-6j,complex(repr(-6j)))
self.assertEqual(6j,complex(repr(6j)))
@support.requires_IEEE_754
def test_negative_zero_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(complex(0., 1.), "1j")
test(complex(-0., 1.), "(-0+1j)")
test(complex(0., -1.), "-1j")
test(complex(-0., -1.), "(-0-1j)")
test(complex(0., 0.), "0j")
test(complex(0., -0.), "-0j")
test(complex(-0., 0.), "(-0+0j)")
test(complex(-0., -0.), "(-0-0j)")
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_file(self):
a = 3.33+4.43j
b = 5.1+2.3j
fo = None
try:
fo = open(support.TESTFN, "w")
print(a, b, file=fo)
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), ("%s %s\n" % (a, b)))
finally:
if (fo is not None) and (not fo.closed):
fo.close()
support.unlink(support.TESTFN)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
@support.requires_IEEE_754
def test_plus_minus_0j(self):
# test that -0j and 0j literals are not identified
z1, z2 = 0j, -0j
self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.))
self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.))
@support.requires_IEEE_754
def test_negated_imaginary_literal(self):
z0 = -0j
z1 = -7j
z2 = -1e1000j
# Note: In versions of Python < 3.2, a negated imaginary literal
# accidentally ended up with real part 0.0 instead of -0.0, thanks to a
# modification during CST -> AST translation (see issue #9011). That's
# fixed in Python 3.2.
self.assertFloatsAreIdentical(z0.real, -0.0)
self.assertFloatsAreIdentical(z0.imag, -0.0)
self.assertFloatsAreIdentical(z1.real, -0.0)
self.assertFloatsAreIdentical(z1.imag, -7.0)
self.assertFloatsAreIdentical(z2.real, -0.0)
self.assertFloatsAreIdentical(z2.imag, -INF)
@support.requires_IEEE_754
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@support.requires_IEEE_754
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertFloatsAreIdentical(z.real, roundtrip.real)
self.assertFloatsAreIdentical(z.imag, roundtrip.imag)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect beside changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
# empty format string is same as str()
self.assertEqual(format(1+3j, ''), str(1+3j))
self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
self.assertEqual(format(3j, ''), str(3j))
self.assertEqual(format(3.2j, ''), str(3.2j))
self.assertEqual(format(3+0j, ''), str(3+0j))
self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
# empty presentation type should still be analogous to str,
# even when format string is nonempty (issue #5920).
self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
z = 4/7. - 100j/7.
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '10'), str(z))
z = complex(0.0, 3.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '2'), str(z))
z = complex(-0.0, 2.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '3'), str(z))
self.assertEqual(format(1+3j, 'g'), '1+3j')
self.assertEqual(format(3j, 'g'), '0+3j')
self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ')
self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j')
self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ')
self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ')
self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)')
self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ')
self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ')
self.assertEqual(format(1.5+3j, '20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ')
self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
# Issue 7094: Alternate formatting (specified by #)
self.assertEqual(format(1+1j, '.0e'), '1e+00+1e+00j')
self.assertEqual(format(1+1j, '#.0e'), '1.e+00+1.e+00j')
self.assertEqual(format(1+1j, '.0f'), '1+1j')
self.assertEqual(format(1+1j, '#.0f'), '1.+1.j')
self.assertEqual(format(1.1+1.1j, 'g'), '1.1+1.1j')
self.assertEqual(format(1.1+1.1j, '#g'), '1.10000+1.10000j')
# Alternate doesn't make a difference for these, they format the same with or without it
self.assertEqual(format(1+1j, '.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '#.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '.1f'), '1.0+1.0j')
self.assertEqual(format(1+1j, '#.1f'), '1.0+1.0j')
# Misc. other alternate tests
self.assertEqual(format((-1.5+0.5j), '#f'), '-1.500000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '#.0f'), '-2.+0.j')
self.assertEqual(format((-1.5+0.5j), '#e'), '-1.500000e+00+5.000000e-01j')
self.assertEqual(format((-1.5+0.5j), '#.0e'), '-2.e+00+5.e-01j')
self.assertEqual(format((-1.5+0.5j), '#g'), '-1.50000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '.0g'), '-2+0.5j')
self.assertEqual(format((-1.5+0.5j), '#.0g'), '-2.+0.5j')
# zero padding is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
# '=' alignment is invalid
self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
# integer presentation types are an error
for t in 'bcdoxX':
self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
# make sure everything works in ''.format()
self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
# issue 3382
self.assertEqual(format(complex(NAN, NAN), 'f'), 'nan+nanj')
self.assertEqual(format(complex(1, NAN), 'f'), '1.000000+nanj')
self.assertEqual(format(complex(NAN, 1), 'f'), 'nan+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'f'), 'nan-1.000000j')
self.assertEqual(format(complex(NAN, NAN), 'F'), 'NAN+NANj')
self.assertEqual(format(complex(1, NAN), 'F'), '1.000000+NANj')
self.assertEqual(format(complex(NAN, 1), 'F'), 'NAN+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'F'), 'NAN-1.000000j')
self.assertEqual(format(complex(INF, INF), 'f'), 'inf+infj')
self.assertEqual(format(complex(1, INF), 'f'), '1.000000+infj')
self.assertEqual(format(complex(INF, 1), 'f'), 'inf+1.000000j')
self.assertEqual(format(complex(INF, -1), 'f'), 'inf-1.000000j')
self.assertEqual(format(complex(INF, INF), 'F'), 'INF+INFj')
self.assertEqual(format(complex(1, INF), 'F'), '1.000000+INFj')
self.assertEqual(format(complex(INF, 1), 'F'), 'INF+1.000000j')
self.assertEqual(format(complex(INF, -1), 'F'), 'INF-1.000000j')
def test_main():
support.run_unittest(ComplexTest)
if __name__ == "__main__":
test_main()
|
ycsoft/FatCat-Server
|
refs/heads/master
|
LIBS/boost_1_58_0/libs/python/test/pointer_vector.py
|
12
|
# Copyright Joel de Guzman 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> import pointer_vector_ext
>>> d = pointer_vector_ext.DoesSomething()
>>> lst = d.returnList()
>>> lst[0].f();
'harru'
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
    print('running...')
import sys
status = run()[0]
    if status == 0: print("Done.")
sys.exit(status)
|
petrjasek/superdesk-core
|
refs/heads/master
|
tests/templates/filters_test.py
|
2
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from flask import render_template_string
from superdesk.tests import TestCase
class ConvertDatetimeFiltersTest(TestCase):
def test_convert_datetime_utc_no_format(self):
template_string = '{{ item.versioncreated | format_datetime("Australia/Sydney")}}'
item = {"versioncreated": "2015-01-01T22:54:53+0000"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "2015-01-02 09:54:53+11:00")
def test_convert_datetime_local_time_no_format(self):
template_string = '{{ item.versioncreated | format_datetime("Australia/Sydney")}}'
item = {"versioncreated": "2015-01-01T22:54:53+05:30"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "2015-01-02 04:24:53+11:00")
def test_convert_datetime_utc_format(self):
template_string = (
'{{ item.versioncreated | format_datetime(timezone_string="Australia/Sydney", ' 'date_format="%Y-%m-%d")}}'
)
item = {"versioncreated": "2015-01-01T22:54:53+0000"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "2015-01-02")
def test_convert_datetime_invalid_date(self):
template_string = '{{ item.versioncreated | format_datetime("Australia/Sydney", "%Y-%m-%d")}}'
item = {"versioncreated": "test string"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "")
def test_convert_datetime_invalid_timezone(self):
template_string = '{{ item.versioncreated | format_datetime("australia/sydney", "%Y-%m-%d")}}'
item = {"versioncreated": "test string"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "")
def test_convert_datetime_utc_default_timezone(self):
template_string = "{{ item.versioncreated | format_datetime()}}"
item = {"versioncreated": "2015-01-01T22:54:53+0000"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "2015-01-01 23:54:53+01:00")
def test_convert_datetime_local_time_default_timezone(self):
template_string = "{{ item.versioncreated | format_datetime()}}"
item = {"versioncreated": "2015-01-01T22:54:53+05:30"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "2015-01-01 18:24:53+01:00")
def test_convert_datetime_utc_timezone_format(self):
template_string = '{{ item.versioncreated | format_datetime("Australia/Sydney", "%d %b %Y %H:%S %Z")}}'
item = {"versioncreated": "2015-01-01T22:54:53+0000"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "02 Jan 2015 09:53 AEDT")
item = {"versioncreated": "2015-06-01T22:54:53+0000"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "02 Jun 2015 08:53 AEST")
def test_get_first_paragraph(self):
template_string = "{{ item.body_html | first_paragraph() }}"
item = {"body_html": "<p><br></p><p>First paragraph</p><p>Second paragraph</p>"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "<p>First paragraph</p>")
def test_get_first_paragraph_doesnt_fail_with_empty_body(self):
template_string = "{{ item.body_html | first_paragraph()}}"
item = {"headline": "Sample headline"}
result = render_template_string(template_string, item=item)
self.assertEqual(result, "")
|
KEHANG/RMG-Py
|
refs/heads/master
|
rmgpy/display.py
|
11
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2012 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains only a display() function, which, if you are running in
the IPython --pylab mode, will render things inline in pretty SVG or PNG graphics.
If you are NOT running in IPython --pylab mode, it will do nothing.
"""
def do_nothing(object):
pass
try:
import IPython
except ImportError:
# Don't have IPython installed
display = do_nothing
else:
try:
displayer = IPython.core.display.display
display = lambda obj: displayer(obj, include='png')
    except (NameError, AttributeError):
        display = do_nothing  # not running in IPython --pylab mode
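# Usage sketch (hedged): 'mol' is a hypothetical displayable object, e.g. an
# RMG Molecule; it is not defined in this module. Under "ipython --pylab",
# display(mol) renders it inline as SVG/PNG; elsewhere it is a no-op.
#
#     from rmgpy.display import display
#     display(mol)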
|
Clemson-DPA/dpa-pipe
|
refs/heads/master
|
dpa/ui/maya/export.py
|
1
|
from PySide import QtGui, QtCore
# -----------------------------------------------------------------------------
|
smalley/cfapi
|
refs/heads/master
|
test/harness.py
|
3
|
import unittest
from app import app, db
class IntegrationTest(unittest.TestCase):
def setUp(self):
# Set up the database settings
# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://postgres@localhost/civic_json_worker_test'
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres:///civic_json_worker_test'
db.create_all()
self.app = app.test_client()
def tearDown(self):
db.session.close()
db.drop_all()
|
michaelhuang/QuantSoftwareToolkit
|
refs/heads/master
|
Legacy/Legacy/pseries.py
|
5
|
'''
Created on Oct 7, 2010
@author: Tucker Balch
@contact: tucker@cc.gatech.edu
'''
import os
import pandas
from qstkutil import DataAccess as da
from qstkutil import timeutil as tu
__version__ = "$Revision: 156 $"
def getDataMatrixFromData(dataname,partname,symbols,tsstart,tsend):
pathpre = os.environ.get('QSDATA') + "/Processed"
if dataname == "Norgate":
pathsub = "/Norgate/Equities"
paths=list()
paths.append(pathpre + pathsub + "/US_NASDAQ/")
paths.append(pathpre + pathsub + "/US_NYSE/")
paths.append(pathpre + pathsub + "/US_NYSE Arca/")
paths.append(pathpre + pathsub + "/OTC/")
paths.append(pathpre + pathsub + "/US_AMEX/")
paths.append(pathpre + pathsub + "/Delisted_US_Recent/")
paths.append(pathpre + pathsub + "/US_Delisted/")
datastr1 = "/StrategyData"
datastr2 = "StrategyData"
else:
raise Exception("unknown dataname " + str(dataname))
data = da.DataAccess(True, paths, datastr1, datastr2,
False, symbols, tsstart, tsend)
tss = list(data.getTimestampArray())
start_time = tss[0]
end_time = tss[-1]
dates = []
for ts in tss:
dates.append(tu.epoch2date(ts))
vals = data.getMatrixBetweenTS(symbols,partname,
start_time,end_time)
syms = list(data.getListOfSymbols())
del data
return(pandas.DataMatrix(vals,dates,syms))
# end getTSFromData
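# Usage sketch (hedged; "close" as partname and the epoch timestamps ts_start
# and ts_end are hypothetical placeholders, and QSDATA must point at processed
# Norgate data for this to run):
#     dm = getDataMatrixFromData("Norgate", "close", ["AAPL"], ts_start, ts_end)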
|
cleverhans-lab/cleverhans
|
refs/heads/master
|
cleverhans_v3.1.0/tests_tf/test_utils.py
|
1
|
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from cleverhans import utils
from cleverhans.utils_keras import cnn_model
from cleverhans.utils_keras import KerasModelWrapper
class TestUtils(unittest.TestCase):
def test_to_categorical_with_nb_classes_arg(self):
vec = np.asarray([0])
cat = np.asarray([[1, 0, 0]])
self.assertTrue(np.all(utils.to_categorical(vec, 3) == cat))
def test_random_targets_vector(self):
# Test utils.random_targets with a vector of labels as the input
gt_labels = np.asarray([0, 1, 2, 3])
rt = utils.random_targets(gt_labels, 5)
# Make sure random_targets returns a one-hot encoded labels
self.assertTrue(len(rt.shape) == 2)
rt_labels = np.argmax(rt, axis=1)
# Make sure all labels are different from the correct labels
self.assertTrue(np.all(rt_labels != gt_labels))
def test_random_targets_one_hot(self):
# Test utils.random_targets with one-hot encoded labels as the input
gt = np.asarray(
[[0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [1, 0, 0, 0, 0]]
)
gt_labels = np.argmax(gt, axis=1)
rt = utils.random_targets(gt, 5)
# Make sure random_targets returns a one-hot encoded labels
self.assertTrue(len(rt.shape) == 2)
rt_labels = np.argmax(rt, axis=1)
# Make sure all labels are different from the correct labels
self.assertTrue(np.all(rt_labels != gt_labels))
def test_random_targets_one_hot_single_label(self):
# Test utils.random_targets with a single one-hot encoded label
gt = np.asarray([0, 0, 1, 0, 0])
gt = gt.reshape((1, 5))
gt_labels = np.argmax(gt, axis=1)
rt = utils.random_targets(gt, 5)
# Make sure random_targets returns a one-hot encoded labels
self.assertTrue(len(rt.shape) == 2)
rt_labels = np.argmax(rt, axis=1)
# Make sure all labels are different from the correct labels
self.assertTrue(np.all(rt_labels != gt_labels))
def test_other_classes_neg_class_ind(self):
with self.assertRaises(Exception) as context:
utils.other_classes(10, -1)
self.assertTrue(context.exception)
def test_other_classes_invalid_class_ind(self):
with self.assertRaises(Exception) as context:
utils.other_classes(5, 8)
self.assertTrue(context.exception)
def test_other_classes_return_val(self):
res = utils.other_classes(5, 2)
res_expected = [0, 1, 3, 4]
self.assertTrue(res == res_expected)
def test_get_logits_over_interval(self):
import tensorflow as tf
model = cnn_model()
wrap = KerasModelWrapper(model)
fgsm_params = {"eps": 0.5}
img = np.ones(shape=(28, 28, 1))
num_points = 21
with tf.Session() as sess:
tf.global_variables_initializer().run()
logits = utils.get_logits_over_interval(
sess,
wrap,
img,
fgsm_params,
min_epsilon=-10,
max_epsilon=10,
num_points=num_points,
)
self.assertEqual(logits.shape[0], num_points)
if __name__ == "__main__":
unittest.main()
|
andreparames/odoo
|
refs/heads/8.0
|
addons/point_of_sale/wizard/pos_session_opening.py
|
337
|
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.point_of_sale.point_of_sale import pos_session
class pos_session_opening(osv.osv_memory):
_name = 'pos.session.opening'
_columns = {
'pos_config_id' : fields.many2one('pos.config', 'Point of Sale', required=True),
'pos_session_id' : fields.many2one('pos.session', 'PoS Session'),
'pos_state' : fields.related('pos_session_id', 'state',
type='selection',
selection=pos_session.POS_SESSION_STATE,
string='Session Status', readonly=True),
'pos_state_str' : fields.char('Status', readonly=True),
'show_config' : fields.boolean('Show Config', readonly=True),
'pos_session_name' : fields.related('pos_session_id', 'name',
type='char', size=64, readonly=True),
'pos_session_username' : fields.related('pos_session_id', 'user_id', 'name',
type='char', size=64, readonly=True)
}
def open_ui(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids[0], context=context)
context = dict(context or {})
context['active_id'] = data.pos_session_id.id
return {
'type' : 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
def open_existing_session_cb_close(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
wizard.pos_session_id.signal_workflow('cashbox_control')
return self.open_session_cb(cr, uid, ids, context)
def open_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1, "you can open only one session at a time"
proxy = self.pool.get('pos.session')
wizard = self.browse(cr, uid, ids[0], context=context)
if not wizard.pos_session_id:
values = {
'user_id' : uid,
'config_id' : wizard.pos_config_id.id,
}
session_id = proxy.create(cr, uid, values, context=context)
s = proxy.browse(cr, uid, session_id, context=context)
if s.state=='opened':
return self.open_ui(cr, uid, ids, context=context)
return self._open_session(session_id)
return self._open_session(wizard.pos_session_id.id)
def open_existing_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1
wizard = self.browse(cr, uid, ids[0], context=context)
return self._open_session(wizard.pos_session_id.id)
def _open_session(self, session_id):
return {
'name': _('Session'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'pos.session',
'res_id': session_id,
'view_id': False,
'type': 'ir.actions.act_window',
}
def on_change_config(self, cr, uid, ids, config_id, context=None):
result = {
'pos_session_id': False,
'pos_state': False,
'pos_state_str' : '',
'pos_session_username' : False,
'pos_session_name' : False,
}
if not config_id:
return {'value' : result}
proxy = self.pool.get('pos.session')
session_ids = proxy.search(cr, uid, [
('state', '!=', 'closed'),
('config_id', '=', config_id),
('user_id', '=', uid),
], context=context)
if session_ids:
session = proxy.browse(cr, uid, session_ids[0], context=context)
result['pos_state'] = str(session.state)
result['pos_state_str'] = dict(pos_session.POS_SESSION_STATE).get(session.state, '')
result['pos_session_id'] = session.id
result['pos_session_name'] = session.name
result['pos_session_username'] = session.user_id.name
return {'value' : result}
def default_get(self, cr, uid, fieldnames, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','<>','closed'), ('user_id','=',uid)], context=context)
if session_ids:
result = so.browse(cr, uid, session_ids[0], context=context).config_id.id
else:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
result = current_user.pos_config and current_user.pos_config.id or False
if not result:
r = self.pool.get('pos.config').search(cr, uid, [], context=context)
result = r and r[0] or False
count = self.pool.get('pos.config').search_count(cr, uid, [('state', '=', 'active')], context=context)
show_config = bool(count > 1)
return {
'pos_config_id' : result,
'show_config' : show_config,
}
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/inspections/AddCallSuperSelfNamePreserved.py
|
77
|
class A:
def __init__(self, x):
self.x = x
class B(A):
def <warning descr="Call to __init__ of super class is missed">__init_<caret>_</warning>(this, y):
this.y = y
|
andreimacavei/coala
|
refs/heads/master
|
coalib/tests/bears/GlobalBearTest.py
|
3
|
import sys
sys.path.insert(0, ".")
import unittest
from coalib.settings.Section import Section
from coalib.bears.GlobalBear import GlobalBear, BEAR_KIND
class GlobalBearTest(unittest.TestCase):
def test_api(self):
test_object = GlobalBear(0, Section("name"), None)
self.assertRaises(NotImplementedError, test_object.run)
def test_kind(self):
self.assertEqual(GlobalBear.kind(), BEAR_KIND.GLOBAL)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
siconos/siconos-deb
|
refs/heads/ubuntu/xenial
|
examples/Mechanics/ImpactingBar/tools.py
|
1
|
import numpy as np
import cmath
pi = cmath.pi
def computeBigH(H,Nfft):
## x = cmath.exp(2*1j*pi/Nfft)*np.ones((Nfft),dtype='complex64')
## sfft = (np.vander(x,Nfft)).T
## sfft = np.vander(sfft[:,0],Nfft)
## print sfft
## print sfft.shape
bigH = np.zeros(np.asarray(H.shape)*Nfft,dtype='complex64')
print("shape of bigH:", bigH.shape)
nc = H.shape[0]
ndof = H.shape[1]
for i in range(Nfft):
for j in range(Nfft):
bigH[i*nc:(i+1)*nc,j*ndof:(j+1)*ndof] = H*cmath.exp(2*1j*pi*i*j/Nfft)
return bigH
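# Usage sketch (hedged, values hypothetical): replicating a 2x3 matrix H over
# Nfft = 4 blocks yields an 8x12 block matrix whose (i, j) block equals
# H * exp(2j*pi*i*j/Nfft), i.e. H scaled by DFT twiddle factors:
#
#     H = np.ones((2, 3), dtype='complex64')
#     bigH = computeBigH(H, 4)   # bigH.shape == (8, 12)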
def energyNormalisation(mass, U, gamma, Nfft):
    # ndof was left undefined in the original; infer it from the mass matrix.
    ndof = mass.shape[0]
    res = -gamma
    for i in range(Nfft):
        # accumulate U_i^H * M * U_i over the Nfft frequency blocks
        Ui = U[i*ndof:(i+1)*ndof]
        res += np.dot(Ui.conj().T, np.dot(mass, Ui))
    return res
|
lucc/alot
|
refs/heads/master
|
tests/db/manager_test.py
|
1
|
# Copyright (C) 2018 Patrick Totzke
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
"""Test suite for alot.db.manager module."""
import tempfile
import textwrap
import os
import shutil
from alot.db.manager import DBManager
from alot.settings.const import settings
from notmuch import Database
from .. import utilities
class TestDBManager(utilities.TestCaseClassCleanup):
@classmethod
def setUpClass(cls):
# create temporary notmuch config
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write(textwrap.dedent("""\
[maildir]
synchronize_flags = true
"""))
cls.notmuch_config_path = f.name
cls.addClassCleanup(os.unlink, f.name)
# define an empty notmuch database in a temporary directory
cls.dbpath = tempfile.mkdtemp()
cls.db = Database(path=cls.dbpath, create=True)
cls.db.close()
cls.manager = DBManager(cls.dbpath)
# clean up temporary database
cls.addClassCleanup(cls.manager.kill_search_processes)
cls.addClassCleanup(shutil.rmtree, cls.dbpath)
# let global settings manager read our temporary notmuch config
settings.read_notmuch_config(cls.notmuch_config_path)
def test_save_named_query(self):
alias = 'key'
querystring = 'query string'
self.manager.save_named_query(alias, querystring)
self.manager.flush()
named_queries_dict = self.manager.get_named_queries()
self.assertDictEqual(named_queries_dict, {alias: querystring})
|