| repo_name | ref | path | copies | content |
|---|---|---|---|---|
fevxie/odoo | refs/heads/8.0 | addons/mrp/tests/test_multicompany.py | 374 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMrpMulticompany(common.TransactionCase):
def setUp(self):
super(TestMrpMulticompany, self).setUp()
cr, uid = self.cr, self.uid
# Useful models
self.ir_model_data = self.registry('ir.model.data')
self.res_users = self.registry('res.users')
self.stock_location = self.registry('stock.location')
group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
self.multicompany_user_id = self.res_users.create(cr, uid,
{'name': 'multicomp', 'login': 'multicomp',
'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
'company_id': company_2_id, 'company_ids': [(6,0,[company_2_id])]})
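# In the create() vals above, (6, 0, ids) is the ORM's many2many "replace"
# command: it overwrites the relation with exactly the listed ids.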
def test_00_multicompany_user(self):
"""check no error on getting default mrp.production values in multicompany setting"""
cr, uid, context = self.cr, self.multicompany_user_id, {}
fields = ['location_src_id', 'location_dest_id']
defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
for field in fields:
if defaults.get(field):
try:
self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
except Exception, exc:
assert False, "unreadable location %s: %s" % (field, exc)
|
harveybia/face-hack | refs/heads/master | venv/face/lib/python2.7/site-packages/PIL/features.py | 58 | from PIL import Image
modules = {
"pil": "PIL._imaging",
"tkinter": "PIL._imagingtk",
"freetype2": "PIL._imagingft",
"littlecms2": "PIL._imagingcms",
"webp": "PIL._webp",
"transp_webp": ("WEBP", "WebPDecoderBuggyAlpha")
}
def check_module(feature):
if feature not in modules:
raise ValueError("Unknown module %s" % feature)
module = modules[feature]
method_to_call = None
if type(module) is tuple:
module, method_to_call = module
try:
imported_module = __import__(module)
except ImportError:
# If a method is being checked, return None rather than False:
# the module required for the method failed to import, so the
# method itself could never be checked
return None if method_to_call else False
if method_to_call:
method = getattr(imported_module, method_to_call)
return method() is True
else:
return True
def get_supported_modules():
supported_modules = []
for feature in modules:
if check_module(feature):
supported_modules.append(feature)
return supported_modules
codecs = {
"jpg": "jpeg",
"jpg_2000": "jpeg2k",
"zlib": "zip",
"libtiff": "libtiff"
}
def check_codec(feature):
if feature not in codecs:
raise ValueError("Unknown codec %s" % feature)
codec = codecs[feature]
return codec + "_encoder" in dir(Image.core)
def get_supported_codecs():
supported_codecs = []
for feature in codecs:
if check_codec(feature):
supported_codecs.append(feature)
return supported_codecs
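# A minimal usage sketch (illustrative, not part of the original module); it
# relies only on the helpers defined above.
if __name__ == "__main__":
print("Supported modules: %s" % ", ".join(get_supported_modules()))
print("Supported codecs: %s" % ", ".join(get_supported_codecs()))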
|
mycodeday/crm-platform | refs/heads/master | account_test/report/__init__.py | 433 | import account_test_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
razvanphp/arangodb | refs/heads/devel | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/test/test_socketserver.py | 56 | """
Test suite for SocketServer.py.
"""
import contextlib
import errno
import imp
import os
import select
import signal
import socket
import tempfile
import threading
import time
import unittest
import SocketServer
import test.test_support
from test.test_support import reap_children, verbose, TestSkipped
from test.test_support import TESTFN as TEST_FILE
test.test_support.requires("network")
TEST_STR = "hello world\n"
HOST = test.test_support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
def receive(sock, n, timeout=20):
r, w, x = select.select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError, "timed out on %r" % (sock,)
if HAVE_UNIX_SOCKETS:
class ForkingUnixStreamServer(SocketServer.ForkingMixIn,
SocketServer.UnixStreamServer):
pass
class ForkingUnixDatagramServer(SocketServer.ForkingMixIn,
SocketServer.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
# Don't throw an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEquals(pid2, pid)
testcase.assertEquals(72 << 8, status)
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(20) # Kill deadlocks after 20 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except os.error:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
if os.name == 'os2':
dir = '\socket'
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
if os.name == 'os2':
# AF_UNIX socket names on OS/2 require a specific prefix
# which can't include a drive letter and must also use
# backslashes as directory separators
if fn[1] == ':':
fn = fn[2:]
if fn[0] in (os.sep, os.altsep):
fn = fn[1:]
if os.sep == '/':
fn = fn.replace(os.sep, os.altsep)
else:
fn = fn.replace(os.altsep, os.sep)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print "creating server"
server = MyServer(addr, MyHandler)
self.assertEquals(server.server_address, server.socket.getsockname())
return server
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", svrcls
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
for i in range(3):
if verbose: print "test client", i
testfunc(svrcls.address_family, addr)
if verbose: print "waiting for server"
server.shutdown()
t.join()
if verbose: print "done"
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEquals(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEquals(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(SocketServer.ThreadingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_UNIX_SOCKETS:
def test_UnixStreamServer(self):
self.run_server(SocketServer.UnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingUnixStreamServer(self):
self.run_server(SocketServer.ThreadingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(SocketServer.UDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(SocketServer.ThreadingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
if HAVE_FORKING:
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
# if HAVE_UNIX_SOCKETS:
# def test_UnixDatagramServer(self):
# self.run_server(SocketServer.UnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# def test_ThreadingUnixDatagramServer(self):
# self.run_server(SocketServer.ThreadingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# if HAVE_FORKING:
# def test_ForkingUnixDatagramServer(self):
# self.run_server(SocketServer.ForkingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
def test_main():
if imp.lock_held():
# If the import lock is held, the threads will hang
raise TestSkipped("can't run when import lock is held")
test.test_support.run_unittest(SocketServerTest)
if __name__ == "__main__":
test_main()
signal_alarm(3) # Shutdown shouldn't take more than 3 seconds.
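# The pattern these tests exercise, in miniature (an illustrative sketch, not
# part of the original test suite):
#
# server = SocketServer.TCPServer((HOST, 0), SocketServer.StreamRequestHandler)
# t = threading.Thread(target=server.serve_forever,
# kwargs={'poll_interval': 0.01})
# t.daemon = True
# t.start()
# ... connect to server.server_address and exchange data ...
# server.shutdown() # returns once serve_forever() has exited
# t.join()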
|
eahneahn/free | refs/heads/master | lib/python2.7/site-packages/django/utils/timezone.py | 16 | """
Timezone-related classes and functions.
This module uses pytz when it's available and falls back to local reference implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import sys
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import six
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept identical to the reference version. Subclasses contain improvements.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
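# Illustrative use of override (a sketch, not part of the original module;
# "Europe/Paris" is an arbitrary example name, pytz is required for names,
# and settings.USE_TZ = True is assumed):
#
# from django.utils import timezone
# with timezone.override("Europe/Paris"):
# local_now = timezone.localtime(timezone.now())
# # the previously active time zone is restored on exit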
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
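# A round-trip sketch for make_aware/make_naive (illustrative; assumes pytz
# is installed and uses an arbitrary zone name):
#
# import pytz
# paris = pytz.timezone("Europe/Paris")
# aware = make_aware(datetime(2013, 7, 1, 12, 0), paris) # localize() path
# naive = make_naive(aware, utc) # naive UTC wall time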
|
matmutant/sl4a | refs/heads/master | python/src/Lib/distutils/command/build_clib.py | 138 | """distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_clib.py 37828 2004-11-10 22:23:15Z loewis $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib (Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
# initialize_options()
def finalize_options (self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs,
os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
# finalize_options()
def run (self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
# run()
def check_library_list (self, libraries):
"""Ensure that the list of libraries (presumably provided as a
command option 'libraries') is valid, i.e. it is a list of
2-tuples, where the tuples are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise."""
# Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
# with only names changed to protect the innocent!
if type(libraries) is not ListType:
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
if type(lib) is not TupleType or len(lib) != 2:
raise DistutilsSetupError, \
"each element of 'libraries' must be a 2-tuple"
if type(lib[0]) is not StringType:
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if type(lib[1]) is not DictionaryType:
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
# for lib
# check_library_list ()
def get_library_names (self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
# get_library_names ()
def get_source_files (self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if (sources is None or
type(sources) not in (ListType, TupleType) ):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
# get_source_files ()
def build_libraries (self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or type(sources) not in (ListType, TupleType):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# for libraries
# build_libraries ()
# class build_lib
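# For illustration (not part of distutils itself), a setup() call matching
# the (name, build_info) structure validated by check_library_list() above:
#
# from distutils.core import setup
# setup(name='demo',
# libraries=[('foo', {'sources': ['foo.c'],
# 'include_dirs': ['include']})])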
|
sesuncedu/bitcurator | refs/heads/master | tools/py3fpdf/tutorial/bookmark.py | 21 | from FPDF.Bookmark import *
pdf=Bookmark()
pdf.Open()
pdf.SetFont('Arial','',15)
#Page 1
pdf.AddPage()
pdf.Bookmark('Page 1')
pdf.Bookmark('Paragraph 1',1,-1)
pdf.Cell(0,6,'Paragraph 1')
pdf.Ln(50)
pdf.Bookmark('Paragraph 2',1,-1)
pdf.Cell(0,6,'Paragraph 2')
#Page 2
pdf.AddPage()
pdf.Bookmark('Page 2')
pdf.Bookmark('Paragraph 3',1,-1)
pdf.Cell(0,6,'Paragraph 3')
pdf.Output('bookmark.pdf','F')
|
sbmlteam/deviser | refs/heads/sk_fix_issues | generator/bindings_files/DowncastExtensionFile.py | 1 | #!/usr/bin/env python
#
# @file DowncastExtensionFile.py
# @brief class for generating downcast extension file
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from ..base_files import BaseCppFile, BaseInterfaceFile
from ..util import strFunctions
class DowncastExtensionFile():
"""Class for downcast extension files"""
def __init__(self, name, package, binding):
self.binding = binding
self.package = package.lower()
self.cap_package = self.package.upper()
self.up_package = strFunctions.upper_first(self.package)
if binding == 'csharp' or binding == 'java':
self.fileout = BaseInterfaceFile.BaseInterfaceFile(name)
self.fileout.brief_description = 'Casting to most specific ' \
'extension object for ' \
'{0}'.format(binding)
else:
self.fileout = BaseCppFile.BaseCppFile(name, 'cpp', None)
self.fileout.brief_description = 'Casting to most specific ' \
'extension object for ' \
'{0}'.format(binding)
########################################################################
# Write binding specific code
def write_binding_specific_i_code(self):
self.fileout.skip_line(2)
self.fileout.write_line('#ifdef USE_{0}'.format(self.cap_package))
self.fileout.write_line('%pragma({0}) modulecode ='.format(self.binding))
self.fileout.write_line('%{')
line = 'if (pkgName'
if self.binding == 'csharp':
line += ' == \"{0}\")'.format(self.package)
else:
line += '.equals(\"{0}\"))'.format(self.package)
self.fileout.up_indent()
self.fileout.write_line(line)
self.fileout.write_line('{')
self.fileout.up_indent()
self.fileout.write_line('return new {0}Extension(cPtr, '
'owner);'.format(self.up_package))
self.fileout.down_indent()
self.fileout.write_line('}')
self.fileout.down_indent()
self.fileout.write_line('%}')
self.fileout.write_line('#endif /* USE_{0} */'.format(self.cap_package))
self.fileout.skip_line()
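# For a package named "qual" and the csharp binding, the method above would
# emit roughly this SWIG module code (an illustrative sketch of the output,
# not text copied from a generated file):
#
# #ifdef USE_QUAL
# %pragma(csharp) modulecode =
# %{
# if (pkgName == "qual")
# {
# return new QualExtension(cPtr, owner);
# }
# %}
# #endif /* USE_QUAL */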
def write_binding_specific_cpp_code(self):
self.fileout.skip_line(2)
self.fileout.write_line('#ifdef USE_{0}'.format(self.cap_package))
line = 'if (pkgName == \"{0}\")'.format(self.package)
self.fileout.write_line(line)
self.fileout.write_line('{')
self.fileout.up_indent()
self.fileout.write_line('return SWIGTYPE_p_{0}'
'Extension;'.format(self.up_package))
self.fileout.down_indent()
self.fileout.write_line('}')
self.fileout.write_line('#endif // USE_{0}'.format(self.cap_package))
self.fileout.skip_line()
########################################################################
# Write file
def write_file(self):
self.fileout.write_file()
if self.binding == 'csharp' or self.binding == 'java':
self.write_binding_specific_i_code()
else:
self.write_binding_specific_cpp_code()
def close_file(self):
self.fileout.close_file()
|
e0ne/cinder | refs/heads/master | cinder/tests/test_api_urlmap.py | 27 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for cinder.api.urlmap.py
"""
from cinder.api import urlmap
from cinder import test
class TestParseFunctions(test.TestCase):
def test_unquote_header_value_without_quotes(self):
arg = 'TestString'
result = urlmap.unquote_header_value(arg)
self.assertEqual(result, arg)
def test_unquote_header_value_with_quotes(self):
result = urlmap.unquote_header_value('"TestString"')
self.assertEqual(result, 'TestString')
def test_parse_list_header(self):
arg = 'token, "quoted value"'
result = urlmap.parse_list_header(arg)
self.assertEqual(result, ['token', 'quoted value'])
def test_parse_options_header(self):
result = urlmap.parse_options_header('Content-Type: text/html;'
' mimetype=text/html')
self.assertEqual(result, ('Content-Type:', {'mimetype': 'text/html'}))
def test_parse_options_header_without_value(self):
result = urlmap.parse_options_header(None)
self.assertEqual(result, ('', {}))
class TestAccept(test.TestCase):
def test_best_match_ValueError(self):
arg = 'text/html; q=some_invalid_value'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']), (None, {}))
def test_best_match(self):
arg = '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['application/json',
'application/xml', 'text/html']),
('application/json', {'q': '0.7'}))
def test_match_mask_one_asterisk(self):
arg = 'text/*; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']),
('text/html', {'q': '0.7'}))
def test_match_mask_two_asterisk(self):
arg = '*/*; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']),
('text/html', {'q': '0.7'}))
def test_match_mask_no_asterisk(self):
arg = 'application/json; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']), (None, {}))
def test_content_type_params(self):
arg = "application/xml; q=0.1, application/json; q=0.2," \
" text/html; q=0.3"
accept = urlmap.Accept(arg)
self.assertEqual(accept.content_type_params('application/json'),
{'q': '0.2'})
def test_content_type_params_wrong_content_type(self):
arg = 'application/xml; q=0.1, text/html; q=0.1'
accept = urlmap.Accept(arg)
self.assertEqual(accept.content_type_params('application/json'), {})
class TestUrlMapFactory(test.TestCase):
def setUp(self):
super(TestUrlMapFactory, self).setUp()
self.global_conf = {'not_found_app': 'app_global',
'domain hoobar.com port 10 /': 'some_app_global'}
self.loader = self.mox.CreateMockAnything()
def test_not_found_app_in_local_conf(self):
local_conf = {'not_found_app': 'app_local',
'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('app_local', global_conf=self.global_conf).\
AndReturn('app_local_loader')
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader')
expected_urlmap['http://foobar.com:20'] = 'some_app_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
def test_not_found_app_not_in_local_conf(self):
local_conf = {'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('app_global', global_conf=self.global_conf).\
AndReturn('app_global_loader')
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_returned_by_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader')
expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
'_by_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
def test_not_found_app_is_none(self):
local_conf = {'not_found_app': None,
'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_returned_by_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app=None)
expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
'_by_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
class TestURLMap(test.TestCase):
def setUp(self):
super(TestURLMap, self).setUp()
self.urlmap = urlmap.URLMap()
self.input_environ = {'HTTP_ACCEPT': "application/json;"
"version=9.0", 'REQUEST_METHOD': "GET",
'CONTENT_TYPE': 'application/xml',
'SCRIPT_NAME': '/scriptname',
'PATH_INFO': "/resource.xml"}
self.environ = {'HTTP_ACCEPT': "application/json;"
"version=9.0", 'REQUEST_METHOD': "GET",
'CONTENT_TYPE': 'application/xml',
'SCRIPT_NAME': '/scriptname/app_url',
'PATH_INFO': "/resource.xml"}
def test_match_with_applications(self):
self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app'
self.assertEqual(self.urlmap._match('20.30.40.50', '20',
'path/somepath'), (None, None))
def test_match_without_applications(self):
self.assertEqual(self.urlmap._match('host', 20, 'app_url/somepath'),
(None, None))
def test_match_path_info_equals_app_url(self):
self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app'
self.assertEqual(self.urlmap._match('http://20.30.40.50', '60',
'/app_url/somepath'),
('app', '/app_url/somepath'))
def test_match_path_info_equals_app_url_many_app(self):
self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1'
self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2'
self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \
'app3'
self.assertEqual(self.urlmap._match('http://20.30.40.50', '60',
'/path/somepath/elsepath'),
('app3', '/path/somepath/elsepath'))
def test_set_script_name(self):
app = self.mox.CreateMockAnything()
start_response = self.mox.CreateMockAnything()
app.__call__(self.environ, start_response).AndReturn('value')
self.mox.ReplayAll()
wrap = self.urlmap._set_script_name(app, '/app_url')
self.assertEqual(wrap(self.input_environ, start_response), 'value')
def test_munge_path(self):
app = self.mox.CreateMockAnything()
start_response = self.mox.CreateMockAnything()
app.__call__(self.environ, start_response).AndReturn('value')
self.mox.ReplayAll()
wrap = self.urlmap._munge_path(app, '/app_url/resource.xml',
'/app_url')
self.assertEqual(wrap(self.input_environ, start_response), 'value')
def test_content_type_strategy_without_version(self):
self.assertEqual(self.urlmap._content_type_strategy('host', 20,
self.environ),
None)
def test_content_type_strategy_with_version(self):
environ = {'HTTP_ACCEPT': "application/vnd.openstack.melange+xml;"
"version=9.0", 'REQUEST_METHOD': "GET",
'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.urlmap[('http://10.20.30.40:50', '/v2.0')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
self.urlmap._set_script_name('app', '/v2.0').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._content_type_strategy(
'http://10.20.30.40', '50', environ), 'value')
def test_path_strategy_wrong_path_info(self):
self.assertEqual(self.urlmap._path_strategy('http://10.20.30.40', '50',
'/resource'),
(None, None, None))
def test_path_strategy_mime_type_only(self):
self.assertEqual(self.urlmap._path_strategy('http://10.20.30.40', '50',
'/resource.xml'),
('application/xml', None, None))
def test_path_strategy(self):
self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_munge_path')
self.urlmap._munge_path('app', '/path/elsepath/resource.xml',
'/path/elsepath').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._path_strategy(
'http://10.20.30.40', '50', '/path/elsepath/resource.xml'),
('application/xml', 'value', '/path/elsepath'))
def test_path_strategy_wrong_mime_type(self):
self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_munge_path')
self.urlmap._munge_path('app', '/path/elsepath/resource.abc',
'/path/elsepath').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._path_strategy(
'http://10.20.30.40', '50', '/path/elsepath/resource.abc'),
(None, 'value', '/path/elsepath'))
def test_accept_strategy_version_not_in_params(self):
environ = {'HTTP_ACCEPT': "application/xml; q=0.1, application/json; "
"q=0.2", 'REQUEST_METHOD': "GET",
'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.assertEqual(self.urlmap._accept_strategy(
'http://10.20.30.40', '50', environ, ['application/xml']),
('application/xml', None))
def test_accept_strategy_version(self):
environ = {'HTTP_ACCEPT': "application/xml; q=0.1; version=1.0,"
"application/json; q=0.2; version=2.0",
'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.urlmap[('http://10.20.30.40:50', '/v1.0')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
self.urlmap._set_script_name('app', '/v1.0').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._accept_strategy(
'http://10.20.30.40', '50', environ, ['application/xml']),
('application/xml', 'value'))
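# The mapping idiom these tests exercise, in miniature (illustrative only;
# 'app' stands for any WSGI application):
#
# mapper = urlmap.URLMap()
# mapper[('http://10.20.30.40:50', '/v2.0')] = app
# # incoming requests are then dispatched on (host, port, path prefix)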
|
teeple/pns_server | refs/heads/master | work/install/Python-2.7.4/Lib/distutils/tests/test_build_clib.py | 73 | """Tests for distutils.command.build_clib."""
import unittest
import os
import sys
from test.test_support import run_unittest
from distutils.command.build_clib import build_clib
from distutils.errors import DistutilsSetupError
from distutils.tests import support
from distutils.spawn import find_executable
class BuildCLibTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_check_library_dist(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# 'libraries' option must be a list
self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo')
# each element of 'libraries' must be a 2-tuple
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
['foo1', 'foo2'])
# first element of each tuple in 'libraries'
# must be a string (the library name)
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[(1, 'foo1'), ('name', 'foo2')])
# library name may not contain directory separators
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[('name', 'foo1'),
('another/name', 'foo2')])
# second element of each tuple must be a dictionary (build info)
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[('name', {}),
('another', 'foo2')])
# those work
libs = [('name', {}), ('name', {'ok': 'good'})]
cmd.check_library_list(libs)
def test_get_source_files(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# "in 'libraries' option 'sources' must be present and must be
# a list of source filenames
cmd.libraries = [('name', {})]
self.assertRaises(DistutilsSetupError, cmd.get_source_files)
cmd.libraries = [('name', {'sources': 1})]
self.assertRaises(DistutilsSetupError, cmd.get_source_files)
cmd.libraries = [('name', {'sources': ['a', 'b']})]
self.assertEqual(cmd.get_source_files(), ['a', 'b'])
cmd.libraries = [('name', {'sources': ('a', 'b')})]
self.assertEqual(cmd.get_source_files(), ['a', 'b'])
cmd.libraries = [('name', {'sources': ('a', 'b')}),
('name2', {'sources': ['c', 'd']})]
self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd'])
def test_build_libraries(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
class FakeCompiler:
def compile(*args, **kw):
pass
create_static_lib = compile
cmd.compiler = FakeCompiler()
# build_libraries also does a bit of type checking
lib = [('name', {'sources': 'notvalid'})]
self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)
lib = [('name', {'sources': list()})]
cmd.build_libraries(lib)
lib = [('name', {'sources': tuple()})]
cmd.build_libraries(lib)
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
cmd.include_dirs = 'one-dir'
cmd.finalize_options()
self.assertEqual(cmd.include_dirs, ['one-dir'])
cmd.include_dirs = None
cmd.finalize_options()
self.assertEqual(cmd.include_dirs, [])
cmd.distribution.libraries = 'WONTWORK'
self.assertRaises(DistutilsSetupError, cmd.finalize_options)
def test_run(self):
# can't test on windows
if sys.platform == 'win32':
return
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
foo_c = os.path.join(pkg_dir, 'foo.c')
self.write_file(foo_c, 'int main(void) { return 1;}\n')
cmd.libraries = [('foo', {'sources': [foo_c]})]
build_temp = os.path.join(pkg_dir, 'build')
os.mkdir(build_temp)
cmd.build_temp = build_temp
cmd.build_clib = build_temp
# before we run the command, we want to make sure
# all commands are present on the system
# by creating a compiler and checking its executables
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
compiler = new_compiler()
customize_compiler(compiler)
for ccmd in compiler.executables.values():
if ccmd is None:
continue
if find_executable(ccmd[0]) is None:
return # can't test
# this should work
cmd.run()
# let's check the result
self.assertTrue('libfoo.a' in os.listdir(build_temp))
def test_suite():
return unittest.makeSuite(BuildCLibTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
yigitguler/django | refs/heads/master | django/contrib/gis/geos/mutable_list.py | 105 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from django.utils.functional import total_ordering
from django.utils import six
from django.utils.six.moves import xrange
@total_ordering
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
Note that if _get_single_external and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
The type of exception to be raised on an invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, six.integer_types + (slice,)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, six.integer_types):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = (self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange)
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n - 1):
self.extend(cache)
return self
def __eq__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] == other[i]
except self._IndexError:
# self must be shorter
return False
if not c:
return False
return len(self) == olen
def __lt__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] < other[i]
except self._IndexError:
# self must be shorter
return True
if c:
return c
elif other[i] < self[i]:
return False
return len(self) < olen
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i:
count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val:
return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, six.integer_types):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=None, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v), v) for v in self]
temp.sort(key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
if cmp is not None:
temp.sort(cmp=cmp, reverse=reverse)
else:
temp.sort(reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
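# A minimal concrete subclass (an illustrative sketch, not part of the
# module): it satisfies the contract in the class docstring by keeping the
# items in a plain Python list.
#
# class SimpleList(ListMixin):
# def __init__(self, items=()):
# self._list = list(items)
# super(SimpleList, self).__init__()
#
# def __len__(self):
# return len(self._list)
#
# def _get_single_external(self, index):
# return self._list[index]
#
# def _set_list(self, length, items):
# self._list = list(items)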
|
nitzmahone/ansible-modules-extras | refs/heads/devel | windows/win_iis_webapplication.py | 153 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: win_iis_webapplication
version_added: "2.0"
short_description: Configures an IIS Web application.
description:
- Creates, removes, and configures IIS Web applications.
options:
name:
description:
- Name of the Web application
required: true
default: null
aliases: []
site:
description:
- Name of the site on which the application is created.
required: true
default: null
aliases: []
state:
description:
- State of the web application
choices:
- present
- absent
required: false
default: null
aliases: []
physical_path:
description:
- The physical path on the remote host to use for the new application. The specified folder must already exist.
required: false
default: null
aliases: []
application_pool:
description:
- The application pool in which the new site executes.
required: false
default: null
aliases: []
author: Henrik Wallström
'''
EXAMPLES = '''
$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host
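# For illustration (not in the original EXAMPLES), the same module with
# state=absent removes the application again:
$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme state=absent" host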
'''
|
spnow/grr | refs/heads/master | tools/demo.py | 3 | #!/usr/bin/env python
"""This is a single binary demo program."""
import threading
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
from grr.gui import admin_ui
# pylint: enable=unused-import,g-bad-import-order
from grr.client import client
from grr.gui import runtests
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
from grr.tools import http_server
from grr.worker import enroller
from grr.worker import worker
BASE_DIR = "grr/"
def main(argv):
"""Sets up all the component in their own threads."""
# For testing we use the test config file.
flags.FLAGS.config = config_lib.CONFIG["Test.config"]
config_lib.CONFIG.AddContext(
"Demo Context",
"The demo runs all functions in a single process using the "
"in memory data store.")
startup.TestInit()
# pylint: disable=unused-import,unused-variable,g-import-not-at-top
from grr.gui import gui_plugins
# pylint: enable=unused-import,unused-variable,g-import-not-at-top
# This is the worker thread.
worker_thread = threading.Thread(target=worker.main, args=[argv],
name="Worker")
worker_thread.daemon = True
worker_thread.start()
# This is the enroller thread.
enroller_thread = threading.Thread(target=enroller.main, args=[argv],
name="Enroller")
enroller_thread.daemon = True
enroller_thread.start()
# This is the http server Frontend that clients communicate with.
http_thread = threading.Thread(target=http_server.main, args=[argv],
name="HTTP Server")
http_thread.daemon = True
http_thread.start()
client_thread = threading.Thread(target=client.main, args=[argv],
name="Client")
client_thread.daemon = True
client_thread.start()
# The UI is running in the main thread.
runtests.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
waheedahmed/edx-platform | refs/heads/master | lms/djangoapps/course_structure_api/__init__.py | 175 | """ Course structure API """
|
manassolanki/frappe | refs/heads/develop | frappe/patches/v8_0/set_doctype_values_in_custom_role.py | 19 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype('Custom Role')
# set ref doctype in custom role for reports
frappe.db.sql(""" update `tabCustom Role` set
`tabCustom Role`.ref_doctype = (select ref_doctype from `tabReport` where name = `tabCustom Role`.report)
where `tabCustom Role`.report is not null""")
|
imbasimba/astroquery | refs/heads/obs-id-download | astroquery/oac/tests/__init__.py | 12133432 | |
SIFTeam/enigma2 | refs/heads/master | lib/python/Plugins/Extensions/DVDBurn/Process.py | 14 | from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
import os
class png2yuvTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Creating menu video")
self.setTool("png2yuv")
self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
def processStderr(self, data):
print "[png2yuvTask]", data[:-1]
class mpeg2encTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Encoding menu video")
self.setTool("mpeg2enc")
self.args += ["-f8", "-np", "-a2", "-o", outputfile]
self.inputFile = inputfile
self.weighting = 25
def run(self, callback):
Task.run(self, callback)
self.container.readFromFile(self.inputFile)
def processOutputLine(self, line):
print "[mpeg2encTask]", line[:-1]
class spumuxTask(Task):
def __init__(self, job, xmlfile, inputfile, outputfile):
Task.__init__(self, job, "Muxing buttons into menu")
self.setTool("spumux")
self.args += [xmlfile]
self.inputFile = inputfile
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
self.container.readFromFile(self.inputFile)
def processStderr(self, data):
print "[spumuxTask]", data[:-1]
class MakeFifoNode(Task):
def __init__(self, job, number):
Task.__init__(self, job, "Make FIFO nodes")
self.setTool("mknod")
nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
self.args += [nodename, "p"]
self.weighting = 10
class LinkTS(Task):
def __init__(self, job, sourcefile, link_name):
Task.__init__(self, job, "Creating symlink for source titles")
self.setTool("ln")
self.args += ["-s", sourcefile, link_name]
self.weighting = 10
class CopyMeta(Task):
def __init__(self, job, sourcefile):
Task.__init__(self, job, "Copy title meta files")
self.setTool("cp")
from os import listdir
path, filename = sourcefile.rstrip("/").rsplit("/",1)
tsfiles = listdir(path)
for file in tsfiles:
if file.startswith(filename+"."):
self.args += [path+'/'+file]
self.args += [self.job.workspace]
self.weighting = 15
class DemuxTask(Task):
def __init__(self, job, inputfile):
Task.__init__(self, job, "Demux video into ES")
title = job.project.titles[job.i]
self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
self.setTool("projectx")
self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace ]
self.end = 300
self.prog_state = 0
self.weighting = 1000
self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i+1)
self.cutlist = title.cutlist
self.currentPID = None
self.relevantAudioPIDs = [ ]
self.getRelevantAudioPIDs(title)
self.generated_files = [ ]
self.mplex_audiofiles = { }
self.mplex_videofile = ""
self.mplex_streamfiles = [ ]
if len(self.cutlist) > 1:
self.args += [ "-cut", self.cutfile ]
def prepare(self):
self.writeCutfile()
def getRelevantAudioPIDs(self, title):
for audiotrack in title.properties.audiotracks:
if audiotrack.active.getValue():
self.relevantAudioPIDs.append(audiotrack.pid.getValue())
def processOutputLine(self, line):
line = line[:-1]
#print "[DemuxTask]", line
MSG_NEW_FILE = "---> new File: "
MSG_PROGRESS = "[PROGRESS] "
MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
if line.startswith(MSG_NEW_FILE):
file = line[len(MSG_NEW_FILE):]
if file[0] == "'":
file = file[1:-1]
self.haveNewFile(file)
elif line.startswith(MSG_PROGRESS):
progress = line[len(MSG_PROGRESS):]
self.haveProgress(progress)
elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
try:
self.currentPID = str(int(line.split(': PID 0x',1)[1].split(' ',1)[0],16))
except ValueError:
print "[DemuxTask] ERROR: couldn't detect Audio PID (projectx too old?)"
def haveNewFile(self, file):
print "[DemuxTask] produced file:", file, self.currentPID
self.generated_files.append(file)
if self.currentPID in self.relevantAudioPIDs:
self.mplex_audiofiles[self.currentPID] = file
elif file.endswith("m2v"):
self.mplex_videofile = file
def haveProgress(self, progress):
#print "PROGRESS [%s]" % progress
MSG_CHECK = "check & synchronize audio file"
MSG_DONE = "done..."
if progress == "preparing collection(s)...":
self.prog_state = 0
elif progress[:len(MSG_CHECK)] == MSG_CHECK:
self.prog_state += 1
else:
try:
p = int(progress)
p = p - 1 + self.prog_state * 100
if p > self.progress:
self.progress = p
except ValueError:
pass
def writeCutfile(self):
f = open(self.cutfile, "w")
f.write("CollectionPanel.CutMode=4\n")
for p in self.cutlist:
s = p / 90000
m = s / 60
h = m / 60
m %= 60
s %= 60
f.write("%02d:%02d:%02d\n" % (h, m, s))
f.close()
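# writeCutfile() output format, for illustration: a cut point at 90*90000 pts
# (i.e. 90 seconds) becomes the line "00:01:30" below the
# "CollectionPanel.CutMode=4" header; the file is handed to projectx via -cut.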
def cleanup(self, failed):
print "[DemuxTask::cleanup]"
self.mplex_streamfiles = [ self.mplex_videofile ]
for pid in self.relevantAudioPIDs:
if pid in self.mplex_audiofiles:
self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
print self.mplex_streamfiles
if failed:
import os
for file in self.generated_files:
try:
os.remove(file)
except OSError:
pass
class MplexTaskPostcondition(Condition):
def check(self, task):
if task.error == task.ERROR_UNDERRUN:
return True
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_UNDERRUN: _("Can't multiplex source video!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class MplexTask(Task):
ERROR_UNDERRUN, ERROR_UNKNOWN = range(2)
def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting = 500):
Task.__init__(self, job, "Mux ES into PS")
self.weighting = weighting
self.demux_task = demux_task
self.postconditions.append(MplexTaskPostcondition())
self.setTool("mplex")
self.args += ["-f8", "-o", outputfile, "-v1"]
if inputfiles:
self.args += inputfiles
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
# We don't want the ReturncodePostcondition here because, for now, we simply ignore the fact that mplex fails with a buffer underrun error on some streams (this always happens at the very end).
def prepare(self):
self.error = None
if self.demux_task:
self.args += self.demux_task.mplex_streamfiles
def processOutputLine(self, line):
print "[MplexTask] ", line[:-1]
if line.startswith("**ERROR:"):
if line.find("Frame data under-runs detected") != -1:
self.error = self.ERROR_UNDERRUN
else:
self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
def __init__(self, job, demux_task):
Task.__init__(self, job, "Remove temp. files")
self.demux_task = demux_task
self.setTool("rm")
self.weighting = 10
def prepare(self):
self.args += ["-f"]
self.args += self.demux_task.generated_files
self.args += [self.demux_task.cutfile]
class ReplexTask(Task):
def __init__(self, job, outputfile, inputfile):
Task.__init__(self, job, "ReMux TS into PS")
self.weighting = 1000
self.setTool("replex")
self.args += ["-t", "DVD", "-j", "-o", outputfile, inputfile]
def processOutputLine(self, line):
print "[ReplexTask] ", line[:-1]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace+"/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print "[DVDAuthorTask] ", line[:-1]
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].split(" ")[-1])
if progress:
self.job.mplextask.progress = progress
print "[DVDAuthorTask] update mplextask progress:", self.job.mplextask.progress, "of", self.job.mplextask.end
except:
print "couldn't set mux progress"
class DVDAuthorFinalTask(Task):
def __init__(self, job):
Task.__init__(self, job, "dvdauthor finalize")
self.setTool("dvdauthor")
self.args += ["-T", "-o", self.job.workspace + "/dvd"]
class WaitForResidentTasks(Task):
def __init__(self, job):
Task.__init__(self, job, "waiting for dvdauthor to finalize")
def run(self, callback):
print "waiting for %d resident task(s) %s to finish..." % (len(self.job.resident_tasks),str(self.job.resident_tasks))
self.callback = callback
if not self.job.resident_tasks:
callback(self, [])
class BurnTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
if task.returncode == 0:
return True
elif task.error is None or task.error == task.ERROR_MINUSRWBUG:
return True
return False
def getErrorMessage(self, task):
return {
task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
task.ERROR_LOAD: _("Could not load medium! No disc inserted?"),
task.ERROR_SIZE: _("Content does not fit on DVD!"),
task.ERROR_WRITE_FAILED: _("Write failed!"),
task.ERROR_DVDROM: _("No (supported) DVDROM found!"),
task.ERROR_ISOFS: _("Medium is not empty!"),
task.ERROR_FILETOOLARGE: _("TS file is too large for ISO9660 level 1!"),
task.ERROR_ISOTOOLARGE: _("ISO file is too large for this filesystem!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class BurnTask(Task):
ERROR_NOTWRITEABLE, ERROR_LOAD, ERROR_SIZE, ERROR_WRITE_FAILED, ERROR_DVDROM, ERROR_ISOFS, ERROR_FILETOOLARGE, ERROR_ISOTOOLARGE, ERROR_MINUSRWBUG, ERROR_UNKNOWN = range(10)
def __init__(self, job, extra_args=[], tool="growisofs"):
Task.__init__(self, job, job.name)
self.weighting = 500
self.end = 120 # 100 for writing, 10 for buffer flush, 10 for closing disc
self.postconditions.append(BurnTaskPostcondition())
self.setTool(tool)
self.args += extra_args
def prepare(self):
self.error = None
def processOutputLine(self, line):
line = line[:-1]
print "[GROWISOFS] %s" % line
if line[8:14] == "done, ":
self.progress = float(line[:6])
print "progress:", self.progress
elif line.find("flushing cache") != -1:
self.progress = 100
elif line.find("closing disc") != -1:
self.progress = 110
elif line.startswith(":-["):
if line.find("ASC=30h") != -1:
self.error = self.ERROR_NOTWRITEABLE
elif line.find("ASC=24h") != -1:
self.error = self.ERROR_LOAD
elif line.find("SK=5h/ASC=A8h/ACQ=04h") != -1:
self.error = self.ERROR_MINUSRWBUG
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith(":-("):
if line.find("No space left on device") != -1:
self.error = self.ERROR_SIZE
elif self.error == self.ERROR_MINUSRWBUG:
print "*sigh* this is a known bug. we're simply gonna assume everything is fine."
self.postconditions = []
elif line.find("write failed") != -1:
self.error = self.ERROR_WRITE_FAILED
elif line.find("unable to open64(") != -1 and line.find(",O_RDONLY): No such file or directory") != -1:
self.error = self.ERROR_DVDROM
elif line.find("media is not recognized as recordable DVD") != -1:
self.error = self.ERROR_NOTWRITEABLE
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith("FATAL:"):
if line.find("already carries isofs!"):
self.error = self.ERROR_ISOFS
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.find("-allow-limited-size was not specified. There is no way do represent this file size. Aborting.") != -1:
self.error = self.ERROR_FILETOOLARGE
elif line.startswith("genisoimage: File too large."):
self.error = self.ERROR_ISOTOOLARGE
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
class RemoveDVDFolder(Task):
def __init__(self, job):
Task.__init__(self, job, "Remove temp. files")
self.setTool("rm")
self.args += ["-rf", self.job.workspace]
self.weighting = 10
class CheckDiskspaceTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Checking free space")
totalsize = 0
maxsize = 0
for title in job.project.titles:
titlesize = title.estimatedDiskspace
if titlesize > maxsize: maxsize = titlesize
totalsize += titlesize
job.estimateddvdsize = totalsize / 1024 / 1024
# require an extra safety margin of 50 MB on top of the payload
diskSpaceNeeded = totalsize + maxsize + 50*1024*1024
self.global_preconditions.append(DiskspacePrecondition(diskSpaceNeeded))
self.weighting = 5
def abort(self):
self.finish(aborted = True)
def run(self, callback):
self.callback = callback
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if len(failed_preconditions):
callback(self, failed_preconditions)
return
Task.processFinished(self, 0)
class PreviewTask(Task):
def __init__(self, job, path):
Task.__init__(self, job, "Preview")
self.postconditions.append(PreviewTaskPostcondition())
self.job = job
self.path = path
self.weighting = 10
def run(self, callback):
self.callback = callback
if self.job.menupreview:
self.previewProject()
else:
import Screens.Standby
if Screens.Standby.inStandby:
self.previewCB(False)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.previewCB, MessageBox, _("Do you want to preview this DVD before burning?"), timeout = 60, default = False)
def abort(self):
self.finish(aborted = True)
def previewCB(self, answer):
if answer == True:
self.previewProject()
else:
self.closedCB(True)
def playerClosed(self):
if self.job.menupreview:
self.closedCB(True)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.closedCB, MessageBox, _("Do you want to burn this collection to DVD medium?") )
def closedCB(self, answer):
if answer == True:
Task.processFinished(self, 0)
else:
Task.processFinished(self, 1)
def previewProject(self):
from Screens.DVD import DVDPlayer
self.job.project.session.openWithCallback(self.playerClosed, DVDPlayer, dvd_filelist= [ self.path ])
class PreviewTaskPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return "Cancel"
class ImagingPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return _("Failed") + ": python-imaging"
class ImagePrepareTask(Task):
def __init__(self, job):
Task.__init__(self, job, _("please wait, loading picture..."))
self.postconditions.append(ImagingPostcondition())
self.weighting = 20
self.job = job
self.Menus = job.Menus
def run(self, callback):
self.callback = callback
# we delay this via a short timer so that the TaskView screen actually pops up before the spinner appears
from enigma import eTimer
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.conduct)
self.delayTimer.start(10,1)
def conduct(self):
try:
from ImageFont import truetype
from Image import open as Image_open
s = self.job.project.menutemplate.settings
(width, height) = s.dimensions.getValue()
self.Menus.im_bg_orig = Image_open(s.menubg.getValue())
if self.Menus.im_bg_orig.size != (width, height):
self.Menus.im_bg_orig = self.Menus.im_bg_orig.resize((width, height))
self.Menus.fontsizes = [s.fontsize_headline.getValue(), s.fontsize_title.getValue(), s.fontsize_subtitle.getValue()]
self.Menus.fonts = [(truetype(s.fontface_headline.getValue(), self.Menus.fontsizes[0])), (truetype(s.fontface_title.getValue(), self.Menus.fontsizes[1])),(truetype(s.fontface_subtitle.getValue(), self.Menus.fontsizes[2]))]
Task.processFinished(self, 0)
except:
Task.processFinished(self, 1)
class MenuImageTask(Task):
def __init__(self, job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename):
Task.__init__(self, job, "Create Menu %d Image" % menu_count)
self.postconditions.append(ImagingPostcondition())
self.weighting = 10
self.job = job
self.Menus = job.Menus
self.menu_count = menu_count
self.spuxmlfilename = spuxmlfilename
self.menubgpngfilename = menubgpngfilename
self.highlightpngfilename = highlightpngfilename
def run(self, callback):
self.callback = callback
#try:
import ImageDraw, Image, os
s = self.job.project.menutemplate.settings
s_top = s.margin_top.getValue()
s_bottom = s.margin_bottom.getValue()
s_left = s.margin_left.getValue()
s_right = s.margin_right.getValue()
s_rows = s.space_rows.getValue()
s_cols = s.space_cols.getValue()
nr_cols = s.cols.getValue()
nr_rows = s.rows.getValue()
thumb_size = s.thumb_size.getValue()
if thumb_size[0]:
from Image import open as Image_open
(s_width, s_height) = s.dimensions.getValue()
fonts = self.Menus.fonts
im_bg = self.Menus.im_bg_orig.copy()
im_high = Image.new("P", (s_width, s_height), 0)
im_high.putpalette(self.Menus.spu_palette)
draw_bg = ImageDraw.Draw(im_bg)
draw_high = ImageDraw.Draw(im_high)
if self.menu_count == 1:
headlineText = self.job.project.settings.name.getValue().decode("utf-8")
headlinePos = self.getPosition(s.offset_headline.getValue(), 0, 0, s_width, s_top, draw_bg.textsize(headlineText, font=fonts[0]))
draw_bg.text(headlinePos, headlineText, fill=self.Menus.color_headline, font=fonts[0])
spuxml = """<?xml version="1.0" encoding="utf-8"?>
<subpictures>
<stream>
<spu
highlight="%s"
transparent="%02x%02x%02x"
start="00:00:00.00"
force="yes" >""" % (self.highlightpngfilename, self.Menus.spu_palette[0], self.Menus.spu_palette[1], self.Menus.spu_palette[2])
#rowheight = (self.Menus.fontsizes[1]+self.Menus.fontsizes[2]+thumb_size[1]+s_rows)
menu_start_title = (self.menu_count-1)*self.job.titles_per_menu + 1
menu_end_title = (self.menu_count)*self.job.titles_per_menu + 1
nr_titles = len(self.job.project.titles)
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
col = 1
row = 1
for title_no in range( menu_start_title , menu_end_title ):
title = self.job.project.titles[title_no-1]
col_width = ( s_width - s_left - s_right ) / nr_cols
row_height = ( s_height - s_top - s_bottom ) / nr_rows
left = s_left + ( (col-1) * col_width ) + s_cols/2
right = left + col_width - s_cols
top = s_top + ( (row-1) * row_height) + s_rows/2
bottom = top + row_height - s_rows
width = right - left
height = bottom - top
if bottom > s_height:
bottom = s_height
#draw_bg.rectangle((left, top, right, bottom), outline=(255,0,0))
im_cell_bg = Image.new("RGBA", (width, height),(0,0,0,0))
draw_cell_bg = ImageDraw.Draw(im_cell_bg)
im_cell_high = Image.new("P", (width, height), 0)
im_cell_high.putpalette(self.Menus.spu_palette)
draw_cell_high = ImageDraw.Draw(im_cell_high)
if thumb_size[0]:
thumbPos = self.getPosition(s.offset_thumb.getValue(), 0, 0, width, height, thumb_size)
box = (thumbPos[0], thumbPos[1], thumbPos[0]+thumb_size[0], thumbPos[1]+thumb_size[1])
try:
thumbIm = Image_open(title.inputfile.rsplit('.',1)[0] + ".png")
im_cell_bg.paste(thumbIm,thumbPos)
except:
draw_cell_bg.rectangle(box, fill=(64,127,127,127))
border = s.thumb_border.getValue()
if border:
draw_cell_high.rectangle(box, fill=1)
draw_cell_high.rectangle((box[0]+border, box[1]+border, box[2]-border, box[3]-border), fill=0)
titleText = title.formatDVDmenuText(s.titleformat.getValue(), title_no).decode("utf-8")
titlePos = self.getPosition(s.offset_title.getValue(), 0, 0, width, height, draw_bg.textsize(titleText, font=fonts[1]))
draw_cell_bg.text(titlePos, titleText, fill=self.Menus.color_button, font=fonts[1])
draw_cell_high.text(titlePos, titleText, fill=1, font=self.Menus.fonts[1])
subtitleText = title.formatDVDmenuText(s.subtitleformat.getValue(), title_no).decode("utf-8")
subtitlePos = self.getPosition(s.offset_subtitle.getValue(), 0, 0, width, height, draw_cell_bg.textsize(subtitleText, font=fonts[2]))
draw_cell_bg.text(subtitlePos, subtitleText, fill=self.Menus.color_button, font=fonts[2])
del draw_cell_bg
del draw_cell_high
im_bg.paste(im_cell_bg,(left, top, right, bottom), mask=im_cell_bg)
im_high.paste(im_cell_high,(left, top, right, bottom))
spuxml += """
<button name="button%s" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (str(title_no).zfill(2),left,right,top,bottom )
if col < nr_cols:
col += 1
else:
col = 1
row += 1
top = s_height - s_bottom - s_rows/2
if self.menu_count < self.job.nr_menus:
next_page_text = s.next_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(next_page_text, font=fonts[1])
pos = ( s_width-textsize[0]-s_right, top )
draw_bg.text(pos, next_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, next_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_next" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
if self.menu_count > 1:
prev_page_text = s.prev_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(prev_page_text, font=fonts[1])
pos = ( (s_left+s_cols/2), top )
draw_bg.text(pos, prev_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, prev_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_prev" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
del draw_bg
del draw_high
fd=open(self.menubgpngfilename,"w")
im_bg.save(fd,"PNG")
fd.close()
fd=open(self.highlightpngfilename,"w")
im_high.save(fd,"PNG")
fd.close()
spuxml += """
</spu>
</stream>
</subpictures>"""
f = open(self.spuxmlfilename, "w")
f.write(spuxml)
f.close()
Task.processFinished(self, 0)
#except:
#Task.processFinished(self, 1)
def getPosition(self, offset, left, top, right, bottom, size):
pos = [left, top]
if offset[0] != -1:
pos[0] += offset[0]
else:
pos[0] += ( (right-left) - size[0] ) / 2
if offset[1] != -1:
pos[1] += offset[1]
else:
pos[1] += ( (bottom-top) - size[1] ) / 2
return tuple(pos)
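# getPosition(), for illustration: an offset component of -1 centers the
# element along that axis inside the given box; a non-negative component is
# used as an absolute offset from the box's top-left corner instead.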
class Menus:
def __init__(self, job):
self.job = job
job.Menus = self
s = self.job.project.menutemplate.settings
self.color_headline = tuple(s.color_headline.getValue())
self.color_button = tuple(s.color_button.getValue())
self.color_highlight = tuple(s.color_highlight.getValue())
self.spu_palette = [ 0x60, 0x60, 0x60 ] + s.color_highlight.getValue()
ImagePrepareTask(job)
nr_titles = len(job.project.titles)
job.titles_per_menu = s.cols.getValue()*s.rows.getValue()
job.nr_menus = ((nr_titles+job.titles_per_menu-1)/job.titles_per_menu)
# a new menu page every titles_per_menu titles (e.g. with 4 per page: 1,2,3,4->1 ; 5,6,7,8->2 etc.)
for menu_count in range(1 , job.nr_menus+1):
num = str(menu_count)
spuxmlfilename = job.workspace+"/spumux"+num+".xml"
menubgpngfilename = job.workspace+"/dvd_menubg"+num+".png"
highlightpngfilename = job.workspace+"/dvd_highlight"+num+".png"
MenuImageTask(job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename)
png2yuvTask(job, menubgpngfilename, job.workspace+"/dvdmenubg"+num+".yuv")
menubgm2vfilename = job.workspace+"/dvdmenubg"+num+".mv2"
mpeg2encTask(job, job.workspace+"/dvdmenubg"+num+".yuv", menubgm2vfilename)
menubgmpgfilename = job.workspace+"/dvdmenubg"+num+".mpg"
menuaudiofilename = s.menuaudio.getValue()
MplexTask(job, outputfile=menubgmpgfilename, inputfiles = [menubgm2vfilename, menuaudiofilename], weighting = 20)
menuoutputfilename = job.workspace+"/dvdmenu"+num+".mpg"
spumuxTask(job, spuxmlfilename, menubgmpgfilename, menuoutputfilename)
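# Menu pipeline summary: each menu page is rendered to a PNG plus a spumux
# XML (MenuImageTask), converted to YUV (png2yuv), encoded to MPEG-2 video
# (mpeg2enc), muxed with the menu audio (mplex), and finally gets its
# subpicture buttons muxed in (spumux), yielding dvdmenuN.mpg for dvdauthor.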
def CreateAuthoringXML_singleset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n')
if mode.startswith("menu"):
authorxml.append(' <post> jump titleset 1 menu; </post>\n')
else:
authorxml.append(' <post> jump title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
authorxml.append(' <titleset>\n')
if mode.startswith("menu"):
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc entry="root">\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump title ' + str(i) +'; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for i in range( nr_titles ):
chapters = ','.join(job.project.titles[i].getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump title %d;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
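# For illustration, the generated dvdauthor.xml has this rough shape (the
# exact contents depend on the project settings):
#   <dvdauthor dest=".../dvd">
#     <vmgm><menus><pgc>...<post> jump title 1; </post></pgc></menus></vmgm>
#     <titleset>
#       <menus>...</menus>
#       <titles><pgc><vob file=".../dvd_title_1.mpg" chapters="..."/></pgc></titles>
#     </titleset>
#   </dvdauthor>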
def CreateAuthoringXML_multiset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '" jumppad="yes">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
if mode.startswith("menu"):
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc>\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump titleset ' + str(i) +' title 1; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
else:
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n' )
authorxml.append(' <post> jump titleset 1 title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
for i in range( nr_titles ):
title = job.project.titles[i]
authorxml.append(' <titleset>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc entry="root">\n')
authorxml.append(' <pre>\n')
authorxml.append(' jump vmgm menu entry title;\n')
authorxml.append(' </pre>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for audiotrack in title.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
format = audiotrack.format.getValue()
language = audiotrack.language.getValue()
audio_tag = ' <audio format="%s"' % format
if language != "nolang":
audio_tag += ' lang="%s"' % language
audio_tag += ' />\n'
authorxml.append(audio_tag)
aspect = title.properties.aspect.getValue()
video_tag = ' <video aspect="'+aspect+'"'
if title.properties.widescreen.getValue() == "4:3":
video_tag += ' widescreen="'+title.properties.widescreen.getValue()+'"'
video_tag += ' />\n'
authorxml.append(video_tag)
chapters = ','.join(title.getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump titleset %d title 1;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def getISOfilename(isopath, volName):
from Tools.Directories import fileExists
i = 0
filename = isopath+'/'+volName+".iso"
while fileExists(filename):
i = i+1
filename = isopath+'/'+volName + str(i).zfill(3) + ".iso"
return filename
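# For illustration: with volName "movie", this returns ".../movie.iso" if no
# such file exists yet, otherwise ".../movie001.iso", ".../movie002.iso", ...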
class DVDJob(Job):
def __init__(self, project, menupreview=False):
Job.__init__(self, "DVDBurn Job")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S")
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.menupreview = menupreview
self.conduct()
def conduct(self):
CheckDiskspaceTask(self)
if self.project.settings.authormode.getValue().startswith("menu") or self.menupreview:
Menus(self)
if self.project.settings.titlesetmode.getValue() == "multi":
CreateAuthoringXML_multiset(self)
else:
CreateAuthoringXML_singleset(self)
DVDAuthorTask(self)
nr_titles = len(self.project.titles)
if self.menupreview:
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
else:
hasProjectX = os.path.exists('/usr/bin/projectx')
print "[DVDJob] hasProjectX=", hasProjectX
for self.i in range(nr_titles):
self.title = self.project.titles[self.i]
link_name = self.workspace + "/source_title_%d.ts" % (self.i+1)
title_filename = self.workspace + "/dvd_title_%d.mpg" % (self.i+1)
LinkTS(self, self.title.inputfile, link_name)
if not hasProjectX:
ReplexTask(self, outputfile=title_filename, inputfile=link_name).end = self.estimateddvdsize
else:
demux = DemuxTask(self, link_name)
self.mplextask = MplexTask(self, outputfile=title_filename, demux_task=demux)
self.mplextask.end = self.estimateddvdsize
RemoveESFiles(self, demux)
WaitForResidentTasks(self)
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
if output == "dvd":
self.name = _("Burn DVD")
tool = "growisofs"
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
self.name = _("Create DVD-ISO")
tool = "genisoimage"
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, self.workspace + "/dvd" ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDdataJob(Job):
def __init__(self, project):
Job.__init__(self, "Data DVD Burn")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S") + "/dvd/"
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.conduct()
def conduct(self):
if self.project.settings.output.getValue() == "iso":
CheckDiskspaceTask(self)
nr_titles = len(self.project.titles)
for self.i in range(nr_titles):
title = self.project.titles[self.i]
filename = title.inputfile.rstrip("/").rsplit("/",1)[1]
link_name = self.workspace + filename
LinkTS(self, title.inputfile, link_name)
CopyMeta(self, title.inputfile)
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
tool = "growisofs"
if output == "dvd":
self.name = _("Burn DVD")
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
tool = "genisoimage"
self.name = _("Create DVD-ISO")
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
if self.project.settings.dataformat.getValue() == "iso9660_1":
burnargs += ["-iso-level", "1" ]
elif self.project.settings.dataformat.getValue() == "iso9660_4":
burnargs += ["-iso-level", "4", "-allow-limited-size" ]
elif self.project.settings.dataformat.getValue() == "udf":
burnargs += ["-udf", "-allow-limited-size" ]
burnargs += [ "-publisher", "Dreambox", "-V", volName, "-follow-links", self.workspace ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDisoJob(Job):
def __init__(self, project, imagepath):
Job.__init__(self, _("Burn DVD"))
self.project = project
self.menupreview = False
from Tools.Directories import getSize
if imagepath.endswith(".iso"):
PreviewTask(self, imagepath)
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD() + '='+imagepath, "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
else:
PreviewTask(self, imagepath + "/VIDEO_TS/")
volName = self.project.settings.name.getValue()
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, imagepath ]
tool = "growisofs"
BurnTask(self, burnargs, tool)
|
rabipanda/tensorflow | refs/heads/master | tensorflow/contrib/quantize/python/quantize_graph_test.py | 4 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the quantize_graph graph rewriting API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class QuantizeGraphTest(test_util.TensorFlowTestCase):
# We have a lot of other tests that test the details of the rewrite; here we
# just test the specific features of the quantize_graph API.
def _RunTestOverParameters(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def testReturnedElements(self):
self._RunTestOverParameters(self._TestReturnElements)
def _TestReturnElements(self, fn):
graph = ops.Graph()
with graph.as_default():
a = constant_op.constant(1.0)
b = variables.Variable(2.0)
c = a + b
elements = [a, b, c.op]
q_graph, returned_elements = fn(graph, elements=elements)
# Make sure q_graph is different from graph.
self.assertTrue(graph != q_graph)
# Check that the returned elements are part of the new graph.
for returned_element in returned_elements:
self.assertEqual(q_graph, returned_element.graph)
# Check that the elements match with the one from the input graph.
for element, returned_element in zip(elements, returned_elements):
self.assertEqual(element.name, returned_element.name)
def testNoReturnElements(self):
self._RunTestOverParameters(self._TestNoReturnElements)
def _TestNoReturnElements(self, fn):
graph = ops.Graph()
with graph.as_default():
a = constant_op.constant(1.0)
b = variables.Variable(2.0)
_ = a + b
q_graph = fn(graph)
# Check that quantize_graph didn't return a tuple when elements isn't
# provided.
self.assertTrue(isinstance(q_graph, ops.Graph))
# Make sure q_graph is different from graph.
self.assertTrue(graph != q_graph)
def testDeviceName(self):
self._RunTestOverParameters(self._TestDeviceName)
def _TestDeviceName(self, fn):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
conv = layers.conv2d(
inputs,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test')
_ = nn_ops.relu6(conv)
device_name = '/job:oink/task:0/device:CPU:0'
q_graph = fn(graph, device_name_or_function=device_name)
orig_variable_names = set(
[v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
q_variables = q_graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
# All added variables should have the specified device name.
for var in q_variables:
if var.name not in orig_variable_names:
self.assertEqual(var.device, device_name)
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initializer that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
if __name__ == '__main__':
googletest.main()
|
Azure/azure-sdk-for-python | refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2020_05_01_preview/aio/_monitor_management_client.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import ScheduledQueryRulesOperations
from .. import models
class MonitorManagementClient(object):
"""Monitor Management Client.
:ivar scheduled_query_rules: ScheduledQueryRulesOperations operations
:vartype scheduled_query_rules: $(python-base-namespace).v2020_05_01_preview.aio.operations.ScheduledQueryRulesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Azure subscription Id.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = MonitorManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.scheduled_query_rules = ScheduledQueryRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MonitorManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
ennoborg/gramps | refs/heads/master | gramps/gui/display.py | 1 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import webbrowser
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.const import URL_MANUAL_PAGE, URL_WIKISTRING
from gramps.gen.constfunc import is_quartz
from gramps.gen.config import config
from .utils import open_file_with_default_application as run_file
#list of manuals on wiki, map locale code to wiki extension; add language
#codes completely, or just the first part: e.g. pt_BR for the Brazilian
#Portuguese wiki manual, and nl for Dutch (nl_BE, nl_NL language codes)
MANUALS = {
'nl' : '/nl',
'fr' : '/fr',
'sq' : '/sq',
'mk' : '/mk',
'de' : '/de',
'fi' : '/fi',
'ru' : '/ru',
'sk' : '/sk',
}
#first, determine language code, so nl_BE --> wiki /nl
lang = glocale.language[0]
if lang in MANUALS:
EXTENSION = MANUALS[lang]
else:
EXTENSION = ''
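# Example: an 'nl_BE' locale yields lang 'nl', so EXTENSION becomes '/nl' and
# manual links point at the Dutch wiki pages; languages without a translated
# manual fall back to the English pages (empty extension).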
def display_help(webpage='', section=''):
"""
Display the specified webpage and section from the Gramps wiki.
"""
if not webpage:
link = URL_WIKISTRING + URL_MANUAL_PAGE + EXTENSION
else:
link = URL_WIKISTRING + webpage + EXTENSION
if section:
link = link + '#' + section
display_url(link)
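# For illustration (the page name is hypothetical):
#   display_help('Gramps_Wiki_Manual', 'Introduction')
# opens URL_WIKISTRING + 'Gramps_Wiki_Manual' + EXTENSION + '#Introduction'.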
def display_url(link, uistate=None):
"""
Open the specified URL in a browser.
"""
webbrowser.open_new_tab(link)
|
rsalmaso/huey | refs/heads/master | huey/serializer.py | 2 | try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
import hashlib
import hmac
import logging
import pickle
import sys
from huey.exceptions import ConfigurationError
from huey.utils import encode
logger = logging.getLogger('huey.serializer')
if gzip is not None:
if sys.version_info[0] > 2:
gzip_compress = gzip.compress
gzip_decompress = gzip.decompress
else:
from io import BytesIO
def gzip_compress(data, comp_level):
buf = BytesIO()
fh = gzip.GzipFile(fileobj=buf, mode='wb',
compresslevel=comp_level)
fh.write(data)
fh.close()
return buf.getvalue()
def gzip_decompress(data):
buf = BytesIO(data)
fh = gzip.GzipFile(fileobj=buf, mode='rb')
try:
return fh.read()
finally:
fh.close()
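# A gzip stream starts with byte 0x1f and a zlib stream with byte 0x78; the
# version-specific helpers below check that first byte.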
if sys.version_info[0] == 2:
def is_compressed(data):
return data and (data[0] == b'\x1f' or data[0] == b'\x78')
else:
def is_compressed(data):
return data and (data[0] == 0x1f or data[0] == 0x78)
class Serializer(object):
def __init__(self, compression=False, compression_level=6, use_zlib=False,
pickle_protocol=pickle.HIGHEST_PROTOCOL):
self.comp = compression
self.comp_level = compression_level
self.use_zlib = use_zlib
self.pickle_protocol = pickle_protocol or pickle.HIGHEST_PROTOCOL
if self.comp:
if self.use_zlib and zlib is None:
raise ConfigurationError('use_zlib specified, but zlib module '
'not found.')
elif gzip is None:
raise ConfigurationError('gzip module required to enable '
'compression.')
def _serialize(self, data):
return pickle.dumps(data, self.pickle_protocol)
def _deserialize(self, data):
return pickle.loads(data)
def serialize(self, data):
data = self._serialize(data)
if self.comp:
if self.use_zlib:
data = zlib.compress(data, self.comp_level)
else:
data = gzip_compress(data, self.comp_level)
return data
def deserialize(self, data):
if self.comp:
if not is_compressed(data):
logger.warning('compression enabled but message data does not '
'appear to be compressed.')
elif self.use_zlib:
data = zlib.decompress(data)
else:
data = gzip_decompress(data)
return self._deserialize(data)
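# Minimal usage sketch (values illustrative):
#   s = Serializer(compression=True)
#   blob = s.serialize({'n': 1})            # pickled, then gzip-compressed
#   assert s.deserialize(blob) == {'n': 1}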
def constant_time_compare(s1, s2):
return hmac.compare_digest(s1, s2)
class SignedSerializer(Serializer):
def __init__(self, secret=None, salt='huey', **kwargs):
super(SignedSerializer, self).__init__(**kwargs)
if not secret or not salt:
raise ConfigurationError('The secret and salt parameters are '
'required by %r' % type(self))
self.secret = encode(secret)
self.salt = encode(salt)
self.separator = b':'
self._key = hashlib.sha1(self.salt + self.secret).digest()
def _signature(self, message):
signature = hmac.new(self._key, msg=message, digestmod=hashlib.sha1)
return signature.hexdigest().encode('utf8')
def _sign(self, message):
return message + self.separator + self._signature(message)
def _unsign(self, signed):
if self.separator not in signed:
raise ValueError('Separator "%s" not found' % self.separator)
msg, sig = signed.rsplit(self.separator, 1)
if constant_time_compare(sig, self._signature(msg)):
return msg
raise ValueError('Signature "%s" mismatch!' % sig)
def _serialize(self, message):
data = super(SignedSerializer, self)._serialize(message)
return self._sign(data)
def _deserialize(self, data):
return super(SignedSerializer, self)._deserialize(self._unsign(data))
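# Minimal usage sketch (secret/salt values illustrative):
#   ss = SignedSerializer(secret='my-secret', salt='huey.serializer')
#   blob = ss.serialize({'task': 'demo'})  # pickled and HMAC-signed
#   ss.deserialize(blob)  # raises ValueError if the signature does not match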
|
Telthor/bad-boids | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
setup(
name = 'Boids',
version = '0.0.1',
packages = find_packages(),
install_requires = ['matplotlib','numpy','nose'],
license = 'MIT',
author = 'David Wise',
author_email = 'd.wise.15@ucl.ac.uk',
scripts = ['scripts/boids']
)
|
proger/offlineimap | refs/heads/master | offlineimap/repository/LocalStatus.py | 4 | # Local status cache repository support
# Copyright (C) 2002 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Base import BaseRepository
from offlineimap.folder.LocalStatus import LocalStatusFolder, magicline
from offlineimap.folder.LocalStatusSQLite import LocalStatusSQLiteFolder
import os
import re
class LocalStatusRepository(BaseRepository):
def __init__(self, reposname, account):
BaseRepository.__init__(self, reposname, account)
self.directory = os.path.join(account.getaccountmeta(), 'LocalStatus')
# status_backend can be 'plain' or 'sqlite'
backend = self.account.getconf('status_backend', 'plain')
if backend == 'sqlite':
self._backend = 'sqlite'
self.LocalStatusFolderClass = LocalStatusSQLiteFolder
self.directory += '-sqlite'
elif backend == 'plain':
self._backend = 'plain'
self.LocalStatusFolderClass = LocalStatusFolder
else:
raise SyntaxWarning("Unknown status_backend '%s' for account '%s'" \
% (backend, account.name))
if not os.path.exists(self.directory):
os.mkdir(self.directory, 0700)
# self._folders is a list of LocalStatusFolders()
self._folders = None
def getsep(self):
return '.'
def getfolderfilename(self, foldername):
"""Return the full path of the status file"""
# replace with 'dot' if final path name is '.'
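# e.g. a folder named '.' maps to 'dot', and 'a/.' maps to 'a/dot'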
foldername = re.sub('(^|\/)\.$','\\1dot', foldername)
return os.path.join(self.directory, foldername)
def makefolder(self, foldername):
"""Create a LocalStatus Folder
Empty Folder for plain backend. NoOp for sqlite backend as those
are created on demand."""
# Invalidate the cache.
self._folders = None
if self._backend == 'sqlite':
return
filename = self.getfolderfilename(foldername)
file = open(filename + ".tmp", "wt")
file.write(magicline + '\n')
file.close()
os.rename(filename + ".tmp", filename)
# Invalidate the cache.
self._folders = None
def getfolder(self, foldername):
"""Return the Folder() object for a foldername"""
return self.LocalStatusFolderClass(self.directory, foldername,
self, self.accountname,
self.config)
def getfolders(self):
"""Returns a list of ALL folders on this server.
This is currently not used anywhere in the code."""
if self._folders is not None:
return self._folders
self._folders = []
for folder in os.listdir(self.directory):
self._folders.append(self.getfolder(folder))
return self._folders
def forgetfolders(self):
"""Forgets the cached list of folders, if any. Useful to run
after a sync run."""
self._folders = None
|
wangxuan007/flasky | refs/heads/master | venv/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_results.py | 155 | from .. import fixtures, config
from ..config import requirements
from .. import exclusions
from ..assertions import eq_
from .. import engines
from sqlalchemy import Integer, String, select, util, sql, DateTime
import datetime
from ..schema import Table, Column
class RowFetchTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('plain_pk', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
Table('has_dates', metadata,
Column('id', Integer, primary_key=True),
Column('today', DateTime)
)
@classmethod
def insert_data(cls):
config.db.execute(
cls.tables.plain_pk.insert(),
[
{"id": 1, "data": "d1"},
{"id": 2, "data": "d2"},
{"id": 3, "data": "d3"},
]
)
config.db.execute(
cls.tables.has_dates.insert(),
[
{"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)}
]
)
def test_via_string(self):
row = config.db.execute(
self.tables.plain_pk.select().
order_by(self.tables.plain_pk.c.id)
).first()
eq_(
row['id'], 1
)
eq_(
row['data'], "d1"
)
def test_via_int(self):
row = config.db.execute(
self.tables.plain_pk.select().
order_by(self.tables.plain_pk.c.id)
).first()
eq_(
row[0], 1
)
eq_(
row[1], "d1"
)
def test_via_col_object(self):
row = config.db.execute(
self.tables.plain_pk.select().
order_by(self.tables.plain_pk.c.id)
).first()
eq_(
row[self.tables.plain_pk.c.id], 1
)
eq_(
row[self.tables.plain_pk.c.data], "d1"
)
@requirements.duplicate_names_in_cursor_description
def test_row_with_dupe_names(self):
result = config.db.execute(
select([self.tables.plain_pk.c.data,
self.tables.plain_pk.c.data.label('data')]).
order_by(self.tables.plain_pk.c.id)
)
row = result.first()
eq_(result.keys(), ['data', 'data'])
eq_(row, ('d1', 'd1'))
def test_row_w_scalar_select(self):
"""test that a scalar select as a column is returned as such
and that type conversion works OK.
(this is half a SQLAlchemy Core test and half to catch database
backends that may have unusual behavior with scalar selects.)
"""
datetable = self.tables.has_dates
s = select([datetable.alias('x').c.today]).as_scalar()
s2 = select([datetable.c.id, s.label('somelabel')])
row = config.db.execute(s2).first()
eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0))
class PercentSchemaNamesTest(fixtures.TablesTest):
"""tests using percent signs, spaces in table and column names.
This is a very fringe use case, doesn't work for MySQL
or Postgresql. the requirement, "percent_schema_names",
is marked "skip" by default.
"""
__requires__ = ('percent_schema_names', )
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.tables.percent_table = Table('percent%table', metadata,
Column("percent%", Integer),
Column(
"spaces % more spaces", Integer),
)
cls.tables.lightweight_percent_table = sql.table(
'percent%table', sql.column("percent%"),
sql.column("spaces % more spaces")
)
def test_single_roundtrip(self):
percent_table = self.tables.percent_table
for params in [
{'percent%': 5, 'spaces % more spaces': 12},
{'percent%': 7, 'spaces % more spaces': 11},
{'percent%': 9, 'spaces % more spaces': 10},
{'percent%': 11, 'spaces % more spaces': 9}
]:
config.db.execute(percent_table.insert(), params)
self._assert_table()
def test_executemany_roundtrip(self):
percent_table = self.tables.percent_table
config.db.execute(
percent_table.insert(),
{'percent%': 5, 'spaces % more spaces': 12}
)
config.db.execute(
percent_table.insert(),
[{'percent%': 7, 'spaces % more spaces': 11},
{'percent%': 9, 'spaces % more spaces': 10},
{'percent%': 11, 'spaces % more spaces': 9}]
)
self._assert_table()
def _assert_table(self):
percent_table = self.tables.percent_table
lightweight_percent_table = self.tables.lightweight_percent_table
for table in (
percent_table,
percent_table.alias(),
lightweight_percent_table,
lightweight_percent_table.alias()):
eq_(
list(
config.db.execute(
table.select().order_by(table.c['percent%'])
)
),
[
(5, 12),
(7, 11),
(9, 10),
(11, 9)
]
)
eq_(
list(
config.db.execute(
table.select().
where(table.c['spaces % more spaces'].in_([9, 10])).
order_by(table.c['percent%']),
)
),
[
(9, 10),
(11, 9)
]
)
row = config.db.execute(table.select().
order_by(table.c['percent%'])).first()
eq_(row['percent%'], 5)
eq_(row['spaces % more spaces'], 12)
eq_(row[table.c['percent%']], 5)
eq_(row[table.c['spaces % more spaces']], 12)
config.db.execute(
percent_table.update().values(
{percent_table.c['spaces % more spaces']: 15}
)
)
eq_(
list(
config.db.execute(
percent_table.
select().
order_by(percent_table.c['percent%'])
)
),
[(5, 15), (7, 15), (9, 15), (11, 15)]
)
|
NeovaHealth/odoo | refs/heads/8.0 | addons/portal_project_issue/__init__.py | 493 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
kchodorow/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py | 101 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DBackpropFilterGradTest(test.TestCase):
def testGradient(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv2d(in_val,
array_ops.zeros(filter_shape),
[1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1],
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
nushio3/chainer | refs/heads/master | docs/source/conf.py | 2 | # -*- coding: utf-8 -*-
#
# Chainer documentation build configuration file, created by
# sphinx-quickstart on Sun May 10 12:22:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, '../..')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chainer'
copyright = u'2015, Preferred Networks, inc. and Preferred Infrastructure, inc.'
author = u'Preferred Networks, inc. and Preferred Infrastructure, inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not os.environ.get('READTHEDOCS'):
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/modified_theme.css'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/modified_theme.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chainerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Chainer.tex', u'Chainer Documentation',
u'Preferred Networks, inc. and Preferred Infrastructure, inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chainer', u'Chainer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Chainer', u'Chainer Documentation',
author, 'Chainer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/2/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'pycuda': ('http://documen.tician.de/pycuda/', None),
'scikits.cuda': ('http://scikit-cuda.readthedocs.org/en/latest/', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
}
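# With the mapping above, Sphinx resolves cross-references such as
# :class:`numpy.ndarray` against the listed projects' object inventories at
# build time (standard intersphinx behaviour).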
|
procoder317/scikit-learn | refs/heads/master | sklearn/covariance/shrunk_covariance_.py | 209 | """
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..externals.six.moves import xrange
from ..utils import check_array
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
emp_cov : array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov : array-like
Shrunk covariance.
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = check_array(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
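def _shrunk_covariance_example():  # pragma: no cover
    # Illustration-only sketch (this helper is an addition for exposition,
    # not part of the scikit-learn API). With mu = trace(cov) / n_features
    # = 2., the diagonal stays at 2. while the off-diagonal entries are
    # scaled by (1 - shrinkage).
    emp_cov = np.array([[2., 1.], [1., 2.]])
    expected = np.array([[2., .5], [.5, 2.]])
    assert np.allclose(shrunk_covariance(emp_cov, shrinkage=0.5), expected)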
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : boolean, default True
Specify if the estimated precision is stored
shrinkage : float, 0 <= shrinkage <= 1, default 0.1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage` : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrinkage coefficient.
    assume_centered : boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
        If False, data are centered before computation.
    block_size : int
        Size of the blocks into which the covariance matrix will be split.
    Returns
    -------
    shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
    # optionally center data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
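    # The double loop handles the leading block_size * n_splits square of
    # <X2.T, X2> (accumulated in beta_) and of the squared entries of
    # <X.T, X> (accumulated in delta_); the loops that follow add the
    # trailing row strip, column strip and bottom-right corner so each
    # block is counted exactly once.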
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:],
X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = 0 if beta == 0 else beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean, default=False
If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, data are centered before computation.
block_size : int, default=1000
Size of the blocks into which the covariance matrix will be split.
This is purely a memory optimization and does not affect results.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
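def _ledoit_wolf_example():  # pragma: no cover
    # Illustration-only sketch (an addition for exposition, not part of the
    # public API): ledoit_wolf() delegates to ledoit_wolf_shrinkage() with
    # the same arguments, so the returned coefficients agree.
    X = np.random.RandomState(0).randn(30, 4)
    shrunk_cov, shrinkage = ledoit_wolf(X)
    assert np.allclose(shrinkage, ledoit_wolf_shrinkage(X))
    assert shrunk_cov.shape == (4, 4)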
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size : int, default=1000
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation. This is purely a memory
optimization and does not affect results.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True,
block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : boolean
If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = 1. if den == 0 else min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
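def _oas_example():  # pragma: no cover
    # Illustration-only sketch (an addition for exposition, not part of the
    # public API): the OAS estimate has the shape of the feature covariance
    # and a shrinkage coefficient clipped to [0, 1].
    X = np.random.RandomState(0).randn(20, 5)
    shrunk_cov, shrinkage = oas(X)
    assert shrunk_cov.shape == (5, 5)
    assert 0. <= shrinkage <= 1.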
class OAS(EmpiricalCovariance):
"""Oracle Approximating Shrinkage Estimator
Read more in the :ref:`User Guide <shrunk_covariance>`.
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
    assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float, 0 <= shrinkage <= 1
        Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
        Returns
        -------
        self : object
Returns self.
"""
X = check_array(X)
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
|
arrayexpress/ae_auto | refs/heads/master | ae_web/maintenance/migrations/__init__.py | 12133432 | |
sopier/django | refs/heads/master | tests/template_backends/apps/importerror/__init__.py | 12133432 | |
resmo/ansible | refs/heads/devel | test/units/modules/source_control/__init__.py | 12133432 | |
philsch/ansible | refs/heads/devel | test/units/plugins/callback/__init__.py | 12133432 | |
RackSec/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/baz1/__init__.py | 12133432 | |
njoubert/ardupilot | refs/heads/master | mk/PX4/Tools/genmsg/test/test_genmsg_command_line.py | 216 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
from genmsg.command_line import includepath_to_dict
assert {} == includepath_to_dict([])
assert {'std_msgs': [ 'foo' ]} == includepath_to_dict(['std_msgs:foo'])
assert {'std_msgs': [ 'foo' ], 'bar_msgs': [ 'baz:colon' ]} == includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon'])
|
TomTranter/OpenPNM | refs/heads/master | tests/unit/phases/mixtures/HumidAirTest.py | 1 | import openpnm as op
from openpnm.phases import mixtures
class HumidAirTest:
def setup_class(self):
ws = op.Workspace()
ws.clear()
self.net = op.network.Cubic(shape=[10, 10, 10])
def test_init(self):
ha = mixtures.GenericMixture(network=self.net)
assert isinstance(ha, mixtures.GenericMixture)
if __name__ == '__main__':
t = HumidAirTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
|
jhunufa/ArduWatchRaspSerial | refs/heads/master | virtualenv/lib/python3.4/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
    >>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if time.time() > end_time:
if timeout is not None and timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
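def _pidlockfile_example(): # pragma: no cover
    """ Illustration-only usage sketch, not part of the lockfile API.
    The path below is hypothetical; acquire() raises LockTimeout (or
    AlreadyLocked when no positive timeout is given) if another process
    already holds the lock.
    """
    lock = PIDLockFile('/tmp/example-app.pid', timeout=5)
    lock.acquire()
    try:
        pass # do work while holding the lock
    finally:
        lock.release()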
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
pidfile.write("%s\n" % pid)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
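def _pidfile_roundtrip_example(): # pragma: no cover
    """ Illustration-only sketch, not part of the lockfile API: the three
    module-level helpers round-trip the current PID through a
    hypothetical, writable path.
    """
    write_pid_to_pidfile('/tmp/example.pid')
    assert read_pid_from_pidfile('/tmp/example.pid') == os.getpid()
    remove_existing_pidfile('/tmp/example.pid')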
|
candy7393/VTK | refs/heads/master | ThirdParty/Twisted/twisted/words/test/test_jabbererror.py | 40 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.error}.
"""
from twisted.trial import unittest
from twisted.words.protocols.jabber import error
from twisted.words.xish import domish
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_STREAMS = 'http://etherx.jabber.org/streams'
NS_XMPP_STREAMS = 'urn:ietf:params:xml:ns:xmpp-streams'
NS_XMPP_STANZAS = 'urn:ietf:params:xml:ns:xmpp-stanzas'
class BaseErrorTest(unittest.TestCase):
def test_getElementPlain(self):
"""
Test getting an element for a plain error.
"""
e = error.BaseError('feature-not-implemented')
element = e.getElement()
self.assertIdentical(element.uri, None)
self.assertEqual(len(element.children), 1)
def test_getElementText(self):
"""
Test getting an element for an error with a text.
"""
e = error.BaseError('feature-not-implemented', 'text')
element = e.getElement()
self.assertEqual(len(element.children), 2)
self.assertEqual(unicode(element.text), 'text')
self.assertEqual(element.text.getAttribute((NS_XML, 'lang')), None)
def test_getElementTextLang(self):
"""
Test getting an element for an error with a text and language.
"""
e = error.BaseError('feature-not-implemented', 'text', 'en_US')
element = e.getElement()
self.assertEqual(len(element.children), 2)
self.assertEqual(unicode(element.text), 'text')
self.assertEqual(element.text[(NS_XML, 'lang')], 'en_US')
def test_getElementAppCondition(self):
"""
Test getting an element for an error with an app specific condition.
"""
ac = domish.Element(('testns', 'myerror'))
e = error.BaseError('feature-not-implemented', appCondition=ac)
element = e.getElement()
self.assertEqual(len(element.children), 2)
self.assertEqual(element.myerror, ac)
class StreamErrorTest(unittest.TestCase):
def test_getElementPlain(self):
"""
Test namespace of the element representation of an error.
"""
e = error.StreamError('feature-not-implemented')
element = e.getElement()
self.assertEqual(element.uri, NS_STREAMS)
def test_getElementConditionNamespace(self):
"""
Test that the error condition element has the correct namespace.
"""
e = error.StreamError('feature-not-implemented')
element = e.getElement()
self.assertEqual(NS_XMPP_STREAMS, getattr(element, 'feature-not-implemented').uri)
def test_getElementTextNamespace(self):
"""
Test that the error text element has the correct namespace.
"""
e = error.StreamError('feature-not-implemented', 'text')
element = e.getElement()
self.assertEqual(NS_XMPP_STREAMS, element.text.uri)
class StanzaErrorTest(unittest.TestCase):
"""
Tests for L{error.StreamError}.
"""
def test_typeRemoteServerTimeout(self):
"""
Remote Server Timeout should yield type wait, code 504.
"""
e = error.StanzaError('remote-server-timeout')
self.assertEqual('wait', e.type)
self.assertEqual('504', e.code)
def test_getElementPlain(self):
"""
Test getting an element for a plain stanza error.
"""
e = error.StanzaError('feature-not-implemented')
element = e.getElement()
self.assertEqual(element.uri, None)
self.assertEqual(element['type'], 'cancel')
self.assertEqual(element['code'], '501')
def test_getElementType(self):
"""
Test getting an element for a stanza error with a given type.
"""
e = error.StanzaError('feature-not-implemented', 'auth')
element = e.getElement()
self.assertEqual(element.uri, None)
self.assertEqual(element['type'], 'auth')
self.assertEqual(element['code'], '501')
def test_getElementConditionNamespace(self):
"""
Test that the error condition element has the correct namespace.
"""
e = error.StanzaError('feature-not-implemented')
element = e.getElement()
self.assertEqual(NS_XMPP_STANZAS, getattr(element, 'feature-not-implemented').uri)
def test_getElementTextNamespace(self):
"""
Test that the error text element has the correct namespace.
"""
e = error.StanzaError('feature-not-implemented', text='text')
element = e.getElement()
self.assertEqual(NS_XMPP_STANZAS, element.text.uri)
def test_toResponse(self):
"""
Test an error response is generated from a stanza.
The addressing on the (new) response stanza should be reversed, an
error child (with proper properties) added and the type set to
C{'error'}.
"""
stanza = domish.Element(('jabber:client', 'message'))
stanza['type'] = 'chat'
stanza['to'] = 'user1@example.com'
stanza['from'] = 'user2@example.com/resource'
e = error.StanzaError('service-unavailable')
response = e.toResponse(stanza)
self.assertNotIdentical(response, stanza)
self.assertEqual(response['from'], 'user1@example.com')
self.assertEqual(response['to'], 'user2@example.com/resource')
self.assertEqual(response['type'], 'error')
self.assertEqual(response.error.children[0].name,
'service-unavailable')
self.assertEqual(response.error['type'], 'cancel')
self.assertNotEqual(stanza.children, response.children)
class ParseErrorTest(unittest.TestCase):
"""
Tests for L{error._parseError}.
"""
def setUp(self):
self.error = domish.Element((None, 'error'))
def test_empty(self):
"""
Test parsing of the empty error element.
"""
result = error._parseError(self.error, 'errorns')
self.assertEqual({'condition': None,
'text': None,
'textLang': None,
'appCondition': None}, result)
def test_condition(self):
"""
Test parsing of an error element with a condition.
"""
self.error.addElement(('errorns', 'bad-request'))
result = error._parseError(self.error, 'errorns')
self.assertEqual('bad-request', result['condition'])
def test_text(self):
"""
Test parsing of an error element with a text.
"""
text = self.error.addElement(('errorns', 'text'))
text.addContent('test')
result = error._parseError(self.error, 'errorns')
self.assertEqual('test', result['text'])
self.assertEqual(None, result['textLang'])
def test_textLang(self):
"""
Test parsing of an error element with a text with a defined language.
"""
text = self.error.addElement(('errorns', 'text'))
text[NS_XML, 'lang'] = 'en_US'
text.addContent('test')
result = error._parseError(self.error, 'errorns')
self.assertEqual('en_US', result['textLang'])
def test_textLangInherited(self):
"""
Test parsing of an error element with a text with inherited language.
"""
text = self.error.addElement(('errorns', 'text'))
self.error[NS_XML, 'lang'] = 'en_US'
text.addContent('test')
result = error._parseError(self.error, 'errorns')
self.assertEqual('en_US', result['textLang'])
test_textLangInherited.todo = "xml:lang inheritance not implemented"
def test_appCondition(self):
"""
Test parsing of an error element with an app specific condition.
"""
condition = self.error.addElement(('testns', 'condition'))
result = error._parseError(self.error, 'errorns')
self.assertEqual(condition, result['appCondition'])
def test_appConditionMultiple(self):
"""
Test parsing of an error element with multiple app specific conditions.
"""
self.error.addElement(('testns', 'condition'))
condition = self.error.addElement(('testns', 'condition2'))
result = error._parseError(self.error, 'errorns')
self.assertEqual(condition, result['appCondition'])
class ExceptionFromStanzaTest(unittest.TestCase):
def test_basic(self):
"""
Test basic operations of exceptionFromStanza.
Given a realistic stanza, check if a sane exception is returned.
Using this stanza::
<iq type='error'
from='pubsub.shakespeare.lit'
to='francisco@denmark.lit/barracks'
id='subscriptions1'>
<pubsub xmlns='http://jabber.org/protocol/pubsub'>
<subscriptions/>
</pubsub>
<error type='cancel'>
<feature-not-implemented
xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
<unsupported xmlns='http://jabber.org/protocol/pubsub#errors'
feature='retrieve-subscriptions'/>
</error>
</iq>
"""
stanza = domish.Element((None, 'stanza'))
p = stanza.addElement(('http://jabber.org/protocol/pubsub', 'pubsub'))
p.addElement('subscriptions')
e = stanza.addElement('error')
e['type'] = 'cancel'
e.addElement((NS_XMPP_STANZAS, 'feature-not-implemented'))
uc = e.addElement(('http://jabber.org/protocol/pubsub#errors',
'unsupported'))
uc['feature'] = 'retrieve-subscriptions'
result = error.exceptionFromStanza(stanza)
self.assert_(isinstance(result, error.StanzaError))
self.assertEqual('feature-not-implemented', result.condition)
self.assertEqual('cancel', result.type)
self.assertEqual(uc, result.appCondition)
self.assertEqual([p], result.children)
def test_legacy(self):
"""
Test legacy operations of exceptionFromStanza.
Given a realistic stanza with only legacy (pre-XMPP) error information,
check if a sane exception is returned.
Using this stanza::
<message type='error'
to='piers@pipetree.com/Home'
            from='qmacro@jabber.org'>
<body>Are you there?</body>
<error code='502'>Unable to resolve hostname.</error>
</message>
"""
stanza = domish.Element((None, 'stanza'))
p = stanza.addElement('body', content='Are you there?')
e = stanza.addElement('error', content='Unable to resolve hostname.')
e['code'] = '502'
result = error.exceptionFromStanza(stanza)
self.assert_(isinstance(result, error.StanzaError))
self.assertEqual('service-unavailable', result.condition)
self.assertEqual('wait', result.type)
self.assertEqual('Unable to resolve hostname.', result.text)
self.assertEqual([p], result.children)
class ExceptionFromStreamErrorTest(unittest.TestCase):
def test_basic(self):
"""
Test basic operations of exceptionFromStreamError.
Given a realistic stream error, check if a sane exception is returned.
Using this error::
<stream:error xmlns:stream='http://etherx.jabber.org/streams'>
<xml-not-well-formed xmlns='urn:ietf:params:xml:ns:xmpp-streams'/>
</stream:error>
"""
e = domish.Element(('http://etherx.jabber.org/streams', 'error'))
e.addElement((NS_XMPP_STREAMS, 'xml-not-well-formed'))
result = error.exceptionFromStreamError(e)
self.assert_(isinstance(result, error.StreamError))
self.assertEqual('xml-not-well-formed', result.condition)
|
yeyanchao/calibre | refs/heads/master | src/calibre/ebooks/conversion/plugins/pml_input.py | 2 | # -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import glob
import os
import shutil
from calibre.customize.conversion import InputFormatPlugin
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.zipfile import ZipFile
class PMLInput(InputFormatPlugin):
name = 'PML Input'
author = 'John Schember'
description = 'Convert PML to OEB'
# pmlz is a zip file containing pml files and png images.
file_types = set(['pml', 'pmlz'])
def process_pml(self, pml_path, html_path, close_all=False):
from calibre.ebooks.pml.pmlconverter import PML_HTMLizer
pclose = False
hclose = False
if not hasattr(pml_path, 'read'):
pml_stream = open(pml_path, 'rb')
pclose = True
else:
pml_stream = pml_path
pml_stream.seek(0)
if not hasattr(html_path, 'write'):
html_stream = open(html_path, 'wb')
hclose = True
else:
html_stream = html_path
ienc = pml_stream.encoding if pml_stream.encoding else 'cp1252'
if self.options.input_encoding:
ienc = self.options.input_encoding
self.log.debug('Converting PML to HTML...')
hizer = PML_HTMLizer()
html = hizer.parse_pml(pml_stream.read().decode(ienc), html_path)
html = '<html><head><title></title></head><body>%s</body></html>'%html
html_stream.write(html.encode('utf-8', 'replace'))
if pclose:
pml_stream.close()
if hclose:
html_stream.close()
return hizer.get_toc()
def get_images(self, stream, tdir, top_level=False):
images = []
imgs = []
if top_level:
imgs = glob.glob(os.path.join(tdir, '*.png'))
            # Images not found at the top level: try the bookname_img
            # directory, because that's where Dropbook likes to see them.
if not imgs:
if hasattr(stream, 'name'):
imgs = glob.glob(os.path.join(tdir, os.path.splitext(os.path.basename(stream.name))[0] + '_img', '*.png'))
            # No images in the Dropbook location; try the generic images directory
if not imgs:
imgs = glob.glob(os.path.join(os.path.join(tdir, u'images'), u'*.png'))
if imgs:
os.makedirs(os.path.join(os.getcwdu(), u'images'))
for img in imgs:
pimg_name = os.path.basename(img)
pimg_path = os.path.join(os.getcwdu(), 'images', pimg_name)
images.append('images/' + pimg_name)
shutil.copy(img, pimg_path)
return images
def convert(self, stream, options, file_ext, log,
accelerators):
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.metadata.opf2 import OPFCreator
self.options = options
self.log = log
pages, images = [], []
toc = TOC()
if file_ext == 'pmlz':
log.debug('De-compressing content to temporary directory...')
with TemporaryDirectory(u'_unpmlz') as tdir:
zf = ZipFile(stream)
zf.extractall(tdir)
pmls = glob.glob(os.path.join(tdir, u'*.pml'))
for pml in pmls:
html_name = os.path.splitext(os.path.basename(pml))[0]+'.html'
html_path = os.path.join(os.getcwdu(), html_name)
pages.append(html_name)
log.debug('Processing PML item %s...' % pml)
ttoc = self.process_pml(pml, html_path)
toc += ttoc
images = self.get_images(stream, tdir, True)
else:
toc = self.process_pml(stream, u'index.html')
pages.append(u'index.html')
if hasattr(stream, 'name'):
images = self.get_images(stream, os.path.abspath(os.path.dirname(stream.name)))
        # We want pages to be ordered alphabetically.
pages.sort()
manifest_items = []
for item in pages+images:
manifest_items.append((item, None))
from calibre.ebooks.metadata.meta import get_metadata
log.debug('Reading metadata from input file...')
mi = get_metadata(stream, 'pml')
if 'images/cover.png' in images:
mi.cover = u'images/cover.png'
opf = OPFCreator(os.getcwdu(), mi)
log.debug('Generating manifest...')
opf.create_manifest(manifest_items)
opf.create_spine(pages)
opf.set_toc(toc)
with open(u'metadata.opf', 'wb') as opffile:
with open(u'toc.ncx', 'wb') as tocfile:
opf.render(opffile, tocfile, u'toc.ncx')
return os.path.join(os.getcwdu(), u'metadata.opf')
|
cysnake4713/server-tools | refs/heads/8.0 | auditlog/tests/test_auditlog.py | 28 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
class TestAuditlog(TransactionCase):
def setUp(self):
super(TestAuditlog, self).setUp()
self.groups_model_id = self.env.ref('base.model_res_groups').id
self.groups_rule = self.env['auditlog.rule'].create({
'name': 'testrule for groups',
'model_id': self.groups_model_id,
'log_read': True,
'log_create': True,
'log_write': True,
'log_unlink': True,
'state': 'subscribed',
})
def tearDown(self):
self.groups_rule.unlink()
super(TestAuditlog, self).tearDown()
def test_LogCreation(self):
"""First test, caching some data."""
auditlog_log = self.env['auditlog.log']
group = self.env['res.groups'].create({
'name': 'testgroup1',
})
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'create'),
('res_id', '=', group.id),
]).ensure_one())
group.write({'name': 'Testgroup1'})
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'write'),
('res_id', '=', group.id),
]).ensure_one())
group.unlink()
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'unlink'),
('res_id', '=', group.id),
]).ensure_one())
def test_LogCreation2(self):
"""Second test, using cached data of the first one."""
auditlog_log = self.env['auditlog.log']
testgroup2 = self.env['res.groups'].create({
'name': 'testgroup2',
})
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'create'),
('res_id', '=', testgroup2.id),
]).ensure_one())
def test_LogCreation3(self):
"""Third test, two groups, the latter being the parent of the former.
Then we remove it right after (with (2, X) tuple) to test the creation
of a 'write' log with a deleted resource (so with no text
representation).
"""
auditlog_log = self.env['auditlog.log']
        testgroup3 = self.env['res.groups'].create({
'name': 'testgroup3',
})
testgroup4 = self.env['res.groups'].create({
'name': 'testgroup4',
'implied_ids': [(4, testgroup3.id)],
})
testgroup4.write({'implied_ids': [(2, testgroup3.id)]})
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'create'),
('res_id', '=', testgroup3.id),
]).ensure_one())
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'create'),
('res_id', '=', testgroup4.id),
]).ensure_one())
self.assertTrue(auditlog_log.search([
('model_id', '=', self.groups_model_id),
('method', '=', 'write'),
('res_id', '=', testgroup4.id),
]).ensure_one())
|
hwjworld/xiaodun-platform | refs/heads/master | common/djangoapps/util/tests/test_date_utils.py | 52 | # -*- coding: utf-8 -*-
"""
Tests for util.date_utils
"""
from datetime import datetime, timedelta, tzinfo
import unittest
import ddt
from mock import patch
from nose.tools import assert_equals, assert_false # pylint: disable=E0611
from pytz import UTC
from util.date_utils import (
get_default_time_display, get_time_display, almost_same_datetime,
strftime_localized,
)
def test_get_default_time_display():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_dflt_time_disp_notz():
test_time = datetime(1992, 3, 12, 15, 3, 30)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_time_disp_ret_empty():
assert_equals("", get_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("", get_time_display(test_time, ""))
def test_get_time_display():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("dummy text", get_time_display(test_time, 'dummy text'))
assert_equals("Mar 12 1992", get_time_display(test_time, '%b %d %Y'))
assert_equals("Mar 12 1992 UTC", get_time_display(test_time, '%b %d %Y %Z'))
assert_equals("Mar 12 15:03", get_time_display(test_time, '%b %d %H:%M'))
def test_get_time_pass_through():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, None))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, "%"))
def test_get_time_display_coerce():
test_time_standard = datetime(1992, 1, 12, 15, 3, 30, tzinfo=UTC)
test_time_daylight = datetime(1992, 7, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Jan 12, 1992 at 07:03 PST",
get_time_display(test_time_standard, None, coerce_tz="US/Pacific"))
assert_equals("Jan 12, 1992 at 15:03 UTC",
get_time_display(test_time_standard, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jan 12 07:03",
get_time_display(test_time_standard, '%b %d %H:%M', coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 08:03 PDT",
get_time_display(test_time_daylight, None, coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 15:03 UTC",
get_time_display(test_time_daylight, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jul 12 08:03",
get_time_display(test_time_daylight, '%b %d %H:%M', coerce_tz="US/Pacific"))
# pylint: disable=W0232
class NamelessTZ(tzinfo):
"""Static timezone for testing"""
def utcoffset(self, _dt):
return timedelta(hours=-3)
def dst(self, _dt):
return timedelta(0)
def test_get_default_time_display_no_tzname():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=NamelessTZ())
assert_equals(
"Mar 12, 1992 at 15:03-0300",
get_default_time_display(test_time))
def test_almost_same_datetime():
assert almost_same_datetime(
datetime(2013, 5, 3, 10, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
assert almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(hours=1)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(minutes=10)
)
)
def fake_ugettext(translations):
"""
Create a fake implementation of ugettext, for testing.
"""
def _ugettext(text): # pylint: disable=missing-docstring
return translations.get(text, text)
return _ugettext
def fake_pgettext(translations):
"""
Create a fake implementation of pgettext, for testing.
"""
def _pgettext(context, text): # pylint: disable=missing-docstring
return translations.get((context, text), text)
return _pgettext
@ddt.ddt
class StrftimeLocalizedTest(unittest.TestCase):
"""
Tests for strftime_localized.
"""
@ddt.data(
("%Y", "2013"),
("%m/%d/%y", "02/14/13"),
("hello", "hello"),
(u'%Y년 %m월 %d일', u"2013년 02월 14일"),
("%a, %b %d, %Y", "Thu, Feb 14, 2013"),
("%I:%M:%S %p", "04:41:17 PM"),
)
def test_usual_strftime_behavior(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
# strftime doesn't like Unicode, so do the work in UTF8.
self.assertEqual(expected, dtime.strftime(fmt.encode('utf8')).decode('utf8'))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("LONG_DATE", "Thursday, February 14, 2013"),
("TIME", "04:41:17 PM"),
("%x %X!", "Feb 14, 2013 04:41:17 PM!"),
)
def test_shortcuts(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Feb"): "XXfebXX",
("month name", "February"): "XXfebruaryXX",
("abbreviated weekday name", "Thu"): "XXthuXX",
("weekday name", "Thursday"): "XXthursdayXX",
("am/pm indicator", "PM"): "XXpmXX",
}))
@ddt.data(
("SHORT_DATE", "XXfebXX 14, 2013"),
("LONG_DATE", "XXthursdayXX, XXfebruaryXX 14, 2013"),
("DATE_TIME", "XXfebXX 14, 2013 at 16:41"),
("TIME", "04:41:17 XXpmXX"),
("%x %X!", "XXfebXX 14, 2013 04:41:17 XXpmXX!"),
)
def test_translated_words(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "date(%Y.%m.%d)",
"LONG_DATE_FORMAT": "date(%A.%Y.%B.%d)",
"DATE_TIME_FORMAT": "date(%Y.%m.%d@%H.%M)",
"TIME_FORMAT": "%Hh.%Mm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "date(2013.02.14)"),
("Look: %x", "Look: date(2013.02.14)"),
("LONG_DATE", "date(Thursday.2013.February.14)"),
("DATE_TIME", "date(2013.02.14@16.41)"),
("TIME", "16h.41m.17s"),
("The time is: %X", "The time is: 16h.41m.17s"),
("%x %X", "date(2013.02.14) 16h.41m.17s"),
)
def test_translated_formats(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "oops date(%Y.%x.%d)",
"TIME_FORMAT": "oops %Hh.%Xm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("TIME", "04:41:17 PM"),
)
def test_recursion_protection(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@ddt.data(
"%",
"Hello%"
"%Y/%m/%d%",
)
def test_invalid_format_strings(self, fmt):
dtime = datetime(2013, 02, 14, 16, 41, 17)
with self.assertRaises(ValueError):
strftime_localized(dtime, fmt)
|
extremewaysback/django | refs/heads/master | tests/defer/tests.py | 338 | from __future__ import unicode_literals
from django.db.models.query_utils import DeferredAttribute, InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
def assert_delayed(self, obj, num):
"""
Instances with deferred fields look the same as normal instances when
we examine attribute values. Therefore, this method returns the number
of deferred fields on returned instances.
"""
count = 0
for field in obj._meta.fields:
if isinstance(obj.__class__.__dict__.get(field.attname), DeferredAttribute):
count += 1
self.assertEqual(count, num)
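    # e.g. Primary.objects.defer("name")[0] carries one DeferredAttribute
    # (for 'name'), so assert_delayed(obj, 1) passes; see test_defer below.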
class DeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
def test_defer(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.defer("related__first")[0], 0)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
def test_only(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("related__first")[0], 2)
        # Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key; see #15494.
self.assert_delayed(qs.only("pk")[0], 3)
# You can use 'pk' with reverse foreign key lookups.
self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 3)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = 'Cannot pass None as an argument to only().'
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
        # Using values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(Primary.objects.defer("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_only_values_does_not_defer(self):
self.assertEqual(Primary.objects.only("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_select_related_raises_invalid_query(self):
# When we defer a field and also select_related it, the query is
# invalid and raises an exception.
with self.assertRaises(InvalidQuery):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
with self.assertRaises(InvalidQuery):
Primary.objects.only("name").select_related("related")[0]
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# With a depth-based select_related, all deferred ForeignKeys are
# deferred instead of traversed.
with self.assertNumQueries(3):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
"p1", "a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# on an inherited model, its PK is also fetched, hence '3' deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
def test_defer_baseclass_when_subclass_has_added_field(self):
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass(self):
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
# when inherited model, its PK is also fetched, hence '4' deferred fields.
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_sublcass(self):
# You can retrieve a single field on a subclass
obj = BigChild.objects.only("other").get(name="b1")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
def test_defer_inheritance_pk_chaining(self):
"""
When an inherited model is fetched from the DB, its PK is also fetched.
When getting the PK of the parent model it is useful to use the already
fetched parent model PK if it happens to be available. Tests that this
is done.
"""
s1 = Secondary.objects.create(first="x1", second="y1")
bc = BigChild.objects.create(name="b1", value="foo", related=s1,
other="bar")
bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
def test_eq(self):
s1 = Secondary.objects.create(first="x1", second="y1")
s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
self.assertEqual(s1, s1_defer)
self.assertEqual(s1_defer, s1)
def test_refresh_not_loading_deferred_fields(self):
s = Secondary.objects.create()
rf = Primary.objects.create(name='foo', value='bar', related=s)
rf2 = Primary.objects.only('related', 'value').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
rf2.refresh_from_db()
self.assertEqual(rf2.value, 'new bar')
with self.assertNumQueries(1):
self.assertEqual(rf2.name, 'new foo')
def test_custom_refresh_on_deferred_loading(self):
s = Secondary.objects.create()
rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
rf2 = RefreshPrimaryProxy.objects.only('related').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
# Customized refresh_from_db() reloads all deferred fields on
# access of any of them.
self.assertEqual(rf2.name, 'new foo')
self.assertEqual(rf2.value, 'new bar')
|
ovnicraft/evex-configuration | refs/heads/master | util/jenkins/stage_release.py | 2 | """
Take in a YAML file with the basic data of all the things we could
deploy and command line hashes for the repos that we want to deploy
right now.
Example Config YAML file:
---
DOC_STORE_CONFIG:
hosts: [ list, of, mongo, hosts]
port: #
db: 'db'
user: 'jenkins'
password: 'password'
configuration_repo: "/path/to/configuration/repo"
configuration_secure_repo: "/path/to/configuration-secure"
repos:
edxapp:
plays:
- edxapp
- worker
xqueue:
plays:
- xqueue
6.00x:
plays:
- xserver
xserver:
plays:
- xserver
deployments:
edx:
- stage
- prod
edge:
- stage
- prod
loadtest:
- stage
# A jenkins URL to post requests for building AMIs
abbey_url: "http://...."
abbey_token: "API_TOKEN"
---
"""
import argparse
import json
import requests
import yaml
import logging as log
from datetime import datetime
from git import Repo
from pprint import pformat
from pymongo import MongoClient, DESCENDING
log.basicConfig(level=log.DEBUG)
def uri_from(doc_store_config):
"""
Convert the below structure to a mongodb uri.
DOC_STORE_CONFIG:
hosts:
- 'host1.com'
- 'host2.com'
port: 10012
db: 'devops'
user: 'username'
password: 'password'
"""
uri_format = "mongodb://{user}:{password}@{hosts}/{db}"
host_format = "{host}:{port}"
port = doc_store_config['port']
host_uris = [host_format.format(host=host,port=port) for host in doc_store_config['hosts']]
return uri_format.format(
user=doc_store_config['user'],
password=doc_store_config['password'],
hosts=",".join(host_uris),
db=doc_store_config['db'])
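# Illustrative result for the DOC_STORE_CONFIG shown in the docstring above:
#   uri_from(config['DOC_STORE_CONFIG']) ->
#   "mongodb://username:password@host1.com:10012,host2.com:10012/devops"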
def prepare_release(args):
config = yaml.safe_load(open(args.config))
client = MongoClient(uri_from(config['DOC_STORE_CONFIG']))
db = client[config['DOC_STORE_CONFIG']['db']]
# Get configuration repo versions
config_repo_ver = Repo(config['configuration_repo']).commit().hexsha
config_secure_ver = Repo(config['configuration_secure_repo']).commit().hexsha
# Parse the vars.
var_array = map(lambda key_value: key_value.split('='), args.REPOS)
update_repos = { item[0]:item[1] for item in var_array }
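    # e.g. REPOS = ['edxapp=3233bac', 'xqueue=92832ab']
    #      -> update_repos = {'edxapp': '3233bac', 'xqueue': '92832ab'}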
log.info("Update repos: {}".format(pformat(update_repos)))
release = {}
now = datetime.utcnow()
release['_id'] = args.release_id
release['date_created'] = now
release['date_modified'] = now
release['build_status'] = 'Unknown'
release['build_user'] = args.user
release_coll = db[args.deployment]
releases = release_coll.find({'build_status': 'Succeeded'}).sort('_id', DESCENDING)
all_plays = {}
try:
last_successful = releases.next()
all_plays = last_successful['plays']
except StopIteration:
# No successful builds.
log.warn("No Previously successful builds.")
# For all repos that were updated
for repo, ref in update_repos.items():
var_name = "{}_version".format(repo.replace('-','_'))
if repo not in config['repos']:
raise Exception("No info for repo with name '{}'".format(repo))
# For any play that uses the updated repo
        for play in config['repos'][repo]['plays']:
if play not in all_plays:
all_plays[play] = {}
if 'vars' not in all_plays[play]:
all_plays[play]['vars'] = {}
all_plays[play]['vars'][var_name] = ref
# Configuration to use to build these AMIs
all_plays[play]['configuration_ref'] = config_repo_ver
all_plays[play]['configuration_secure_ref'] = config_secure_ver
# Set amis to None for all envs of this deployment
all_plays[play]['amis'] = {}
for env in config['deployments'][args.deployment]:
                # Check the AMIs collection to see if an AMI already exists
                # for this configuration.
potential_ami = ami_for(db, env,
args.deployment,
play, config_repo_ver,
config_secure_ver,
ref)
if potential_ami:
all_plays[play]['amis'][env] = potential_ami['_id']
else:
all_plays[play]['amis'][env] = None
release['plays'] = all_plays
release_coll.insert(release)
# All plays that need new AMIs have been updated.
notify_abbey(config['abbey_url'], config['abbey_token'], args.deployment, all_plays, args.release_id)
def ami_for(db, env, deployment, play, configuration,
configuration_secure, ansible_vars):
ami_signature = {
'env': env,
'deployment': deployment,
'play': play,
'configuration_ref': configuration,
'configuration_secure_ref': configuration_secure,
'vars': ansible_vars,
}
return db.amis.find_one(ami_signature)
def notify_abbey(abbey_url, abbey_token, deployment, all_plays, release_id):
for play_name, play in all_plays.items():
for env, ami in play['amis'].items():
if ami is None:
params = []
params.append({ 'name': 'play', 'value': play_name})
params.append({ 'name': 'deployment', 'value': deployment})
params.append({ 'name': 'environment', 'value': env})
params.append({ 'name': 'vars', 'value': yaml.dump(play['vars'], default_flow_style=False)})
params.append({ 'name': 'release_id', 'value': release_id})
build_params = {'parameter': params}
log.info("Need ami for {}".format(pformat(build_params)))
r = requests.post(abbey_url,
data={"token": abbey_token},
params={"json": json.dumps(build_params)})
log.info("Sent request got {}".format(r))
if r.status_code != 201:
# Something went wrong.
msg = "Failed to submit request with params: {}"
raise Exception(msg.format(pformat(build_params)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare a new release.")
parser.add_argument('-c', '--config', required=True, help="Configuration for deploys")
parser.add_argument('-u', '--user', required=True, help="User staging the release.")
msg = "The deployment to build for eg. edx, edge, loadtest"
parser.add_argument('-d', '--deployment', required=True, help=msg)
parser.add_argument('-r', '--release-id', required=True, help="Id of Release.")
parser.add_argument('REPOS', nargs='+',
help="Any number of var=value(no spcae around '='" + \
" e.g. 'edxapp=3233bac xqueue=92832ab'")
args = parser.parse_args()
log.debug(args)
prepare_release(args)
|
ldolberg/mam | refs/heads/master | maggregator/Treedistance.py | 2 | import sys
import os
import re
import glob,commands
import numpy,math
from optparse import OptionParser
import copy
import time
import pickle
#sys.path.append('/home/lautaro/workspace')
#import tree
from hungarian import maxWeightMatching
from multiAggregator import main_aggregator,build_aggregate_tree,build_aggregate_randomized_tree
from zss import test_tree
from zss.test_tree import Node
from zss import compare
from editdist import distance
class String:
def __init__(self,val):
self._val = val
def get_val(self):
return self._val
    def __cmp__(self,val):
return len(self._val) >= len(val.get_val())
def similarity(self,val):
import levenshtein
return 1 - (levenshtein.levenshtein(self._val,val.get_val()) / float(max(len(self._val),len(val.get_val()))))
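# Illustrative: similarity is one minus the normalised Levenshtein distance;
# "kitten" vs "sitting" has edit distance 3 over max length 7, so
#   String("kitten").similarity(String("sitting"))  # -> 1 - 3/7 ~ 0.571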
class Tree:
def __init__(self,val,l=None,r=None,parent=None):
self._attr = val
self._left = l
self._right = r
self._parent = parent
def __str__(self):
return str(self._attr)
def height(self):
if self._left == None and self._right == None:
return 1
elif self._left == None and self._right != None:
return 1 + self._right.height()
elif self._left != None and self._right == None:
return 1 + self._left.height()
else:
return 1 + max(self._left.height(),self._right.height())
def get_children(self):
d=[]
if self._left != None:
d.append(self._left)
if self._right != None:
d.append(self._right)
return d
def get_root(self):
if self._parent == None:
return self
else:
return self._parent.get_root()
def get_key(self):
return self._attr
def preorder(self):
d = [self]
if self._left != None:
d.extend( self._left.preorder())
if self._right != None:
d.extend(self._right.preorder())
return d
def insert_node(self,val):
if val >= self._attr:
if self._right != None:
self._right.insert_node(val)
else:
t = Tree(val,None,None,self)
self._right = t
else:
if self._left != None:
self._left.insert_node(val)
else:
t = Tree(val,None,None,self)
self._left = t
def inorder(self):
res = []
if self._left == None and self._right == None:
return [self]
if self._left != None:
l = self._left.inorder()
res.extend(l)
res.extend([self])
if self._right != None:
r = self._right.inorder()
res.extend(r)
return res
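# Minimal usage sketch for the binary search tree above:
#   t = Tree(5)
#   t.insert_node(3)
#   t.insert_node(8)
#   [n.get_key() for n in t.inorder()]  # -> [3, 5, 8]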
def maxSimilarity(t1,t2):
maxSim= 999999.0
for u in t1.preorder():
sim = anchoredSimilarity(u,t2.get_root())
maxSim = min(sim,maxSim)
print '--------------'
for u in t2.preorder():
sim = anchoredSimilarity(t1.get_root(),u)
maxSim = min(sim,maxSim)
return maxSim
def anchoredSimilarity(u,w):
#print u,w
if u.get_children() == [] and w.get_children() == []:
return similarity(u,w)
cu = u.get_children()
cw = w.get_children()
dsize = max(len(cu),len(cw))
d_matrix = [[0 for v in xrange(dsize)] for l in xrange(dsize)]
if len(d_matrix[0]) > 1 :
for ui in xrange(len(cu)):
for wi in xrange(len(cw)):
dist = -1*anchoredSimilarity(cu[ui], cw[wi])
#print dist
d_matrix[ui][wi] = dist
#print "s matrix",d_matrix
assign = maxWeightMatching(d_matrix)[0]
#print assign
#for k in assign:
# print d_matrix[k][assign[k]]
assign = min(d_matrix[k][assign[k]] for k in assign)
#print assign
else :
assign = d_matrix[0][0]
#print d_matrix , assign
return similarity(u,w) + assign
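# anchoredSimilarity fills a square matrix with negated child-pair scores,
# solves the assignment via maxWeightMatching (Hungarian), and adds the
# minimum matched entry to the similarity of the two anchor nodes.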
def similarity(x,y):
#return sum([1 - x.get_key()[k].similarity(y.get_key()[k]) for k in x.get_key()]) / float(3)
import math
if type (x.get_key()) == int:
return 1 - (abs(x.get_key() - y.get_key()) / float(max(x.get_key(),y.get_key())))
res = sum([x.get_key()[k].similarity(y.get_key()[k]) for k in x.get_key()]) / float(2)
assert res <= 1.0
return res
def f(x,y):
#return f('',y) if x is None else f(x,'') if y is None else sum([1 - x.get_key()[k].similarity(y.get_key()[k]) for k in x.get_key()]) / float(3)
if x is None:
return sum([y[k].similarity(None) for k in y]) / 3
if y is None:
return sum([x[k].similarity(None) for k in x]) / 3
if x == y: return 0.0
#for k in x:
# print x[k],y[k], x[k].similarity(y[k])
return sum([x[k].similarity(y[k]) for k in x]) / 3
def g(x,y):
res = f(x,y)
try:
assert res <= 1
except:
for k in x:
print x[k], y[k] , x[k].similarity(y[k])
print x,y,res
raise
return res
def benchmark_similarity(alpha,k,options,args):
fields = options.fields.split(" ")
dim = options.dim.split(" ")
types = options.types.split(" ")
#test()
#For csv
#"(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d),[^,]*,[^,]*,([^,]*),([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)"
dict_dim = {}
if len(dim) != len(types):
raise Exception("The number of defined dimensions and types has to be equal")
else:
for i in range(len(dim)):
dict_dim[dim[i]] = types[i]
trees = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
options.input = '../test/app_ipv4/random.%s.reverse.txt'%k
trees1 = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
print "Computing Similarity"
s0 = maxSimilarity(trees[0][0],trees[-1][0])
print "still computing"
s1 = maxSimilarity(trees[0][0],trees1[0][0])
print "done"
return len(trees[0][0].preorder()),s0,s1
def T(tree):
root = Node(tree.get_key())
for c in tree.get_children():
root.addkid(T(c))
return root
'''
import random
t = Tree(random.randint(0,99))
for i in xrange(0,100):
t.insert_node(random.randint(1,99))
f = lambda x,y: f(x,0) if y is None else f(0,y) if x is None else (abs(x - y) / float(max(x , y)))
test_tree = T(t)
print compare.distance(test_tree,test_tree,f)
t1 = Tree(random.randint(0,99))
for i in xrange(0,100):
t1.insert_node(random.randint(1,99))
test_tree1 = T(t1)
'''
def parse_options():
lineparser = OptionParser("")
lineparser.add_option('-i','--input', dest='input', default='../test/app_ipv4/random.0.txt',type='string',help="input file (txt flow file)", metavar="FILE")
lineparser.add_option('-w','--window-size', dest='window', default=5000,type='int',help="window size in seconds")
lineparser.add_option('-r','--reg-exp', dest='reg_exp', default="",type='string',help="regular expression to extract flow information")
    lineparser.add_option('-f','--fields', dest='fields', default="src_ip",type='string',help="field names corresponding to the regular expression; they have to be separated by a space character and HAVE TO INCLUDE value and timestamp")
    lineparser.add_option('-d','--dimensions', dest='dim', default="src_ip",type='string',help="dimensions to use for the radix tree; they have to be separated by a space character and correspond to the field names")
    lineparser.add_option('-t','--type-dimension', dest='types', default="ip_addr",type='string',help="types of the dimensions")
    lineparser.add_option('-c','--cut', dest='cut', default=0.02,type='float',help="threshold (%) under which removing a node is not allowed during the construction (it includes the parents' values)")
lineparser.add_option('-a','--aggregate', dest='aggregate', default=0.02,type='float',help="threshold (%) for the aggregation")
lineparser.add_option('-l','--log-file', dest='log',default="log.att",type='string',help="log file containing the attacks", metavar="FILE")
lineparser.add_option('-s','--split', dest='split', default=20,type='float',help="percentage of data used for training")
lineparser.add_option('-g','--type-aggregation', dest='type_aggr', default="NumericalValueNode",type='string',help="type of the aggregation for nodes")
lineparser.add_option('-n','--name', dest='namefile', default="bytes",type='string',help="suffix for name of file results")
    lineparser.add_option('-S','--strategy', dest='strategy', default="",type='string',help="strategy for selecting nodes to aggregate")
lineparser.add_option('-m','--max-nodes', dest='max_nodes', default=99999,type='int',help="max size of tree")
lineparser.add_option('-o','--offset', dest='offset', default=0,type='int',help="offset")
lineparser.add_option('-b','--batch', dest='batch', default=30,type='int',help="batch")
options, args = lineparser.parse_args()
return options,args
def test_edit_distance():
options,args = parse_options()
fields = options.fields.split(" ")
dim = options.dim.split(" ")
types = options.types.split(" ")
#test()
#For csv
#"(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d),[^,]*,[^,]*,([^,]*),([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)"
dict_dim = {}
if len(dim) != len(types):
raise Exception("The number of defined dimensions and types has to be equal")
else:
for i in range(len(dim)):
dict_dim[dim[i]] = types[i]
fout = open("similarity.edit.%s.dat"%options.offset,"w")
upper_limit = options.batch + options.offset
for i in xrange(0,options.batch):
i = i + options.offset
lrow = "%s\t"%i
rrow = ""
for alpha in [0.02, 0.5,1,2,4]:
options.aggregate = alpha
options.input = '../test/app_ipv4/random.%s.txt'%i
trees = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
t = T(trees[0][0].get_root())
options.input = '../test/app_ipv4/random.%s.reverse.txt'%i
trees1 = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
t1 = T(trees1[0][0].get_root())
options.input = '../test/app_ipv4/random.%s.txt'%(upper_limit-i)
trees3 = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
t3 = T(trees3[0][0].get_root())
print "Edit Distance Based Similarity"
s0 = compare.similarity(t,t1,g)
s1 = compare.similarity(t,t3,g)
lrow += "%s\t"%s0
rrow += "%s\t"%s1
fout.write(lrow+rrow+"\n")
fout.close()
if __name__ == "__main__":
def main():
sys.setrecursionlimit(1000000)
options,args = parse_options()
fields = options.fields.split(" ")
dim = options.dim.split(" ")
types = options.types.split(" ")
#test()
#For csv
#"(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d),[^,]*,[^,]*,([^,]*),([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)"
dict_dim = {}
if len(dim) != len(types):
raise Exception("The number of defined dimensions and types has to be equal")
else:
for i in range(len(dim)):
dict_dim[dim[i]] = types[i]
fout = open("similarity.2010224.test.dat","w")
for alpa in [2]:
options.aggregate = alpa
options.input = '../test/app_ipv4/20100224/20100224.%s.txt'%i
trees = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
t = T(trees[0][0].get_root())
options.input = '../test/app_ipv4/20100224/20100224.%s.reverse.txt'%i
trees2 = build_aggregate_tree(options,args,fields,dim,types,dict_dim)
t2 = T(trees2[0][0].get_root())
simil = compare.similarity(t,t2,g)
main()
|
mjbommar/wellsettled-research | refs/heads/master | scratch/py/find_district_ws_phrase.py | 1 | # Standard imports
import codecs
import csv
import os
import re
from pprint import pprint
# WSR imports
from wsr.data.district import District, read_xml_document, read_xml_opinion
from wsr.config import SCRATCH_PATH, DISTRICT_FILE_NAME_LIST
from wsr.process.tokenize import sentence_tokenizer
from wsr.process.stem import process_sentence
def check_sentence_match(stems):
"""Check if the sentence stem sequence matches our desired
patterns."""
# Check minimum length
if len(stems) < 3:
return False
# Require initial "it"
if stems[0] != "it":
return False
# Check for "well*"
if not stems[1].startswith('well'):
return False
# Now handle hyphen vs. space
if '-' in stems[1]:
tokens = stems[1].split('-')
if tokens[1] in ['settl', 'establish']:
return True
elif stems[2] in ['settl', 'establish']:
return True
else:
return False
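# Illustrative matches on stem sequences:
#   ("it", "well-settl", "that")       -> True   (hyphenated form)
#   ("it", "well", "establish", "x")   -> True   (space-separated form)
#   ("it", "is", "settl")              -> False  (second stem must start with 'well')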
if __name__ == "__main__":
# Store the sentence list
ws_list = []
# Regex to clean square brackets
re_bracket = re.compile(r'\[(?:[^\]|]*\|)?([^\]|]*)\]')
# Load phrase stems
phrase_list = []
with codecs.open(os.path.join(SCRATCH_PATH, "results",
"ws_phrase_stems.csv"),
'r', 'utf8') as phrase_file:
for line in phrase_file:
phrase_list.append(tuple(line.strip().split(',')))
# Output to file
instance_file = codecs.open(os.path.join(SCRATCH_PATH, "results",
"district_ws_instances.csv"),
'w', 'utf8')
# Get the phrase mapping
phrase_id = range(len(phrase_list))
phrase_mapping = dict(zip(phrase_list, phrase_id))
# Write the phrase mapping to disk
phrase_mapping_file = codecs.open(os.path.join(SCRATCH_PATH, "results",
"ws_phrase_mapping.csv"),
'w', 'utf8')
for phrase in phrase_mapping:
phrase_mapping_file.write(u"\t".join([str(phrase_mapping[phrase]),
' '.join(phrase)]) + "\n")
phrase_mapping_file.close()
for district_file in DISTRICT_FILE_NAME_LIST:
# Load the dataset
district = District(district_file)
# Iterate over documents
for file_name in district.district_document_list:
# Get the XML document and sentence list
try:
doc = read_xml_document(district.read_document(file_name))
sentence_list = [s.strip().replace("\n", "").replace("\r", "") \
for s in sentence_tokenizer \
.sentences_from_text(read_xml_opinion(doc))]
except Exception, E:
print(E)
continue
for sentence in sentence_list:
# Process the sentences
sentence_stems = tuple(process_sentence(sentence))
# Inner loop
for phrase in phrase_list:
# Skip phrases that are too long
                if len(phrase) > len(sentence_stems):
continue
if phrase[0] not in sentence_stems:
continue
for offset in range(len(sentence_stems) - len(phrase) + 1):
if sentence_stems[offset] == phrase[0] and \
phrase == sentence_stems[offset:(offset + len(phrase))]:
# Get the record
# Output
# Log
record = (file_name, str(phrase_mapping[phrase]), ",".join(sentence_stems))
pprint(record)
instance_file.write("\t".join(record) + "\n")
# Close file
instance_file.close()
|
compsoc-ssc/compsocssc | refs/heads/master | general/migrations/0009_auto_20150828_2348.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('general', '0008_auto_20150628_1417'),
]
operations = [
migrations.DeleteModel(
name='SiteVisit',
),
migrations.RemoveField(
model_name='track',
name='user',
),
migrations.DeleteModel(
name='Track',
),
]
|
Jannes123/inasafe | refs/heads/develop | safe/impact_reports/report_mixin_base.py | 3 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Impact Function Report Mixin Base Class**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Christian Christelis <christian@kartoza.com>'
from safe.common.tables import Table, TableRow
class ReportMixin(object):
"""Report Mixin Interface.
.. versionadded:: 3.1
"""
def generate_html_report(self):
"""Generate an HTML report.
:returns: The report in html format.
:rtype: basestring
"""
return self.parse_to_html(self.generate_report())
def generate_report(self):
"""Defining the interface.
:returns: An itemized breakdown of the report.
:rtype: list
"""
return []
def action_checklist(self):
"""The actions to be taken in for the impact on this exposure type.
:returns: The action checklist.
:rtype: list
"""
return []
def impact_summary(self):
"""The impact summary.
        :returns: The impact summary.
:rtype: list
"""
return []
def notes(self):
"""Additional notes to be used.
:return: The notes to be added to this report
        .. note::
            Notes are very much specific to IFs, so it is expected that this
            method is overwritten in the IF if needed.
"""
return []
@staticmethod
def parse_to_html(report):
"""Convert a json compatible list of results to a tabulated version.
:param report: A json compatible report
:type report: list
:returns: Returns a tabulated version of the report
:rtype: basestring
"""
tabulated_report = []
for row in report:
row_template = {
'content': '',
'condition': True,
'arguments': (),
'header': False
}
row_template.update(row)
if not row_template['condition']:
continue
content = row_template['content']
if row_template['arguments']:
arguments = row_template['arguments']
if hasattr(content, '__iter__'):
message = (
'Problem formatting arguments into content.'
'The element count of the arguments must equal '
'the element count of the content.')
assert len(content) == len(arguments), message
# pylint: disable=bad-builtin
# pylint: disable=deprecated-lambda
content = map(lambda c, a: c % a, content, arguments)
# pylint: enable=deprecated-lambda
# pylint: enable=bad-builtin
else:
content = row_template['content'] % arguments
if row_template['header']:
table_row = TableRow(content, header=True)
else:
table_row = TableRow(content)
tabulated_report.append(table_row)
html_tabulated_report = Table(tabulated_report).toNewlineFreeString()
return html_tabulated_report
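    # Illustrative report row accepted by parse_to_html (an assumed example,
    # mirroring row_template above):
    #   {'content': 'Affected people: %s', 'arguments': (123,), 'header': False}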
|
jpaasen/cos | refs/heads/master | framework/linalg/tests/testMatrixOperations.py | 1 | '''
Created on 29. aug. 2011
@author: jpaasen
'''
import unittest
import framework.mynumpy as np
class TestMatrixOperations(unittest.TestCase):
def setUp(self):
self.n = 3
self.A = np.array([[2.0, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]],dtype=complex)
self.A2 = np.array([[4.0, -2.0, 0.0], [-2.0, 4.0, -2.0], [0.0, -2.0, 4.0]],dtype=complex)
self.R = np.array([[1.414213562373095, -0.707106781186547, 0.0],
[0.0, 1.224744871391589, -0.816496580927726],
[0.0, 0.0, 1.154700538379252]],dtype=complex)
self.AA = np.array([[5, -4, 1], [-4, 6, -4], [1, -4, 5]],dtype=complex)
self.B = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]],dtype=complex)
self.BT = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]],dtype=complex)
self.b1 = np.array([1.0, 1.0, 1.0],dtype=complex)
self.x1 = np.array([3.0/2, 2.0, 3.0/2],dtype=complex)
self.b2 = np.array([1, 2, 3],dtype=complex)
self.x2 = np.array([5.0/2, 4, 7.0/2],dtype=complex)
self.C = np.array([[1, 2, 3], [0, 1, 1], [0, 0, 1]],dtype=complex)
self.b1c = np.array([1, 1, 1],dtype=complex)
self.x1c = np.array([-2, 0, 1],dtype=complex)
self.x1cT = np.array([1, -1, -1],dtype=complex)
self.x0_zero = np.zeros((3,), dtype=complex)
self.Ab2 = np.array([0, 0, 4],dtype=complex)
self.Ab1 = np.array([1, 0, 1],dtype=complex)
self.Ab1b1T = np.array([[3, 1, 3], [1, 6, 5], [3, 5, 11]],dtype=complex)
self.invA = np.array([[0.750, 0.50, 0.250], [0.50, 1.0, 0.50], [0.250, 0.50, 0.750]],dtype=complex)
self.invAb1b1T = np.array([[0.465909090909091, 0.045454545454545, -0.147727272727273],
[0.045454545454545, 0.272727272727273, -0.136363636363636],
[-0.147727272727273, -0.136363636363636, 0.193181818181818]],dtype=complex)
self.complexA = np.array([[2.0, 3.0 + 1.0j, 2.0 - 2.0j],
[3.0 - 1.0j, 9.0, -2.0j],
[2.0 + 2.0j, 2.0j, 14.0]], dtype=complex)
self.complexR = np.array([[1.414213562373095, 2.121320343559642 + 0.707106781186547j, 1.414213562373095 - 1.414213562373095j],
[0.0, 2.0, -1.0 + 1.0j],
[0.0, 0.0, 2.828427124746190]], dtype=complex)
self.complexb = np.array([1.0, 1.0 + 1.0j, 1.0 - 2.0j], dtype=complex)
self.complexy = np.array([0.707106781186547 - 0.0j,
-0.250 + 0.750j,
-0.353553390593273 - 0.883883476483184j], dtype=complex)
self.complexx = np.array([1.593749999999999 - 0.06250j,
-0.343750 + 0.281250j,
-0.1250 - 0.31250j], dtype=complex)
self.complexAb = np.array([2.0 - 2.0j, 8.0 + 6.0j, 14.0 - 24.0j], dtype=complex)
self.but4 = np.array([[0.5]*4, [0.5, -0.5j, -0.5, 0.5j], [0.5, -0.5]*2, [0.5, 0.5j, -0.5, -0.5j]], dtype=complex)
self.but3 = np.array([[0.577350269189626, 0.577350269189626, 0.577350269189626],
[0.577350269189626, -0.288675134594813 - 0.50j, -0.288675134594813 + 0.50j],
[0.577350269189626, -0.288675134594813 + 0.50j, -0.288675134594813 - 0.50j]], dtype=complex)
self.bsComplexb = np.array([1.732050807568878 - 0.577350269189626j, 1.50 + 0.288675134594813j], dtype=complex)
self.diag = 0.2
self.x = np.array([1.0, 1.0 + 1.0j, 1.0 - 2.0j, 2.0 + 1.0j], dtype=complex)
self.complexAbbH = np.array([[10.0, 11.0 + 1.0j, 10.0 - 2.0j],
[11.0 - 1.0j, 17.0, 8.0 - 2.0j],
[10.0 + 2.0j, 8.0 + 2.0j, 22.0]], dtype=complex)
self.complexInvAbbH = np.array([[1.067010309278351, -0.407216494845361 - 0.015463917525773j, -0.190721649484536 + 0.020618556701031j],
[-0.407216494845361 + 0.015463917525773j, 0.247422680412371, 0.077319587628866 - 0.015463917525773j],
[-0.190721649484536 - 0.020618556701031j, 0.077319587628866 + 0.015463917525773j, 0.087628865979381]], dtype=complex)
self.complexInvA = np.array([[1.906250, -0.593750 - 0.156250j, -0.250 + 0.18750j],
[-0.593750 + 0.156250j, 0.31250, 0.06250 - 0.06250j],
[-0.250 - 0.18750j, 0.06250 + 0.06250j, 0.1250]], dtype=complex)
self.complexA4x4 = np.array([[22.0, 8.0, 11.0 - 11.0j, 22.0 - 7.0j],
[8.0, 22.0, 17.0 - 2.0j, 11.0 - 7.0j],
[11.0 + 11.0j, 17.0 + 2.0j, 45.0, 23.0 - 5.0j],
[22.0 + 7.0j, 11.0 + 7.0j, 23.0 + 5.0j, 37.0]], dtype=complex)
self.U4x4 = np.array([[1.0000, 0.3636, 0.50 - 0.50j, 1.0 - 0.3182j],
[0.0, 1.0, 0.6810 + 0.1048j, 0.1571 - 0.2333j],
[0.0, 0.0, 1.0, 0.2776 - 0.3670j],
[0.0, 0.0, 0.0, 1.0]], dtype=complex)
self.D4x4 = np.array([22.0, 19.0909, 24.9381, 5.9806])
self.sonardata_R = np.array(np.load('./data/data_R.npy')) # created without diagonal loading
self.sonardata_a = np.array(np.load('./data/data_a.npy'))
self.sonardata_Ria = np.array(np.load('./data/data_Ria.npy'))
self.sonardata_ar = np.array(np.load('./data/data_ar.npy'))
self.sonardata_n = 32
# random data for testing
self.L = L = 24
self.d = d = 100
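        # Build a Hermitian positive-definite matrix A = U^H U from an upper
        # triangular U with a boosted diagonal, plus a random complex RHS b.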
U = np.triu(np.random.randn(L,L) + np.random.randn(L,L)*1j) + np.eye(L)*d
self.randA = np.dot(U.conjugate().T, U)
self.randb = np.random.randn(L) + np.random.randn(L)*1j
def tearDown(self):
pass
    def assertMatrixAlmostEqual(self, first, second, places):
lenFirst = len(first)
lenSecond = len(second)
if lenFirst != lenSecond:
self.fail('First and second list have different number of row elements')
i,j = 0,0
for row in first:
secRow = second[i]
try:
lenFirst = len(row)
except:
lenFirst = 1
row = [row]
try:
lenSecond = len(secRow)
except:
lenSecond = 1
secRow = [secRow]
if lenFirst != lenSecond:
self.fail('First and second list have different number of column elements')
j = 0
for rowElem in row:
secElem = secRow[j]
self.assertAlmostEqual(rowElem.real, secElem.real, places,
'Expected %.16f was %.16f (diff %e)' %(rowElem.real,secElem.real,rowElem.real-secElem.real))
self.assertAlmostEqual(rowElem.imag, secElem.imag, places,
'Expected ' + str(rowElem.imag) + ' was ' + str(secElem.imag))
j = j + 1
i = i + 1
|
glennlive/gnuradio-wg-grc | refs/heads/master | gnuradio-runtime/python/gnuradio/ctrlport/RPCConnectionThrift.py | 25 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from gnuradio.ctrlport.GNURadio import ControlPort
from gnuradio.ctrlport import RPCConnection
from gnuradio import gr
import pmt
import sys
class ThriftRadioClient:
def __init__(self, host, port):
self.tsocket = TSocket.TSocket(host, port)
self.transport = TTransport.TBufferedTransport(self.tsocket)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.radio = ControlPort.Client(self.protocol)
self.transport.open()
def __del__(self):
self.radio.shutdown()
self.transport.close()
def getRadio(self, host, port):
return self.radio
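# Illustrative use (assuming a ControlPort endpoint at localhost:9090):
#   client = ThriftRadioClient('localhost', 9090)
#   radio = client.getRadio('localhost', 9090)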
"""
RPC Client interface for the Apache Thrift middle-ware RPC transport.
Args:
port: port number of the connection
host: hostname of the connection
"""
class RPCConnectionThrift(RPCConnection.RPCConnection):
class Knob():
def __init__(self, key, value=None, ktype=0):
(self.key, self.value, self.ktype) = (key, value, ktype)
def __repr__(self):
return "({0} = {1})".format(self.key, self.value)
def __init__(self, host=None, port=None):
from gnuradio.ctrlport.GNURadio import ttypes
self.BaseTypes = ttypes.BaseTypes
self.KnobBase = ttypes.KnobBase
# If not set by the user, get the port number from the thrift
# config file, if one is set. Defaults to 9090 otherwise.
if port is None:
p = gr.prefs()
thrift_config_file = p.get_string("ControlPort", "config", "");
if(len(thrift_config_file) > 0):
p.add_config_file(thrift_config_file)
port = p.get_long("thrift", "port", 9090)
else:
port = 9090
else:
port = int(port)
super(RPCConnectionThrift, self).__init__(method='thrift', port=port, host=host)
self.newConnection(host, port)
self.unpack_dict = {
self.BaseTypes.BOOL: lambda k,b: self.Knob(k, b.value.a_bool, self.BaseTypes.BOOL),
self.BaseTypes.BYTE: lambda k,b: self.Knob(k, b.value.a_byte, self.BaseTypes.BYTE),
self.BaseTypes.SHORT: lambda k,b: self.Knob(k, b.value.a_short, self.BaseTypes.SHORT),
self.BaseTypes.INT: lambda k,b: self.Knob(k, b.value.a_int, self.BaseTypes.INT),
self.BaseTypes.LONG: lambda k,b: self.Knob(k, b.value.a_long, self.BaseTypes.LONG),
self.BaseTypes.DOUBLE: lambda k,b: self.Knob(k, b.value.a_double, self.BaseTypes.DOUBLE),
self.BaseTypes.STRING: lambda k,b: self.Knob(k, b.value.a_string, self.BaseTypes.STRING),
self.BaseTypes.COMPLEX: lambda k,b: self.Knob(k, b.value.a_complex, self.BaseTypes.COMPLEX),
self.BaseTypes.F32VECTOR: lambda k,b: self.Knob(k, b.value.a_f32vector, self.BaseTypes.F32VECTOR),
self.BaseTypes.F64VECTOR: lambda k,b: self.Knob(k, b.value.a_f64vector, self.BaseTypes.F64VECTOR),
self.BaseTypes.S64VECTOR: lambda k,b: self.Knob(k, b.value.a_s64vector, self.BaseTypes.S64VECTOR),
self.BaseTypes.S32VECTOR: lambda k,b: self.Knob(k, b.value.a_s32vector, self.BaseTypes.S32VECTOR),
self.BaseTypes.S16VECTOR: lambda k,b: self.Knob(k, b.value.a_s16vector, self.BaseTypes.S16VECTOR),
self.BaseTypes.S8VECTOR: lambda k,b: self.Knob(k, b.value.a_s8vector, self.BaseTypes.S8VECTOR),
self.BaseTypes.C32VECTOR: lambda k,b: self.Knob(k, b.value.a_c32vector, self.BaseTypes.C32VECTOR),
}
self.pack_dict = {
self.BaseTypes.BOOL: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_bool = k.value)),
self.BaseTypes.BYTE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_byte = k.value)),
self.BaseTypes.SHORT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_short = k.value)),
self.BaseTypes.INT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_int = k.value)),
self.BaseTypes.LONG: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_long = k.value)),
self.BaseTypes.DOUBLE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_double = k.value)),
self.BaseTypes.STRING: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_string = k.value)),
self.BaseTypes.COMPLEX: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_complex = k.value)),
self.BaseTypes.F32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f32vector = k.value)),
self.BaseTypes.F64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f64vector = k.value)),
self.BaseTypes.S64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s64vector = k.value)),
self.BaseTypes.S32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s32vector = k.value)),
self.BaseTypes.S16VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s16vector = k.value)),
self.BaseTypes.S8VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s8vector = k.value)),
self.BaseTypes.C32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_c32vector = k.value)),
}
def unpackKnob(self, key, knob):
f = self.unpack_dict.get(knob.type, None)
if(f):
return f(key, knob)
else:
sys.stderr.write("unpackKnobs: Incorrect Knob type: {0}\n".format(knob.type))
            raise ValueError
def packKnob(self, knob):
f = self.pack_dict.get(knob.ktype, None)
if(f):
return f(knob)
else:
sys.stderr.write("packKnobs: Incorrect Knob type: {0}\n".format(knob.type))
            raise ValueError
def newConnection(self, host=None, port=None):
try:
self.thriftclient = ThriftRadioClient(self.getHost(), self.getPort())
except TTransport.TTransportException:
sys.stderr.write("Could not connect to ControlPort endpoint at {0}:{1}.\n\n".format(host, port))
sys.exit(1)
def properties(self, *args):
knobprops = self.thriftclient.radio.properties(*args)
for key, knobprop in knobprops.iteritems():
#print("key:", key, "value:", knobprop, "type:", knobprop.type)
knobprops[key].min = self.unpackKnob(key, knobprop.min)
knobprops[key].max = self.unpackKnob(key, knobprop.max)
knobprops[key].defaultvalue = self.unpackKnob(key, knobprop.defaultvalue)
return knobprops
def getKnobs(self, *args):
result = {}
for key, knob in self.thriftclient.radio.getKnobs(*args).iteritems():
#print("key:", key, "value:", knob, "type:", knob.type)
result[key] = self.unpackKnob(key, knob)
# If complex, convert to Python complex
# FIXME: better list iterator way to handle this?
if(knob.type == self.BaseTypes.C32VECTOR):
for i in xrange(len(result[key].value)):
result[key].value[i] = complex(result[key].value[i].re,
result[key].value[i].im)
return result
def getKnobsRaw(self, *args):
result = {}
for key, knob in self.thriftclient.radio.getKnobs(*args).iteritems():
#print("key:", key, "value:", knob, "type:", knob.type)
result[key] = knob
return result
def getRe(self,*args):
result = {}
for key, knob in self.thriftclient.radio.getRe(*args).iteritems():
result[key] = self.unpackKnob(key, knob)
return result
def setKnobs(self, *args):
if(type(*args) == dict):
a = dict(*args)
result = {}
for key, knob in a.iteritems():
result[key] = self.packKnob(knob)
self.thriftclient.radio.setKnobs(result)
elif(type(*args) == list or type(*args) == tuple):
a = list(*args)
result = {}
for k in a:
result[k.key] = self.packKnob(k)
self.thriftclient.radio.setKnobs(result)
else:
sys.stderr.write("setKnobs: Invalid type; must be dict, list, or tuple\n")
def shutdown(self):
self.thriftclient.radio.shutdown()
def postMessage(self, blk_alias, port, msg):
'''
blk_alias: the alias of the block we are posting the message
to; must have an open message port named 'port'.
Provide as a string.
port: The name of the message port we are sending the message to.
Provide as a string.
msg: The actual message. Provide this as a PMT of the form
right for the message port.
The alias and port names are converted to PMT symbols and
serialized. The msg is already a PMT and so just serialized.
'''
self.thriftclient.radio.postMessage(pmt.serialize_str(pmt.intern(blk_alias)),
pmt.serialize_str(pmt.intern(port)),
pmt.serialize_str(msg));
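    # Illustrative call (assuming a block aliased 'blk0' exposing message
    # port 'in'):
    #   conn.postMessage('blk0', 'in', pmt.intern('start'))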
def printProperties(self, props):
info = ""
info += "Item:\t\t{0}\n".format(props.description)
info += "units:\t\t{0}\n".format(props.units)
info += "min:\t\t{0}\n".format(props.min.value)
info += "max:\t\t{0}\n".format(props.max.value)
info += "default:\t\t{0}\n".format(props.defaultvalue.value)
info += "Type Code:\t0x{0:x}\n".format(props.type)
info += "Disp Code:\t0x{0:x}\n".format(props.display)
return info
|
veger/ansible | refs/heads/devel | lib/ansible/utils/listify.py | 100 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
from ansible.module_utils.common._collections_compat import Iterable
from ansible.template.safe_eval import safe_eval
__all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=True, convert_bare=False):
if isinstance(terms, string_types):
terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [terms]
return terms
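# Illustrative behaviour (assuming 'items' templates to ['a', 'b']):
#   listify_lookup_plugin_terms("{{ items }}", templar, loader)  # -> ['a', 'b']
#   listify_lookup_plugin_terms(42, templar, loader)             # -> [42]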
|
nzlosh/st2 | refs/heads/master | st2api/st2api/controllers/v1/rbac.py | 3 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2api.controllers.resource import ResourceController
from st2common.models.api.rbac import RoleAPI
from st2common.models.api.rbac import UserRoleAssignmentAPI
from st2common.persistence.rbac import Role
from st2common.rbac.types import get_resource_permission_types_with_descriptions
from st2common.persistence.rbac import UserRoleAssignment
from st2common.rbac.backends import get_rbac_backend
from st2common.router import exc
__all__ = ["RolesController", "RoleAssignmentsController", "PermissionTypesController"]
class RolesController(ResourceController):
model = RoleAPI
access = Role
supported_filters = {"name": "name", "system": "system"}
query_options = {"sort": ["name"]}
def get_one(self, name_or_id, requester_user):
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin(user_db=requester_user)
return self._get_one_by_name_or_id(
name_or_id=name_or_id, permission_type=None, requester_user=requester_user
)
def get_all(self, requester_user, sort=None, offset=0, limit=None, **raw_filters):
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin(user_db=requester_user)
return self._get_all(
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user,
)
class RoleAssignmentsController(ResourceController):
"""
Meta controller for listing role assignments.
"""
model = UserRoleAssignmentAPI
access = UserRoleAssignment
supported_filters = {
"user": "user",
"role": "role",
"source": "source",
"remote": "is_remote",
}
def get_all(self, requester_user, sort=None, offset=0, limit=None, **raw_filters):
user = raw_filters.get("user", None)
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin_or_operating_on_own_resource(
user_db=requester_user, user=user
)
return self._get_all(
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user,
)
def get_one(self, id, requester_user):
result = self._get_one_by_id(
id, requester_user=requester_user, permission_type=None
)
user = getattr(result, "user", None)
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin_or_operating_on_own_resource(
user_db=requester_user, user=user
)
return result
class PermissionTypesController(object):
"""
Meta controller for listing all the available permission types.
"""
def get_all(self, requester_user):
"""
List all the available permission types.
Handles requests:
GET /rbac/permission_types
"""
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin(user_db=requester_user)
result = get_resource_permission_types_with_descriptions()
return result
def get_one(self, resource_type, requester_user):
"""
List all the available permission types for a particular resource type.
Handles requests:
GET /rbac/permission_types/<resource type>
"""
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_is_admin(user_db=requester_user)
all_permission_types = get_resource_permission_types_with_descriptions()
permission_types = all_permission_types.get(resource_type, None)
if permission_types is None:
raise exc.HTTPNotFound("Invalid resource type: %s" % (resource_type))
return permission_types
roles_controller = RolesController()
role_assignments_controller = RoleAssignmentsController()
permission_types_controller = PermissionTypesController()
|
gaddman/ansible | refs/heads/devel | lib/ansible/executor/task_result.py | 40 | # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.parsing.dataloader import DataLoader
from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
_IGNORE = ('failed', 'skipped')
_PRESERVE = ('attempts', 'changed', 'retries')
_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
class TaskResult:
'''
This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
'''
def __init__(self, host, task, return_data, task_fields=None):
self._host = host
self._task = task
if isinstance(return_data, dict):
self._result = return_data.copy()
else:
self._result = DataLoader().load(return_data)
if task_fields is None:
self._task_fields = dict()
else:
self._task_fields = task_fields
@property
def task_name(self):
return self._task_fields.get('name', None) or self._task.get_name()
def is_changed(self):
return self._check_key('changed')
def is_skipped(self):
# loop results
if 'results' in self._result:
results = self._result['results']
# Loop tasks are only considered skipped if all items were skipped.
# some squashed results (eg, yum) are not dicts and can't be skipped individually
if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
return True
# regular tasks and squashed non-dict results
return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed')
def is_unreachable(self):
return self._check_key('unreachable')
def needs_debugger(self, globally_enabled=False):
_debugger = self._task_fields.get('debugger')
_ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
ret = False
if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
ret = True
if _debugger in ('always',):
ret = True
elif _debugger in ('never',):
ret = False
elif _debugger in ('on_failed',) and self.is_failed() and not _ignore_errors:
ret = True
elif _debugger in ('on_unreachable',) and self.is_unreachable():
ret = True
elif _debugger in ('on_skipped',) and self.is_skipped():
ret = True
return ret
def _check_key(self, key):
'''get a specific key from the result or its items'''
if isinstance(self._result, dict) and key in self._result:
return self._result.get(key, False)
else:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
return flag
def clean_copy(self):
''' returns 'clean' taskresult object '''
# FIXME: clean task_fields, _task and _host copies
result = TaskResult(self._host, self._task, {}, self._task_fields)
# statuses are already reflected on the event type
if result._task and result._task.action in ['debug']:
# debug is verbose by default to display vars, no need to add invocation
ignore = _IGNORE + ('invocation',)
else:
ignore = _IGNORE
if isinstance(self._task.no_log, bool) and self._task.no_log or self._result.get('_ansible_no_log', False):
x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
# preserve full
for preserve in _PRESERVE:
if preserve in self._result:
x[preserve] = self._result[preserve]
# preserve subset
for sub in _SUB_PRESERVE:
if sub in self._result:
x[sub] = {}
for key in _SUB_PRESERVE[sub]:
if key in self._result[sub]:
x[sub][key] = self._result[sub][key]
result._result = x
elif self._result:
result._result = module_response_deepcopy(self._result)
# actually remove
for remove_key in ignore:
if remove_key in result._result:
del result._result[remove_key]
# remove almost ALL internal keys, keep ones relevant to callback
strip_internal_keys(result._result, exceptions=('_ansible_verbose_always', '_ansible_item_label', '_ansible_no_log'))
return result
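# A minimal usage sketch (hypothetical values; host and task are normally
# Host/Task objects, but the status helpers only need the result dict):
#     tr = TaskResult(host=None, task=None, return_data={'changed': True})
#     tr.is_changed()    # -> True
#     tr.is_failed()     # -> False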
|
wintoncode/winton-kafka-streams | refs/heads/master | examples/binning/source.py | 1 | """Provides a wrapper to randomise whether underlying prices are generated"""
from random import Random as _Random
class Source(object):
"""\
Provides iterable class wrapping a price source and randomly produces a
price or not.
"""
def __init__(self, prob, prices, seed=123):
self.prob = prob
self.prices = prices
self._rand = _Random(seed)
def __next__(self):
return self.maybe_next_price()
def __iter__(self):
return self
def maybe_next_price(self):
"""Based on the probability, return a price or None"""
if self._rand.uniform(0.0, 1.0) <= self.prob:
return next(self.prices)
return None
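# Hedged usage sketch (any infinite price iterator works; itertools.count
# merely stands in for a real feed):
#     from itertools import count
#     src = Source(prob=0.5, prices=count(100))
#     samples = [next(src) for _ in range(5)]   # mix of prices and Nones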
|
undoware/neutron-drive | refs/heads/master | google_appengine/lib/django_1_3/tests/modeltests/reverse_lookup/__init__.py | 12133432 | |
savi-dev/quantum | refs/heads/master | quantum/plugins/nicira/__init__.py | 12133432 | |
machinaut/gym | refs/heads/master | gym/envs/tests/__init__.py | 12133432 | |
libracore/erpnext | refs/heads/v12 | erpnext/accounts/report/bank_clearance_summary/__init__.py | 12133432 | |
j0057/ansible-1 | refs/heads/fix-powershell-shebang-not-found | plugins/inventory/openshift.py | 132 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
    - this permits reusing playbooks to set up an Openshift gear
version_added: None
author: Michael Scherer
'''
import urllib2
try:
import json
except ImportError:
import simplejson as json
import os
import os.path
import sys
import ConfigParser
import StringIO
configparser = None
def get_from_rhc_config(variable):
global configparser
CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
if os.path.exists(CONF_FILE):
if not configparser:
ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
configparser = ConfigParser.SafeConfigParser()
configparser.readfp(StringIO.StringIO(ini_str))
try:
return configparser.get('root', variable)
except ConfigParser.NoOptionError:
return None
def get_config(env_var, config_var):
result = os.getenv(env_var)
if not result:
result = get_from_rhc_config(config_var)
if not result:
print "failed=True msg='missing %s'" % env_var
sys.exit(1)
return result
def get_json_from_api(url):
req = urllib2.Request(url, None, {'Accept': 'application/json; version=1.5'})
response = urllib2.urlopen(req)
return json.loads(response.read())['data']
def passwd_setup(top_level_url, username, password):
# create a password manager
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
passwd_setup(broker_url, username, password)
response = get_json_from_api(broker_url + '/domains')
response = get_json_from_api("%s/domains/%s/applications" %
(broker_url, response[0]['id']))
result = {}
for app in response:
# ssh://520311404832ce3e570000ff@blog-johndoe.example.org
(user, host) = app['ssh_url'][6:].split('@')
app_name = host.split('-')[0]
result[app_name] = {}
result[app_name]['hosts'] = []
result[app_name]['hosts'].append(host)
result[app_name]['vars'] = {}
result[app_name]['vars']['ansible_ssh_user'] = user
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({})
else:
print "Need an argument, either --list or --host <host>"
|
dbergan/AutobahnPython | refs/heads/master | examples/twisted/websocket/echo_httpheaders/client.py | 18 | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
print(response)
def sendHello(self):
self.sendMessage("Hello, world!".encode('utf8'))
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://localhost:9000")
sys.exit(1)
if len(sys.argv) > 2 and sys.argv[2] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
headers = {'MyCustomClientHeader': 'Bazbar'}
factory = WebSocketClientFactory(sys.argv[1],
headers = headers,
debug = debug,
debugCodePaths = debug)
factory.protocol = EchoClientProtocol
connectWS(factory)
reactor.run()
|
MissionCriticalCloud/marvin | refs/heads/master | marvin/cloudstackAPI/createInstanceGroup.py | 1 | """Creates a vm group"""
from baseCmd import *
from baseResponse import *
class createInstanceGroupCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "false"
"""the name of the instance group"""
"""Required"""
self.name = None
self.typeInfo['name'] = 'string'
"""the account of the instance group. The account parameter must be used with the domainId parameter."""
self.account = None
self.typeInfo['account'] = 'string'
"""the domain ID of account owning the instance group"""
self.domainid = None
self.typeInfo['domainid'] = 'uuid'
"""The project of the instance group"""
self.projectid = None
self.typeInfo['projectid'] = 'uuid'
self.required = ["name", ]
class createInstanceGroupResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the instance group"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account owning the instance group"""
self.account = None
self.typeInfo['account'] = 'string'
"""time and date the instance group was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""the domain name of the instance group"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the domain ID of the instance group"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the name of the instance group"""
self.name = None
self.typeInfo['name'] = 'string'
"""the project name of the instance group"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project ID of the instance group"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
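# Hedged usage sketch (the apiclient/createInstanceGroup dispatch is assumed
# from marvin's usual command pattern, not defined in this module):
#     cmd = createInstanceGroupCmd()
#     cmd.name = 'web-tier'
#     response = apiclient.createInstanceGroup(cmd)
#     print(response.id)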
|
shangvven/Wox | refs/heads/master | PythonHome/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py | 1734 | from __future__ import absolute_import, division, unicode_literals
from . import _base
from ..sanitizer import HTMLSanitizerMixin
class Filter(_base.Filter, HTMLSanitizerMixin):
def __iter__(self):
for token in _base.Filter.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
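# Hedged usage sketch (treewalker/serializer names assumed from the
# html5lib 0.x API this vendored copy ships with):
#     import html5lib
#     from html5lib import treewalkers
#     from html5lib.serializer import HTMLSerializer
#     doc = html5lib.parse("<p onclick='evil()'>hi</p>")
#     stream = Filter(treewalkers.getTreeWalker("etree")(doc))
#     print(HTMLSerializer().render(stream))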
|
yxl/emscripten-calligra-mobile | refs/heads/calligra/2.8 | 3rdparty/google-breakpad/src/tools/gyp/test/generator-output/gyptest-subdir2-deep.py | 74 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target from a .gyp file a few subdirectories
deep when the --generator-output= option is used to put the build
configuration files in a separate directory tree.
"""
import TestGyp
# Ninja and Android don't support --generator-output.
test = TestGyp.TestGyp(formats=['!ninja', '!android'])
test.writable(test.workpath('src'), False)
test.writable(test.workpath('src/subdir2/deeper/build'), True)
test.run_gyp('deeper.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src/subdir2/deeper')
test.build('deeper.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
if test.format == 'xcode':
chdir = 'src/subdir2/deeper'
test.run_built_executable('deeper',
chdir=chdir,
stdout="Hello from deeper.c\n")
test.pass_test()
|
LokiCoder/Sick-Beard | refs/heads/torrent_1080_subtitles | sickbeard/naming.py | 4 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import sickbeard
from sickbeard import encodingKludge as ek
from sickbeard import tv
from sickbeard import common
from sickbeard import logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from common import Quality, DOWNLOADED
name_presets = ('%SN - %Sx%0E - %EN',
'%S.N.S%0SE%0E.%E.N',
'%Sx%0E - %EN',
'S%0SE%0E - %EN',
'Season %0S/%S.N.S%0SE%0E.%Q.N-%RG'
)
name_abd_presets = ('%SN - %A-D - %EN',
'%S.N.%A.D.%E.N.%Q.N',
'%Y/%0M/%S.N.%A.D.%E.N-%RG'
)
class TVShow():
def __init__(self):
self.name = "Show Name"
self.genre = "Comedy"
self.air_by_date = 0
class TVEpisode(tv.TVEpisode):
def __init__(self, season, episode, name):
self.relatedEps = []
self._name = name
self._season = season
self._episode = episode
self._airdate = datetime.date(2010, 3, 9)
self.show = TVShow()
self._status = Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV)
self._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP'
def check_force_season_folders(pattern=None, multi=None):
"""
Checks whether the name can still be parsed once the folder part is stripped off, to determine whether season
folders need to be forced on.
Returns True if season folders need to be forced on, False otherwise.
"""
if pattern is None:
pattern = sickbeard.NAMING_PATTERN
valid = not validate_name(pattern, None, file_only=True)
if multi is not None:
valid = valid or not validate_name(pattern, multi, file_only=True)
return valid
def check_valid_naming(pattern=None, multi=None):
"""
Checks whether the name can be parsed back to its original form for both single and multi episodes.
Returns True if the naming is valid, False if not.
"""
if pattern is None:
pattern = sickbeard.NAMING_PATTERN
logger.log(u"Checking whether the pattern " + pattern + " is valid for a single episode", logger.DEBUG)
valid = validate_name(pattern, None)
if multi is not None:
logger.log(u"Checking whether the pattern " + pattern + " is valid for a multi episode", logger.DEBUG)
valid = valid and validate_name(pattern, multi)
return valid
def check_valid_abd_naming(pattern=None):
"""
Checks whether the name can be parsed back to its original form for an air-by-date format.
Returns True if the naming is valid, False if not.
"""
if pattern is None:
pattern = sickbeard.NAMING_PATTERN
logger.log(u"Checking whether the pattern " + pattern + " is valid for an air-by-date episode", logger.DEBUG)
valid = validate_name(pattern, abd=True)
return valid
def validate_name(pattern, multi=None, file_only=False, abd=False):
ep = _generate_sample_ep(multi, abd)
parser = NameParser(True)
new_name = ep.formatted_filename(pattern, multi) + '.ext'
new_path = ep.formatted_dir(pattern, multi)
if not file_only:
new_name = ek.ek(os.path.join, new_path, new_name)
if not new_name:
logger.log(u"Unable to create a name out of " + pattern, logger.DEBUG)
return False
logger.log(u"Trying to parse " + new_name, logger.DEBUG)
try:
result = parser.parse(new_name)
except InvalidNameException:
logger.log(u"Unable to parse " + new_name + ", not valid", logger.DEBUG)
return False
logger.log(u"The name " + new_name + " parsed into " + str(result), logger.DEBUG)
if abd:
if result.air_date != ep.airdate:
logger.log(u"Air date incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
return False
else:
if result.season_number != ep.season:
logger.log(u"Season incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
return False
if result.episode_numbers != [x.episode for x in [ep] + ep.relatedEps]:
logger.log(u"Episode incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
return False
return True
def _generate_sample_ep(multi=None, abd=False):
# make a fake episode object
ep = TVEpisode(2, 3, "Ep Name")
ep._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
ep._airdate = datetime.date(2011, 3, 9)
if abd:
ep._release_name = 'Show.Name.2011.03.09.HDTV.XviD-RLSGROUP'
else:
ep._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP'
if multi is not None:
ep._name = "Ep Name (1)"
ep._release_name = 'Show.Name.S02E03E04E05.HDTV.XviD-RLSGROUP'
secondEp = TVEpisode(2, 4, "Ep Name (2)")
secondEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
secondEp._release_name = ep._release_name
thirdEp = TVEpisode(2, 5, "Ep Name (3)")
thirdEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
thirdEp._release_name = ep._release_name
ep.relatedEps.append(secondEp)
ep.relatedEps.append(thirdEp)
return ep
def test_name(pattern, multi=None, abd=False):
ep = _generate_sample_ep(multi, abd)
return {'name': ep.formatted_filename(pattern, multi), 'dir': ep.formatted_dir(pattern, multi)}
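# Hedged sketch: previewing a preset against the fake episode above (the
# output shape is approximate; the exact strings come from tv.TVEpisode):
#     test_name(name_presets[0])
#     # -> {'name': 'Show Name - 2x03 - Ep Name', 'dir': ...}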
|
tashaxe/Red-DiscordBot | refs/heads/develop | lib/pip/_vendor/requests/packages/chardet/chardetect.py | 1785 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
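# Hedged usage sketch (the file name is illustrative):
#     with open('somefile', 'rb') as f:
#         print(description_of(f, 'somefile'))
#     # -> "somefile: windows-1252 with confidence 0.5" (for example)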
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
shortlab/mamba | refs/heads/master | problems/Miaomiao/MultiApp_2Phase_chem/InterpolationTransfer/parametric_chimneydensity_typicalPWR/MyScript_vaporheight.py | 12 | #!/usr/bin/env python
import csv, sys
from collections import defaultdict
from numpy import *
# GOAL: based on the vapor_height value from sub_5th_noTransfer.csv, we would change the vapor part dimension and corresponding ...
thickness=0.025 #crud thicknesss
columns1 = defaultdict(list) # each value in each column is appended to a list
columns2 = defaultdict(list) # each value in each column is appended to a list
with open('subsub_5th_typical.csv') as f:
reader = csv.DictReader(f) # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
columns1[k].append(v) # append the value into the appropriate list
# based on column name k
with open('sub_5th_typical.csv') as f:
reader = csv.DictReader(f) # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
columns2[k].append(v) # append the value into the appropriate list
# based on column name k
thickness1=[float(columns1['vapor_height'][-3]),float(columns1['vapor_height'][-2]),float(columns1['vapor_height'][-1])]
vapor_height1=mean(thickness1)
thickness2=[float(columns2['vapor_height'][-3]),float(columns2['vapor_height'][-2]),float(columns2['vapor_height'][-1])]
vapor_height2=mean(thickness2)
print(vapor_height2)
|
pinkavaj/gnuradio | refs/heads/master | gr-digital/examples/ofdm/benchmark_rx.py | 54 | #!/usr/bin/env python
#
# Copyright 2006,2007,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
from gnuradio import blocks
from gnuradio import digital
# from current dir
from receive_path import receive_path
from uhd_interface import uhd_receiver
import struct, sys
class my_top_block(gr.top_block):
def __init__(self, callback, options):
gr.top_block.__init__(self)
if(options.rx_freq is not None):
self.source = uhd_receiver(options.args,
options.bandwidth, options.rx_freq,
options.lo_offset, options.rx_gain,
options.spec, options.antenna,
options.clock_source, options.verbose)
elif(options.from_file is not None):
self.source = blocks.file_source(gr.sizeof_gr_complex, options.from_file)
else:
self.source = blocks.null_source(gr.sizeof_gr_complex)
# Set up receive path
# do this after for any adjustments to the options that may
# occur in the sinks (specifically the UHD sink)
self.rxpath = receive_path(callback, options)
self.connect(self.source, self.rxpath)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
global n_rcvd, n_right
n_rcvd = 0
n_right = 0
def rx_callback(ok, payload):
global n_rcvd, n_right
n_rcvd += 1
(pktno,) = struct.unpack('!H', payload[0:2])
if ok:
n_right += 1
print "ok: %r \t pktno: %d \t n_rcvd: %d \t n_right: %d" % (ok, pktno, n_rcvd, n_right)
if 0:
printlst = list()
for x in payload[2:]:
t = hex(ord(x)).replace('0x', '')
if(len(t) == 1):
t = '0' + t
printlst.append(t)
printable = ''.join(printlst)
print printable
print "\n"
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("","--discontinuous", action="store_true", default=False,
help="enable discontinuous")
parser.add_option("","--from-file", default=None,
help="input file of samples to demod")
receive_path.add_options(parser, expert_grp)
uhd_receiver.add_options(parser)
digital.ofdm_demod.add_options(parser, expert_grp)
(options, args) = parser.parse_args ()
if options.from_file is None:
if options.rx_freq is None:
sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
parser.print_help(sys.stderr)
sys.exit(1)
# build the graph
tb = my_top_block(rx_callback, options)
r = gr.enable_realtime_scheduling()
if r != gr.RT_OK:
print "Warning: failed to enable realtime scheduling"
tb.start() # start flow graph
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
lindycoder/netman | refs/heads/master | tests/adapters/switches/__init__.py | 12133432 | |
elkingtonmcb/h2o-2 | refs/heads/master | h2o-docs-theme/demo_docs/source/test_py_module/__init__.py | 12133432 | |
chrishas35/django-travis-ci | refs/heads/master | tests/regressiontests/generic_views/__init__.py | 12133432 | |
tkaitchuck/nupic | refs/heads/master | external/linux64/lib/python2.6/site-packages/PIL/ImageDraw.py | 13 | #
# The Python Imaging Library
# $Id: ImageDraw.py 2817 2006-10-07 15:34:03Z fredrik $
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageColor
try:
import warnings
except ImportError:
warnings = None
##
# A simple 2D drawing interface for PIL images.
# <p>
# Application code should use the <b>Draw</b> factory, instead of
# instantiating this class directly.
class ImageDraw:
##
# Create a drawing instance.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def __init__(self, im, mode=None):
im.load()
if im.readonly:
im._copy() # make it writable
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1, mode)
else:
self.ink = self.draw.draw_ink(-1, mode)
if mode in ("1", "P", "I", "F"):
# FIXME: fix Fill2 to properly support matte for I+F images
self.fontmode = "1"
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
##
# Set the default pen color.
def setink(self, ink):
# compatibility
if warnings:
warnings.warn(
"'setink' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
self.ink = self.draw.draw_ink(ink, self.mode)
##
# Set the default background color.
def setfill(self, onoff):
# compatibility
if warnings:
warnings.warn(
"'setfill' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
self.fill = onoff
##
# Set the default font.
def setfont(self, font):
# compatibility
self.font = font
##
# Get the current default font.
def getfont(self):
if not self.font:
# FIXME: should add a font repository
import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink, self.mode)
if fill is not None:
if Image.isStringType(fill):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not Image.isNumberType(fill):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill, self.mode)
return ink, fill
##
# Draw an arc.
def arc(self, xy, start, end, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink)
##
# Draw a bitmap.
def bitmap(self, xy, bitmap, fill=None):
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
##
# Draw a chord.
def chord(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_chord(xy, start, end, ink, 0)
##
# Draw an ellipse.
def ellipse(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None:
self.draw.draw_ellipse(xy, ink, 0)
##
# Draw a line, or a connected sequence of line segments.
def line(self, xy, fill=None, width=0):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_lines(xy, ink, width)
##
# (Experimental) Draw a shape.
def shape(self, shape, fill=None, outline=None):
# experimental
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None:
self.draw.draw_outline(shape, ink, 0)
##
# Draw a pieslice.
def pieslice(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_pieslice(xy, start, end, ink, 0)
##
# Draw one or more individual pixels.
def point(self, xy, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_points(xy, ink)
##
# Draw a polygon.
def polygon(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_polygon(xy, fill, 1)
if ink is not None:
self.draw.draw_polygon(xy, ink, 0)
##
# Draw a rectangle.
def rectangle(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_rectangle(xy, fill, 1)
if ink is not None:
self.draw.draw_rectangle(xy, ink, 0)
##
# Draw text.
def text(self, xy, text, fill=None, font=None, anchor=None):
ink, fill = self._getink(fill)
if font is None:
font = self.getfont()
if ink is None:
ink = fill
if ink is not None:
try:
mask, offset = font.getmask2(text, self.fontmode)
xy = xy[0] + offset[0], xy[1] + offset[1]
except AttributeError:
try:
mask = font.getmask(text, self.fontmode)
except TypeError:
mask = font.getmask(text)
self.draw.draw_bitmap(xy, mask, ink)
##
# Get the size of a given string, in pixels.
def textsize(self, text, font=None):
if font is None:
font = self.getfont()
return font.getsize(text)
##
# A simple 2D drawing interface for PIL images.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def Draw(im, mode=None):
try:
return im.getdraw(mode)
except AttributeError:
return ImageDraw(im, mode)
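# A minimal usage sketch (colors and coordinates are arbitrary examples):
#     im = Image.new("RGB", (100, 100), "white")
#     d = Draw(im)
#     d.line((0, 0, 99, 99), fill="black", width=2)
#     d.rectangle((10, 10, 50, 50), outline="red")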
# experimental access to the outline API
try:
Outline = Image.core.outline
except:
Outline = None
##
# (Experimental) A more advanced 2D drawing interface for PIL images,
# based on the WCK interface.
#
# @param im The image to draw in.
# @param hints An optional list of hints.
# @return A (drawing context, drawing resource factory) tuple.
def getdraw(im=None, hints=None):
# FIXME: this needs more work!
# FIXME: come up with a better 'hints' scheme.
handler = None
if not hints or "nicest" in hints:
try:
import _imagingagg
handler = _imagingagg
except ImportError:
pass
if handler is None:
import ImageDraw2
handler = ImageDraw2
if im:
im = handler.Draw(im)
return im, handler
##
# (experimental) Fills a bounded region with a given color.
#
# @param image Target image.
# @param xy Seed position (a 2-item coordinate tuple).
# @param value Fill color.
# @param border Optional border value. If given, the region consists of
# pixels with a color different from the border color. If not given,
# the region consists of pixels having the same color as the seed
# pixel.
def floodfill(image, xy, value, border=None):
"Fill bounded region."
# based on an implementation by Eric S. Raymond
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if background == value:
return # seed point already has fill color
pixel[x, y] = value
except IndexError:
return # seed point outside image
edge = [(x, y)]
if border is None:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p == background:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
else:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p != value and p != border:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
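# Hedged usage sketch for floodfill (values are arbitrary examples):
#     im = Image.new("RGB", (64, 64), "white")
#     floodfill(im, (32, 32), (255, 0, 0))   # fills the whole white region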
|
alvin319/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/weakref.py | 187 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet']
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[wr.key]
self._remove = remove
UserDict.UserDict.__init__(self, *args, **kw)
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen when the referent has died but its
# removal callback hasn't pruned the entry yet
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
return self.data.iterkeys()
def __iter__(self):
return self.data.iterkeys()
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.itervalues()
def itervalues(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
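# Hedged usage sketch (CPython's refcounting collects `o` immediately; other
# implementations may delay the removal until a GC pass):
#     class Obj(object):
#         pass
#     d = WeakValueDictionary()
#     o = Obj()
#     d['k'] = o
#     'k' in d     # True while o is strongly referenced
#     del o
#     'k' in d     # False once o has been collected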
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[k]
self._remove = remove
if dict is not None: self.update(dict)
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.iterkeys()
def iterkeys(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
def __iter__(self):
return self.iterkeys()
def itervalues(self):
return self.data.itervalues()
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
|
yongju-hong/thrift | refs/heads/master | test/crossrunner/util.py | 17 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
def domain_socket_path(port):
return '/tmp/ThriftTest.thrift.%d' % port
def merge_dict(base, update):
"""Update dict concatenating list values"""
res = copy.deepcopy(base)
for k, v in list(update.items()):
if k in list(res.keys()) and isinstance(v, list):
res[k].extend(v)
else:
res[k] = v
return res
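# Hedged usage sketch: list values are concatenated, everything else is
# overwritten or added (the inputs here are arbitrary examples):
#     merge_dict({'args': ['--a'], 'port': 9090}, {'args': ['--b'], 'env': 'x'})
#     # -> {'args': ['--a', '--b'], 'port': 9090, 'env': 'x'}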
|
IanLewis/podhmo_loader | refs/heads/master | example/manage.py | 480 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
vmlaker/mpipe | refs/heads/master | test/count_nullops.py | 1 | from datetime import datetime
from builtins import range
def get_num_null_ops(duration, max_sample=1.0):
"""Return number of do-nothing loop iterations."""
for amount in [2**x for x in range(100)]: # 1,2,4,8,...
begin = datetime.now()
for ii in range(amount):
pass
elapsed = (datetime.now() - begin).total_seconds()
if elapsed > max_sample:
break
return int(amount/elapsed*duration)
if __name__ == '__main__':
print(get_num_null_ops(1.0))
|
JorgeCoock/django | refs/heads/master | tests/proxy_model_inheritance/app2/__init__.py | 12133432 | |
uTest/Androguard | refs/heads/master | androguard/__init__.py | 12133432 | |
jazcollins/models | refs/heads/master | object_detection/utils/__init__.py | 12133432 | |
ampax/edx-platform-backup | refs/heads/live | cms/djangoapps/xblock_config/__init__.py | 12133432 | |
chongtianfeiyu/kbengine | refs/heads/master | kbe/src/lib/python/Lib/asyncio/futures.py | 63 | """A Future class similar to the one in PEP 3148."""
__all__ = ['CancelledError', 'TimeoutError',
'InvalidStateError',
'Future', 'wrap_future',
]
import concurrent.futures._base
import logging
import reprlib
import sys
import traceback
from . import events
# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
_PY34 = sys.version_info >= (3, 4)
# TODO: Do we really want to depend on concurrent.futures internals?
Error = concurrent.futures._base.Error
CancelledError = concurrent.futures.CancelledError
TimeoutError = concurrent.futures.TimeoutError
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
class InvalidStateError(Error):
"""The operation is not allowed in this state."""
# TODO: Show the future, its state, the method, and the required state.
class _TracebackLogger:
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, a call that is delayed until after all the
Future's callbacks have run. Usually a Future has at least one
callback (typically set by 'yield from'), and that callback
extracts the exception, which removes the need to format it at all.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('loop', 'source_traceback', 'exc', 'tb')
def __init__(self, future, exc):
self.loop = future._loop
self.source_traceback = future._source_traceback
self.exc = exc
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
msg = 'Future/Task exception was never retrieved'
if self.source_traceback:
msg += '\nFuture/Task created at (most recent call last):\n'
msg += ''.join(traceback.format_list(self.source_traceback))
msg += ''.join(self.tb).rstrip()
self.loop.call_exception_handler({'message': msg})
class Future:
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
via the event loop's call_soon_threadsafe().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
(In Python 3.4 or later we may be able to unify the implementations.)
"""
# Class variables serving as defaults for instance variables.
_state = _PENDING
_result = None
_exception = None
_loop = None
_blocking = False # proper use of future (yield vs yield from)
_log_traceback = False # Used for Python 3.4 and later
_tb_logger = None # Used for Python 3.3 only
def __init__(self, *, loop=None):
"""Initialize the future.
The optional event_loop argument allows to explicitly set the event
loop object used by the future. If it's not provided, the future uses
the default event loop.
"""
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._callbacks = []
if self._loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
else:
self._source_traceback = None
def _format_callbacks(self):
cb = self._callbacks
size = len(cb)
if not size:
cb = ''
def format_cb(callback):
return events._format_callback(callback, ())
if size == 1:
cb = format_cb(cb[0])
elif size == 2:
cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
elif size > 2:
cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
size-2,
format_cb(cb[-1]))
return 'cb=[%s]' % cb
def _repr_info(self):
info = [self._state.lower()]
if self._state == _FINISHED:
if self._exception is not None:
info.append('exception={!r}'.format(self._exception))
else:
# use reprlib to limit the length of the output, especially
# for very long strings
result = reprlib.repr(self._result)
info.append('result={}'.format(result))
if self._callbacks:
info.append(self._format_callbacks())
if self._source_traceback:
frame = self._source_traceback[-1]
info.append('created at %s:%s' % (frame[0], frame[1]))
return info
def __repr__(self):
info = self._repr_info()
return '<%s %s>' % (self.__class__.__name__, ' '.join(info))
# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. That is no longer the case on
# Python 3.4, thanks to PEP 442.
if _PY34:
def __del__(self):
if not self._log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
exc = self._exception
context = {
'message': ('%s exception was never retrieved'
% self.__class__.__name__),
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def cancel(self):
"""Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
change the future's state to cancelled, schedule the callbacks and
return True.
"""
if self._state != _PENDING:
return False
self._state = _CANCELLED
self._schedule_callbacks()
return True
def _schedule_callbacks(self):
"""Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
"""
callbacks = self._callbacks[:]
if not callbacks:
return
self._callbacks[:] = []
for callback in callbacks:
self._loop.call_soon(callback, self)
def cancelled(self):
"""Return True if the future was cancelled."""
return self._state == _CANCELLED
# Don't implement running(); see http://bugs.python.org/issue18699
def done(self):
"""Return True if the future is done.
Done means either that a result / exception are available, or that the
future was cancelled.
"""
return self._state != _PENDING
def result(self):
"""Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError('Result is not ready.')
self._log_traceback = False
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
if self._exception is not None:
raise self._exception
return self._result
def exception(self):
"""Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError('Exception is not set.')
self._log_traceback = False
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
return self._exception
def add_done_callback(self, fn):
"""Add a callback to be run when the future becomes done.
The callback is called with a single argument - the future object. If
the future is already done when this is called, the callback is
scheduled with call_soon.
"""
if self._state != _PENDING:
self._loop.call_soon(fn, self)
else:
self._callbacks.append(fn)
# New method not in PEP 3148.
def remove_done_callback(self, fn):
"""Remove all instances of a callback from the "call when done" list.
Returns the number of callbacks removed.
"""
filtered_callbacks = [f for f in self._callbacks if f != fn]
removed_count = len(self._callbacks) - len(filtered_callbacks)
if removed_count:
self._callbacks[:] = filtered_callbacks
return removed_count
# So-called internal methods (note: no set_running_or_notify_cancel()).
def _set_result_unless_cancelled(self, result):
"""Helper setting the result only if the future was not cancelled."""
if self.cancelled():
return
self.set_result(result)
def set_result(self, result):
"""Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise InvalidStateError('{}: {!r}'.format(self._state, self))
self._result = result
self._state = _FINISHED
self._schedule_callbacks()
def set_exception(self, exception):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise InvalidStateError('{}: {!r}'.format(self._state, self))
if isinstance(exception, type):
exception = exception()
self._exception = exception
self._state = _FINISHED
self._schedule_callbacks()
if _PY34:
self._log_traceback = True
else:
self._tb_logger = _TracebackLogger(self, exception)
# Arrange for the logger to be activated after all callbacks
# have had a chance to call result() or exception().
self._loop.call_soon(self._tb_logger.activate)
# Truly internal methods.
def _copy_state(self, other):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert other.done()
if self.cancelled():
return
assert not self.done()
if other.cancelled():
self.cancel()
else:
exception = other.exception()
if exception is not None:
self.set_exception(exception)
else:
result = other.result()
self.set_result(result)
def __iter__(self):
if not self.done():
self._blocking = True
yield self # This tells Task to wait for completion.
assert self.done(), "yield from wasn't used with future"
return self.result() # May raise too.
def wrap_future(fut, *, loop=None):
"""Wrap concurrent.futures.Future object."""
if isinstance(fut, Future):
return fut
assert isinstance(fut, concurrent.futures.Future), \
'concurrent.futures.Future is expected, got {!r}'.format(fut)
if loop is None:
loop = events.get_event_loop()
new_future = Future(loop=loop)
def _check_cancel_other(f):
if f.cancelled():
fut.cancel()
new_future.add_done_callback(_check_cancel_other)
fut.add_done_callback(
lambda future: loop.call_soon_threadsafe(
new_future._copy_state, fut))
return new_future
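# Hedged usage sketch: bridging a concurrent.futures result into this event
# loop (the executor and callable are arbitrary examples):
#     import concurrent.futures
#     pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
#     loop = events.get_event_loop()
#     fut = wrap_future(pool.submit(lambda: 42), loop=loop)
#     loop.run_until_complete(fut)   # -> 42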
|
graingert/pip | refs/heads/develop | pip/_vendor/requests/packages/urllib3/exceptions.py | 156 |
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
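# Illustrative sketch (an assumption, not in the original file): the
# __reduce__ overrides above keep these exceptions picklable even though
# the pool object itself is not.
#
#     import pickle
#     err = RequestError(None, "http://example.com", "boom")
#     restored = pickle.loads(pickle.dumps(err))  # url survives, pool becomes None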
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
|
tm1249wk/WASHLIGGGHTS-2.3.7 | refs/heads/master | tools/python/dump2cfg.py | 9 | #!/usr/local/bin/python
# Script: dump2cfg.py
# Purpose: convert a LAMMPS dump file to CFG format
# Syntax: dump2cfg.py dumpfile Nid Ntype Nx Ny Nz cfgfile
# dumpfile = LAMMPS dump file in native LAMMPS format
# Nid,Ntype,Nx,Ny,Nz = columns #s for ID,type,x,y,z
# (usually 1,2,3,4,5)
# cfgfile = new CFG file
# Author: Steve Plimpton (Sandia), sjplimp at sandia.gov
import sys,os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from dump import dump
from cfg import cfg
if len(sys.argv) != 8:
raise StandardError, "Syntax: dump2cfg.py dumpfile Nid Ntype Nx Ny Nz cfgfile"
dumpfile = sys.argv[1]
nid = int(sys.argv[2])
ntype = int(sys.argv[3])
nx = int(sys.argv[4])
ny = int(sys.argv[5])
nz = int(sys.argv[6])
cfgfile = sys.argv[7]
d = dump(dumpfile)
d.map(nid,"id",ntype,"type",nx,"x",ny,"y",nz,"z")
c = cfg(d)
c.one(cfgfile)
|
fabianp/scikit-learn | refs/heads/master | sklearn/linear_model/tests/test_logistic.py | 105 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit the intercept. This can be considered equivalent to
    # adding a feature column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold, as the folds generated would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
|
OpenDroneMap/WebODM | refs/heads/master | coreplugins/contours/__init__.py | 29 | from .plugin import *
|
seppi91/CouchPotatoServer | refs/heads/develop | libs/bs4/builder/__init__.py | 447 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
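    # Hypothetical usage sketch (not part of bs4): after registering builders,
    # callers look one up by feature, most-recently-registered first.
    # `SomeLxmlTreeBuilder` is an assumed TreeBuilder subclass.
    #
    #     registry = TreeBuilderRegistry()
    #     registry.register(SomeLxmlTreeBuilder)
    #     builder_class = registry.lookup('fast', 'xml')  # None if no match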
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
features = []
is_xml = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
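    # Illustrative sketch (assumed, not in the original source): splitting a
    # CDATA-list attribute in place via the helper above.
    #
    #     builder = HTMLTreeBuilder()
    #     attrs = {'class': 'foo bar'}
    #     builder._replace_cdata_list_attribute_values('p', attrs)
    #     # attrs is now {'class': ['foo', 'bar']}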
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
|
darioizzo/pykep | refs/heads/master | pykep/planet/__init__.py | 2 | # -*- coding: iso-8859-1 -*-
"""
This module contains all the classes that allow one to efficiently construct
low-thrust trajectories using our own flavour of the Sims-Flanagan model: a trajectory
transcription method that forms the basis for MALTO, the software in use at JPL
for preliminary interplanetary trajectory design.
"""
from pykep.planet.planet import *
from pykep.planet.planet import _base
def _keplerian_ctor(self, *args):
"""
pykep.planet.keplerian(when,orbital_elements, mu_central_body, mu_self,radius, safe_radius [, name = 'unknown'])
pykep.planet.keplerian(when,r,v, mu_central_body, mu_self,radius, safe_radius [, name = 'unknown'])
- when: a :py:class:`pykep.epoch` indicating the orbital elements reference epoch
    - orbital_elements: a sequence of six containing a,e,i,W,w,M (SI units, i.e. meters and radians)
- r,v: position and velocity of an object at when (SI units)
    - mu_central_body: gravity parameter of the central body (SI units, i.e. m^3/s^2)
    - mu_self: gravity parameter of the planet (SI units, i.e. m^3/s^2)
    - radius: body radius (SI units, i.e. meters)
    - safe_radius: minimal radius that is safe during a fly-by of the planet (SI units, i.e. m)
- name: body name
.. note::
All classes having Keplerian ephemerides as :py:class:`pykep.planet.mpcorb` inherit from this (c++) class
Example::
        earth = planet(epoch(54000,"mjd"),(9.99e-01 * AU, 1.67e-02, 8.85e-04 * DEG2RAD, 1.75e+02 * DEG2RAD, 2.87e+02 * DEG2RAD, 2.57e+02 * DEG2RAD), MU_SUN, 398600e9, 6378000, 6900000, 'Earth')
"""
self._orig_init(*args)
keplerian._orig_init = keplerian.__init__
keplerian.__init__ = _keplerian_ctor
def _j2_ctor(self, *args):
"""
pykep.planet.j2(when,orbital_elements, mu_central_body, mu_self,radius, safe_radius, J2RG2 [, name = 'unknown'])
pykep.planet.j2(when,r,v, mu_central_body, mu_self,radius, safe_radius, J2RG2 [, name = 'unknown'])
- when: a :py:class:`pykep.epoch` indicating the orbital elements reference epoch
    - orbital_elements: a sequence of six containing a,e,i,W,w,M (SI units, i.e. meters and radians)
- r,v: position and velocity of an object at when (SI units)
    - mu_central_body: gravity parameter of the central body (SI units, i.e. m^3/s^2)
    - mu_self: gravity parameter of the planet (SI units, i.e. m^3/s^2)
    - radius: body radius (SI units, i.e. meters)
    - safe_radius: minimal radius that is safe during a fly-by of the planet (SI units, i.e. m)
- J2RG2: the product of J2 and the mean radius of the oblate primary squared (SI units, i.e. m^2)
- name: body name
Example::
deb1 = planet.j2(epoch(54000,"mjd"),(7000000, 1.67e-02, 78.23 * DEG2RAD, 175. * DEG2RAD, 287. * DEG2RAD, 257 * DEG2RAD), MU_EARTH, 1, 1, 1, EARTH_J2*EARTH_RADIUS**2, 'deb1')
"""
self._orig_init(*args)
j2._orig_init = j2.__init__
j2.__init__ = _j2_ctor
|
mrquim/repository.mrquim | refs/heads/master | script.module.covenant/lib/resources/lib/modules/debrid.py | 6 | # -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from resources.lib.modules import log_utils
try:
import urlresolver
debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]
if len(debrid_resolvers) == 0:
# Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
# accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
# As a bonus(?), rapidgator links will be highlighted just like actual debrid links
debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True,include_universal=False) if 'rapidgator.net' in resolver.domains]
except:
debrid_resolvers = []
def status():
return debrid_resolvers != []
def resolver(url, debrid):
try:
debrid_resolver = [resolver for resolver in debrid_resolvers if resolver.name == debrid][0]
debrid_resolver.login()
_host, _media_id = debrid_resolver.get_host_and_id(url)
stream_url = debrid_resolver.get_media_url(_host, _media_id)
return stream_url
except Exception as e:
log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
return None
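# Hypothetical usage (an assumption, not part of the add-on): resolving a
# premium link through a named debrid service. The URL and service name
# below are placeholders.
#
#     stream_url = resolver('http://rapidgator.net/file/abc123', 'Real-Debrid')
#     if stream_url is not None:
#         pass  # hand stream_url to the player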
|
kmod/icbd | refs/heads/master | stdlib/python2.5/curses/panel.py | 77 | """curses.panel
Module for using panels with curses.
"""
__revision__ = "$Id: panel.py 36560 2004-07-18 06:16:08Z tim_one $"
from _curses_panel import *
|
huoxudong125/dlr | refs/heads/master | Samples/Hosting/Scenarios/merlin_web_page_code_behind.py | 3 | def OnLoad():
print "page loaded"
def Render():
return "query page elements: " + element1 + " " + element2
|
camilonova/django | refs/heads/master | django/contrib/gis/db/backends/base/operations.py | 7 | class BaseSpatialOperations:
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
bounding_circle = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'ForceRHR', 'GeoHash', 'Intersection', 'IsValid', 'Length', 'MakeValid',
'MemSize', 'NumGeometries', 'NumPoints', 'Perimeter', 'PointOnSurface',
'Reverse', 'Scale', 'SnapToGrid', 'SymDifference', 'Transform',
'Translate', 'Union',
}
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Return the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Return the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_placeholder() method')
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotImplementedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super().check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotImplementedError("This backend doesn't support the %s function." % func_name)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
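    # Sketch (assumed, not from Django itself): for a backend that supports
    # these functions (i.e. has removed them from `unsupported_functions`)
    # and sets geom_func_prefix = 'ST_', the default mapping gives
    # 'Transform' -> 'ST_Transform', while an explicit function_names entry,
    # e.g. {'Length': 'ST_Length2D'}, takes precedence over the prefix.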
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
|
bhupennewalkar1337/erpnext | refs/heads/develop | erpnext/hr/doctype/employee_leave_approver/__init__.py | 12133432 | |
benjaminrigaud/django | refs/heads/master | tests/test_discovery_sample/__init__.py | 12133432 | |
myfreecomm/python-passaporte-web | refs/heads/master | passaporte_web/__init__.py | 12133432 | |
wkfwkf/statsmodels | refs/heads/master | docs/fix_longtable.py | 43 | #!/usr/bin/env python
import sys
import os
BUILDDIR = sys.argv[-1]
read_file_path = os.path.join(BUILDDIR,'latex','statsmodels.tex')
write_file_path = os.path.join(BUILDDIR, 'latex','statsmodels_tmp.tex')
read_file = open(read_file_path,'r')
write_file = open(write_file_path, 'w')
for line in read_file:
if 'longtable}{LL' in line:
line = line.replace('longtable}{LL', 'longtable}{|l|l|')
write_file.write(line)
read_file.close()
write_file.close()
os.remove(read_file_path)
os.rename(write_file_path, read_file_path)
|
vortex-ape/scikit-learn | refs/heads/master | examples/linear_model/plot_sgd_comparison.py | 29 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier(max_iter=100)),
("ASGD", SGDClassifier(average=True, max_iter=100)),
("Perceptron", Perceptron(tol=1e-3)),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0, tol=1e-4)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0, tol=1e-4)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
msebire/intellij-community | refs/heads/master | python/testData/refactoring/move/function/before/src/a.py | 83 | from lib1 import urlopen
def f(url):
'''Return the representation available at the URL.
'''
return urlopen(url).read()
def f_usage():
return f(14)
class C(object):
def g(self, x):
return x
class D(C):
def g(self, x, y):
return super(D, self).f(x) + y
class E(object):
def g(self):
return -1 |
anbangleo/NlsdeWeb | refs/heads/master | api/achievements/hackolantern/hackolantern.py | 11 | import datetime
def process(api, data):
date = datetime.datetime.now() + datetime.timedelta(hours=-5)
return date.day == 31 and date.month == 10, {}
|
TheGentlemanOctopus/thegentlemanoctopus | refs/heads/master | octopus_code/core/octopus/__init__.py | 12133432 | |
damdam-s/OCB | refs/heads/8.0 | addons/web_kanban/__openerp__.py | 423 | {
'name': 'Base Kanban',
'category': 'Hidden',
'description': """
OpenERP Web kanban view.
========================
""",
'version': '2.0',
'depends': ['web'],
'data' : [
'views/web_kanban.xml',
],
'qweb' : [
'static/src/xml/*.xml',
],
'auto_install': True
}
|
open-synergy/opnsynid-hr | refs/heads/8.0 | hr_employee_training_allowance/models/hr_training_category.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields
class HrTrainingCategory(models.Model):
_inherit = "hr.training_category"
allowance_pricelist_id = fields.Many2one(
string="Training Allowance Pricelist",
comodel_name="product.pricelist",
company_dependent=True,
)
|
wrxtasy/xbmc | refs/heads/master | tools/EventClients/lib/python/ps3/sixwatch.py | 44 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pyudev
import sixpair
import threading
vendor = 0x054c
product = 0x0268
def main(mac):
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by(subsystem="usb")
for action, device in monitor:
if 'ID_VENDOR_ID' in device and 'ID_MODEL_ID' in device:
if device['ID_VENDOR_ID'] == '054c' and device['ID_MODEL_ID'] == '0268':
if action == 'add':
print("Detected sixaxis connected by usb")
try:
sixpair.set_pair_filename(device.attributes['busnum'], device.attributes['devnum'], mac)
except Exception as e:
print("Failed to check pairing of sixaxis: " + str(e))
pass
if __name__=="__main__":
main((0,0,0,0,0,0))
|
20tab/pybulletphysics | refs/heads/master | tests/__init__.py | 12133432 |