| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null) |
|---|---|---|---|---|
qrkourier/ansible
|
refs/heads/devel
|
lib/ansible/plugins/cliconf/aruba.py
|
43
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network_common import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'aruba'
reply = self.get(b'show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'^MODEL: (\S+)\),', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
reply = self.get(b'show hostname')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'^Hostname is (.+)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
@enable_mode
def get_config(self, source='running'):
if source not in ('running', 'startup'):
return self.invalid_params("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = b'show running-config all'
else:
cmd = b'show startup-config'
return self.send_command(cmd)
@enable_mode
def edit_config(self, command):
for cmd in chain([b'configure terminal'], to_list(command), [b'end']):
self.send_command(cmd)
def get(self, *args, **kwargs):
return self.send_command(*args, **kwargs)
def get_capabilities(self):
result = {}
result['rpc'] = self.get_base_rpc()
result['network_api'] = 'cliconf'
result['device_info'] = self.get_device_info()
return json.dumps(result)
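
# Illustrative sketch (added, not part of the original plugin): how the
# regexes in get_device_info would pull fields out of hypothetical
# 'show version' output. The sample text is an assumption, not real
# device output.
if __name__ == '__main__':
    sample = 'MODEL: (Aruba7010), Version 6.4.2.6'
    print(re.search(r'Version (\S+)', sample).group(1))           # 6.4.2.6
    print(re.search(r'^MODEL: (\S+)\),', sample, re.M).group(1))  # (Aruba7010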
|
MakMukhi/grpc
|
refs/heads/master
|
src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
|
23
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
import six
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import future
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
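
# Illustrative note (added): pausing blocks consumers until the context
# manager exits. A hypothetical standalone use, outside this test module:
#
#     it = _PauseableIterator(iter([1, 2, 3]))
#     with it.pause():
#         ...               # a concurrent next(it) blocks here
#     next(it)              # returns 1 once the pause is released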
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
                    raise ValueError(
                        'Test callback passed unexpected values: %s' %
                        (self._passed_other_stuff,))
elif self._called:
return self._passed_future
else:
self._condition.wait()
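
# Illustrative note (added): a _Callback is handed to Future.add_done_callback,
# records the future it is invoked with, and hands it back via future():
#
#     callback = _Callback()
#     response_future.add_done_callback(callback)  # framework calls callback(f)
#     assert callback.future() is response_future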
class TestCase(
six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
unittest.TestCase)):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations,
None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result()
for response_future in response_futures
]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(
inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index],
response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
with self.assertRaises(future.CancelledError):
response_future.exception()
with self.assertRaises(future.CancelledError):
response_future.traceback()
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
with self.assertRaises(future.CancelledError):
response_future.exception()
with self.assertRaises(future.CancelledError):
response_future.traceback()
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsInstance(response_future.exception(),
face.AbortionError)
self.assertIsNotNone(response_future.traceback())
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsInstance(response_future.exception(),
face.AbortionError)
self.assertIsNotNone(response_future.traceback())
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
abortion_callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
response_future.add_abortion_callback(abortion_callback)
self.assertIs(callback.future(), response_future)
                # Because the servicer fails outside of the thread from which the
                # servicer-side runtime called into it, its failure is
                # indistinguishable from simply not having called its
                # response_callback before the expiration of the RPC.
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsNotNone(response_future.traceback())
self.assertIsNotNone(abortion_callback.future())
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
                # Because the servicer fails outside of the thread from which the
                # servicer-side runtime called into it, its failure is indistinguishable
                # from simply not having called its response_consumer before the
                # expiration of the RPC.
with self._control.fail(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
abortion_callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
response_future.add_abortion_callback(abortion_callback)
self.assertIs(callback.future(), response_future)
                # Because the servicer fails outside of the thread from which the
                # servicer-side runtime called into it, its failure is
                # indistinguishable from simply not having called its
                # response_callback before the expiration of the RPC.
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsNotNone(response_future.traceback())
self.assertIsNotNone(abortion_callback.future())
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
                # Because the servicer fails outside of the thread from which the
                # servicer-side runtime called into it, its failure is indistinguishable
                # from simply not having called its response_consumer before the
                # expiration of the RPC.
with self._control.fail(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
|
platux/vlc
|
refs/heads/master
|
extras/misc/stackhandler.py
|
99
|
#!/usr/bin/python
#####################################################################
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2011-2012 Ludovic Fauvet <etix@videolan.org>
# Jean-Baptiste Kempf <jb@videolan.org>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
#####################################################################
#
# This script can be started in two ways:
# - Without any arguments:
# The script will search for stacktraces in the WORKDIR, process
# them and dispatch them into their respective subdirectories.
# - With a stacktrace as its only argument:
# The script will write the output to stdout and exit immediately
# after the stacktrace has been processed.
# The input file will stay in place, untouched.
#
# NOTE: Due to a bug in mingw32-binutils > 2.19, the section
# .gnu_debuglink in the binary file is trimmed, preventing
# gdb from finding the associated symbols. This script works
# around this issue and reruns gdb for each dbg file.
#
#####################################################################
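#
# Usage sketch (added; the file name below is hypothetical):
#   ./stackhandler.py                  # batch mode: scan and dispatch WORKDIR
#   ./stackhandler.py 20120101123456   # single-file mode: report on stdout only
#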
VLC_VERSION = "2.0.3"
VLC_BIN = "/home/videolan/vlc/" + VLC_VERSION + "/vlc-" + VLC_VERSION + "/vlc.exe"
VLC_BASE_DIR = "/home/videolan/vlc/" + VLC_VERSION + "/vlc-" + VLC_VERSION + "/"
VLC_SYMBOLS_DIR = "/home/videolan/vlc/" + VLC_VERSION + "/symbols-" + VLC_VERSION + "/"
WORKDIR = "/srv/ftp/crashes-win32"
FILE_MATCH = r"^\d{14}$"
FILE_MAX_SIZE = 10000
GDB_CMD = "gdb --exec=%(VLC_BIN)s --symbols=%(VLC_SYMBOLS_DIR)s%(DBG_FILE)s.dbg --batch -x %(BATCH_FILE)s"
EMAIL_TO = "bugreporter -- videolan.org"
EMAIL_FROM = "crashes@crash.videolan.org"
EMAIL_SUBJECT = "[CRASH] New Win32 crash report"
EMAIL_BODY = \
"""
Dear Bug Squasher,
This crash has been reported automatically and might be incomplete and/or broken.
Windows version: %(WIN32_VERSION)s
%(STACKTRACE)s
Truly yours,
a python script.
"""
import os, sys, re, tempfile
import string, shlex, subprocess
import smtplib, datetime, shutil
import traceback
from email.mime.text import MIMEText
def processFile(filename):
print "Processing " + filename
global win32_version
f = open(filename, 'r')
# Read (and repair) the input file
content = "".join(filter(lambda x: x in string.printable, f.read()))
f.close()
if os.path.getsize(filename) < 10:
print("File empty")
os.remove(filename)
return
# Check if VLC version match
if not isValidVersion(content):
print("Invalid VLC version")
moveFile(filename, outdated = True)
return
# Get Windows version
win32_version = getWinVersion(content) or 'unknown'
# Map eip <--> library
mapping = mapLibraries(content)
if not mapping:
print("Stacktrace not found")
os.remove(filename)
return
# Associate all eip to their respective lib
# lib1
# `- 0x6904f020
# - 0x6927d37c
# lib2
# `- 0x7e418734
# - 0x7e418816
# - 0x7e42bf15
sortedEIP,delta_libs = sortEIP(content,mapping)
# Compute the stacktrace using GDB
eipmap = findSymbols(sortedEIP)
# Generate the body of the email
body = genEmailBody(mapping, eipmap, delta_libs)
# Send the email
sendEmail(body)
# Print the output
print(body)
# Finally archive the stacktrace
moveFile(filename, outdated = False)
def isValidVersion(content):
pattern = re.compile(r"^VLC=%s " % VLC_VERSION, re.MULTILINE)
res = pattern.search(content)
    return res is not None
def getWinVersion(content):
pattern = re.compile(r"^OS=(.*)$", re.MULTILINE)
res = pattern.search(content)
if res is not None:
return res.group(1)
return None
def getDiffAddress(content, name):
plugin_name_section = content.find(name)
if plugin_name_section < 0:
return None
begin_index = content.rfind("\n", 0, plugin_name_section) + 1
end_index = content.find("|", begin_index)
tmp_index = name.rfind('plugins\\')
libname = name[tmp_index :].replace("\\", "/")
full_path = VLC_BASE_DIR + libname
if not os.path.isfile(full_path):
return None
cmd = "objdump -p " + full_path + " |grep ImageBase -|cut -f2-"
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).stdout.read().strip()
diff = int(content[begin_index:end_index], 16) - int(p, 16)
return diff
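
# Illustrative note (added): the returned delta is (runtime load address -
# on-disk ImageBase). Subtracting it from a crash EIP, as sortEIP does below,
# rebases the address so gdb can resolve it against the unrelocated symbols:
#
#     rebased = int(eip, 16) - getDiffAddress(content, libpath)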
def mapLibraries(content):
stacktrace_section = content.find("[stacktrace]")
if stacktrace_section < 0:
return None
stacklines = content[stacktrace_section:]
stacklines = stacklines.splitlines()
pattern = re.compile(r"^([0-9a-fA-F]+)\|(.+)$")
mapping = []
for line in stacklines:
m = pattern.match(line)
print(line)
if m is not None:
mapping.append(m.group(1, 2))
if len(mapping) == 0:
return None
return mapping
def sortEIP(content, mapping):
# Merge all EIP mapping to the same library
libs = {}
libs_address = {}
for item in mapping:
# Extract the library name (without the full path)
index = item[1].rfind('\\')
libname = item[1][index + 1:]
# Append the eip to its respective lib
if libname not in libs:
libs[libname] = []
diff = getDiffAddress(content, item[1])
if diff is not None:
libs_address[libname] = diff
else:
libs_address[libname] = 0
libs[libname].append(int(item[0],16) - libs_address[libname])
return libs,libs_address
def findSymbols(sortedEIP):
eipmap = {}
for k, v in sortedEIP.items():
# Create the gdb batchfile
batchfile = tempfile.NamedTemporaryFile(mode="w")
batchfile.write("set print symbol-filename on\n")
# Append all eip for this lib
for eip in v:
batchfile.write('p/a %s\n' % hex(eip))
batchfile.flush()
# Generate the command line
cmd = GDB_CMD % {"VLC_BIN": VLC_BIN, "VLC_SYMBOLS_DIR": VLC_SYMBOLS_DIR, "DBG_FILE": k, "BATCH_FILE": batchfile.name}
args = shlex.split(cmd)
# Start GDB and get result
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Parse result
gdb_pattern = re.compile(r"^\$\d+ = (.+)$")
cnt = 0
        while p.poll() is None:
o = p.stdout.readline()
if o != b'':
o = bytes.decode(o)
m = gdb_pattern.match(o)
if m is not None:
#print("LINE: [%s]" % m.group(1))
eipmap[v[cnt]] = m.group(1)
cnt += 1
batchfile.close()
return eipmap
def genEmailBody(mapping, eipmap, delta_libs):
stacktrace = ""
cnt = 0
for item in mapping:
index = item[1].rfind('\\')
libname = item[1][index + 1:]
print(int(item[0],16), delta_libs[libname])
#print(eipmap)
#print(mapping)
stacktrace += "%d. %s [in %s]\n" % (cnt, eipmap[int(item[0],16)-delta_libs[libname]], item[1])
cnt += 1
stacktrace = stacktrace.rstrip('\n')
return EMAIL_BODY % {"STACKTRACE": stacktrace, "WIN32_VERSION": win32_version}
def sendEmail(body):
msg = MIMEText(body)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
# Send the email
s = smtplib.SMTP()
s.connect("127.0.0.1")
s.sendmail(EMAIL_FROM, [EMAIL_TO], msg.as_string())
s.quit()
def moveFile(filename, outdated = False):
today = datetime.datetime.now().strftime("%Y%m%d")
today_path = "%s/%s" % (WORKDIR, today)
if not os.path.isdir(today_path):
os.mkdir(today_path)
if not outdated:
shutil.move(filename, "%s/%s" % (today_path, os.path.basename(filename)))
else:
outdated_path = "%s/outdated/" % today_path
if not os.path.isdir(outdated_path):
os.mkdir(outdated_path)
shutil.move(filename, "%s/%s" % (outdated_path, os.path.basename(filename)))
### ENTRY POINT ###
batch = len(sys.argv) != 2
if batch:
print("Running in batch mode")
input_files = []
if not batch:
if not os.path.isfile(sys.argv[1]):
exit("file does not exists")
input_files.append(sys.argv[1])
else:
file_pattern = re.compile(FILE_MATCH)
entries = os.listdir(WORKDIR)
for entry in entries:
path_entry = WORKDIR + "/" + entry
if not os.path.isfile(path_entry):
continue
if not file_pattern.match(entry):
print(entry)
os.remove(path_entry)
continue
if os.path.getsize(path_entry) > FILE_MAX_SIZE:
print("%s is too big" % entry)
os.remove(path_entry)
continue
input_files.append(path_entry)
if not len(input_files):
exit("Nothing to process")
# Start processing each file
for input_file in input_files:
try:
processFile(input_file)
except Exception as ex:
print(traceback.format_exc())
|
zero-ui/miniblink49
|
refs/heads/master
|
v8_5_7/testing/gtest/test/gtest_output_test.py
|
363
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
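
# Illustrative example (added, hypothetical inputs): both location formats
# normalize the same way:
#   RemoveLocations('src/foo_test.cc:123: Failure')   -> 'foo_test.cc:#: Failure'
#   RemoveLocations('src\\foo_test.cc(123): Failure') -> 'foo_test.cc:#: Failure'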
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
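
# Illustrative example (added): RemoveMatchingTests(output, 'FooTest') strips
# everything from the '[ RUN ] ...FooTest...' line through the matching
# '[ FAILED ]' or '[ OK ]' line, plus any remaining lines naming FooTest.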
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'r')
    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file, irrespective of the operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
KurtDeGreeff/infernal-twin
|
refs/heads/master
|
build/reportlab/src/rl_addons/rl_accel/tests/t1.py
|
14
|
import time
from reportlab.pdfbase.pdfmetrics import _fonts, findFontAndRegister, _py_getFont
from _rl_accel import getFontU
from getrc import getrc, checkrc
import sys
#fn0 = 'Times-Bold'
#fn1 = 'Times-Roman'
N = 1000000
def tim(N,msg,func,*args):
t0 = time.time()
for i in range(N):
x = func(*args)
t1 = time.time()
return "%s N=%d t=%.3f\n%r" % (msg,N,t1-t0,x)
fn0='Courier'
fn1='Helvetica'
font0=_py_getFont(fn0)
font1=_py_getFont(fn1)
getFontU(fn0)
defns = "font0 font1 fn0 fn1 _fonts"
rcv = getrc(defns)
for i in (0,1,2):
for fn in fn0,fn1:
print(tim(N,'getFontU',getFontU,fn))
print(tim(N,'_py_getFont',_py_getFont,fn))
del fn
print("rc diffs=(%s)" % checkrc(defns,rcv))
|
openstenoproject/plover
|
refs/heads/master
|
plover/oslayer/osxkeyboardlayout.py
|
4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: @abarnert, @willwade, and @morinted
# Code taken and modified from
# <https://github.com/willwade/PyUserInput/blob/master/pykeyboard/mac_keycode.py>
# <https://stackoverflow.com/questions/1918841/how-to-convert-ascii-character-to-cgkeycode>
from threading import Thread
import ctypes
import ctypes.util
import re
import struct
import unicodedata
from PyObjCTools import AppHelper
import AppKit
import Foundation
from plover import log
from plover.key_combo import CHAR_TO_KEYNAME
from plover.misc import popcount_8
carbon_path = ctypes.util.find_library('Carbon')
carbon = ctypes.cdll.LoadLibrary(carbon_path)
CFIndex = ctypes.c_int64
class CFRange(ctypes.Structure):
_fields_ = [('loc', CFIndex),
('len', CFIndex)]
carbon.TISCopyCurrentKeyboardInputSource.argtypes = []
carbon.TISCopyCurrentKeyboardInputSource.restype = ctypes.c_void_p
carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource.argtypes = []
carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource.restype = ctypes.c_void_p
carbon.TISGetInputSourceProperty.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
carbon.TISGetInputSourceProperty.restype = ctypes.c_void_p
carbon.LMGetKbdType.argtypes = []
carbon.LMGetKbdType.restype = ctypes.c_uint32
carbon.CFDataGetLength.argtypes = [ctypes.c_void_p]
carbon.CFDataGetLength.restype = ctypes.c_uint64
carbon.CFDataGetBytes.argtypes = [ctypes.c_void_p, CFRange, ctypes.c_void_p]
carbon.CFDataGetBytes.restype = None
carbon.CFRelease.argtypes = [ctypes.c_void_p]
carbon.CFRelease.restype = None
kTISPropertyUnicodeKeyLayoutData = ctypes.c_void_p.in_dll(
carbon, 'kTISPropertyUnicodeKeyLayoutData')
kTISPropertyInputSourceID = ctypes.c_void_p.in_dll(
carbon, 'kTISPropertyInputSourceID')
kTISPropertyInputSourceIsASCIICapable = ctypes.c_void_p.in_dll(
carbon, 'kTISPropertyInputSourceIsASCIICapable')
COMMAND = '⌘'
SHIFT = '⇧'
OPTION = '⌥'
CONTROL = '⌃'
CAPS = '⇪'
UNKNOWN = '?'
UNKNOWN_L = '?_L'
KEY_CODE_VISUALIZATION = """
┌───────────────────Layout of Apple Extended Keyboard II───────────────────────┐
│ 53 122 120 99 118 96 97 98 100 101 109 103 111 105 107 113 │
│ │
│ 50─── 18 19 20 21 23 22 26 28 25 29 27 24 ─────51 114 115 116 71 81 75 67 │
│ 48──── 12 13 14 15 17 16 32 34 31 35 33 30 ────42 117 119 121 89 91 92 78 │
│ 57───── 0 1 2 3 5 4 38 40 37 41 39 ──────36 86 87 88 69 │
│ 56────── 6 7 8 9 11 45 46 43 47 44 ───────56 126 83 84 85 76 │
│ 59── 58─ 55─ ──────────49───────── ─55 ──58 ───59 123 125 124 82─── 65 76 │
└──────────────────────────────────────────────────────────────────────────────┘
Valid keycodes not visible on layout:
10, 52, 54, 60-64, 66, 68, 70, 72-74, 77, 79, 80,
90, 93-95, 102, 104, 106, 108, 110, 112, 127
"""
SPECIAL_KEY_NAMES = {
'\x1b': 'Esc', # Will map both Esc and Clear (key codes 53 and 71)
'\xa0': 'nbsp',
'\x08': 'Bksp',
'\x05': 'Help',
'\x01': 'Home',
'\x7f': 'Del',
'\x04': 'End',
'\x0c': 'PgDn',
'\x0b': 'PgUp', # \x0b is also the clear character signal
}
DEFAULT_SEQUENCE = (None, 0),
def is_printable(string):
for character in string:
category = unicodedata.category(character)
if category[0] in 'C':
# Exception: the "Apple" character that most Mac layouts have
return False if string != "" else True
elif category == 'Zs' and character != ' ':
return False
        elif category in ('Zl', 'Zp'):
return False
return True
def get_printable_string(s):
if s is None:
return 'None'
return s if is_printable(s) else SPECIAL_KEY_NAMES.setdefault(
s, s.encode('unicode_escape').decode("utf-8")
)
class KeyboardLayout:
def __init__(self, watch_layout=True):
self._char_to_key_sequence = None
self._key_sequence_to_char = None
self._modifier_masks = None
self._deadkey_symbol_to_key_sequence = None
# Spawn a thread that responds to system keyboard layout changes.
if watch_layout:
self._watcher = Thread(
target=self._layout_watcher,
name="LayoutWatcher")
self._watcher.start()
self._update_layout()
def _layout_watcher(self):
layout = self
class LayoutWatchingCallback(AppKit.NSObject):
def layoutChanged_(self, event):
log.info('Mac keyboard layout changed, updating')
layout._update_layout()
center = Foundation.NSDistributedNotificationCenter.defaultCenter()
watcher_callback = LayoutWatchingCallback.new()
center.addObserver_selector_name_object_suspensionBehavior_(
watcher_callback,
'layoutChanged:',
'com.apple.Carbon.TISNotifySelectedKeyboardInputSourceChanged',
None,
Foundation.NSNotificationSuspensionBehaviorDeliverImmediately
)
AppHelper.runConsoleEventLoop(installInterrupt=True)
def _update_layout(self):
char_to_key_sequence, key_sequence_to_char, modifier_masks = KeyboardLayout._get_layout()
self._char_to_key_sequence = char_to_key_sequence
self._key_sequence_to_char = key_sequence_to_char
self._modifier_masks = modifier_masks
self._deadkey_symbol_to_key_sequence = self._deadkeys_by_symbols()
def deadkey_symbol_to_key_sequence(self, symbol):
return self._deadkey_symbol_to_key_sequence.get(symbol, DEFAULT_SEQUENCE)
def key_code_to_char(self, key_code, modifier=0):
"""Provide a key code and a modifier and it provides the character"""
return get_printable_string(
self._key_sequence_to_char[key_code, modifier]
)
def char_to_key_sequence(self, char):
"""Finds the key code and modifier sequence for the character"""
key_sequence = self._char_to_key_sequence.get(char, DEFAULT_SEQUENCE)
if not isinstance(key_sequence[0], tuple):
key_sequence = key_sequence,
return key_sequence
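
    # Illustrative note (added, layout-dependent): on a U.S. layout,
    # char_to_key_sequence('A') is roughly ((0, 2),) -- key code 0 ('a')
    # combined with the shift modifier mask from _modifier_dictionary.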
def _deadkeys_by_symbols(self):
# We store deadkeys as "characters"; dkX, where X is the symbol.
symbols = {
'`': '`',
'´': '´',
'^': ('^', 'ˆ'),
'~': ('~', '˜'),
'¨': '¨',
}
deadkeys_by_symbol = {}
for symbol, equivalent_symbols in symbols.items():
for equivalent_symbol in equivalent_symbols:
sequence = self.char_to_key_sequence('dk%s' % equivalent_symbol)
if sequence[0][0] is not None:
deadkeys_by_symbol[symbol] = sequence
return deadkeys_by_symbol
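
    # Illustrative note (added): on layouts with dead keys (e.g. '^' on a
    # German layout), deadkey_symbol_to_key_sequence('^') would yield the
    # physical key sequence that arms the circumflex dead-key state.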
def format_modifier_header(self):
modifiers = (
'| {}\t'.format(KeyboardLayout._modifier_string(mod)).expandtabs(8)
for mod in sorted(self._modifier_masks.values())
)
header = 'Keycode\t{}'.format(''.join(modifiers))
return '%s\n%s' % (header, re.sub(r'[^|]', '-', header))
def format_keycode_keys(self, keycode):
"""Returns all the variations of the keycode with modifiers"""
keys = ('| {}\t'.format(get_printable_string(
self._key_sequence_to_char[keycode, mod])).expandtabs(8)
for mod in sorted(self._modifier_masks.values()))
return '{}\t{}'.format(keycode, ''.join(keys)).expandtabs(8)
@staticmethod
def _modifier_dictionary(modifier_mask):
"""Provide a dictionary of active modifier keys from mod mask"""
modifiers = {
SHIFT: False,
COMMAND: False,
CONTROL: False,
OPTION: False,
CAPS: False,
UNKNOWN: False,
UNKNOWN_L: False
}
if modifier_mask & 16:
modifiers[CONTROL] = True
if modifier_mask & 8:
modifiers[OPTION] = True
if modifier_mask & 4:
modifiers[CAPS] = True
if modifier_mask & 2:
modifiers[SHIFT] = True
if modifier_mask & 1:
modifiers[COMMAND] = True
return modifiers
@staticmethod
def _modifier_string(modifier_mask):
"""Turn modifier mask into string representing modifiers"""
s = ''
modifiers = KeyboardLayout._modifier_dictionary(modifier_mask)
for key in modifiers:
s += key if modifiers[key] else ''
return s
@staticmethod
def _get_layout():
keyboard_input_source = carbon.TISCopyCurrentKeyboardInputSource()
layout_source = carbon.TISGetInputSourceProperty(
keyboard_input_source, kTISPropertyUnicodeKeyLayoutData
)
        # Some keyboard layouts don't return UnicodeKeyLayoutData, so we
        # fall back to a different source.
if layout_source is None:
carbon.CFRelease(keyboard_input_source)
keyboard_input_source = carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource()
layout_source = carbon.TISGetInputSourceProperty(
keyboard_input_source, kTISPropertyUnicodeKeyLayoutData
)
layout_size = carbon.CFDataGetLength(layout_source)
layout_buffer = ctypes.create_string_buffer(b'\000' * layout_size)
carbon.CFDataGetBytes(
layout_source, CFRange(0, layout_size), ctypes.byref(layout_buffer)
)
keyboard_type = carbon.LMGetKbdType()
parsed_layout = KeyboardLayout._parse_layout(
layout_buffer, keyboard_type
)
carbon.CFRelease(keyboard_input_source)
return parsed_layout
@staticmethod
def _parse_layout(buf, ktype):
hf, dv, featureinfo, ktcount = struct.unpack_from('HHII', buf)
offset = struct.calcsize('HHII')
ktsize = struct.calcsize('IIIIIII')
kts = [struct.unpack_from('IIIIIII', buf, offset + ktsize * i)
for i in range(ktcount)]
for i, kt in enumerate(kts):
if kt[0] <= ktype <= kt[1]:
kentry = i
break
else:
kentry = 0
ktf, ktl, modoff, charoff, sroff, stoff, seqoff = kts[kentry]
# Modifiers
mf, deftable, mcount = struct.unpack_from('HHI', buf, modoff)
modtableoff = modoff + struct.calcsize('HHI')
modtables = struct.unpack_from('B' * mcount, buf, modtableoff)
modifier_masks = {}
for i, table in enumerate(modtables):
modifier_masks.setdefault(table, i)
# Sequences
sequences = []
if seqoff:
sf, scount = struct.unpack_from('HH', buf, seqoff)
seqtableoff = seqoff + struct.calcsize('HH')
lastoff = -1
for soff in struct.unpack_from('H' * scount, buf, seqtableoff):
if lastoff >= 0:
sequences.append(
buf[seqoff + lastoff:seqoff + soff].decode('utf-16'))
lastoff = soff
def lookupseq(key):
if key >= 0xFFFE:
return None
if key & 0xC000:
seq = key & ~0xC000
if seq < len(sequences):
return sequences[seq]
return chr(key)
# Dead keys
deadkeys = []
if sroff:
srf, srcount = struct.unpack_from('HH', buf, sroff)
srtableoff = sroff + struct.calcsize('HH')
for recoff in struct.unpack_from('I' * srcount, buf, srtableoff):
cdata, nextstate, ecount, eformat = struct.unpack_from('HHHH',
buf,
recoff)
recdataoff = recoff + struct.calcsize('HHHH')
edata = buf[recdataoff:recdataoff + 4 * ecount]
deadkeys.append((cdata, nextstate, ecount, eformat, edata))
if stoff:
stf, stcount = struct.unpack_from('HH', buf, stoff)
sttableoff = stoff + struct.calcsize('HH')
dkterms = struct.unpack_from('H' * stcount, buf, sttableoff)
else:
dkterms = []
# Get char tables
char_to_key_sequence = {}
key_sequence_to_char = {}
deadkey_state_to_key_sequence = {}
key_sequence_to_deadkey_state = {}
def favored_modifiers(modifier_a, modifier_b):
"""0 if they are equal, 1 if a is better, -1 if b is better"""
a_favored_over_b = 0
modifiers_a = KeyboardLayout._modifier_dictionary(modifier_a)
modifiers_b = KeyboardLayout._modifier_dictionary(modifier_b)
count_a = popcount_8(modifier_a)
count_b = popcount_8(modifier_b)
# Favor no CAPS modifier
if modifiers_a[CAPS] and not modifiers_b[CAPS]:
a_favored_over_b = -1
elif modifiers_b[CAPS] and not modifiers_a[CAPS]:
a_favored_over_b = 1
# Favor no command modifier
elif modifiers_a[COMMAND] and not modifiers_b[COMMAND]:
a_favored_over_b = -1
elif modifiers_b[COMMAND] and not modifiers_a[COMMAND]:
a_favored_over_b = 1
# Favor no control modifier
elif modifiers_a[CONTROL] and not modifiers_b[CONTROL]:
a_favored_over_b = -1
elif modifiers_b[CONTROL] and not modifiers_a[CONTROL]:
a_favored_over_b = 1
# Finally, favor fewer modifiers
elif count_a > count_b:
a_favored_over_b = -1
elif count_b > count_a:
a_favored_over_b = 1
return a_favored_over_b
def save_shortest_key_sequence(character, new_sequence):
current_sequence = char_to_key_sequence.setdefault(character,
new_sequence)
if current_sequence != new_sequence:
# Convert responses to tuples...
if not isinstance(current_sequence[0], tuple):
current_sequence = current_sequence,
if not isinstance(new_sequence[0], tuple):
new_sequence = new_sequence,
first_current_better = favored_modifiers(
current_sequence[-1][1], new_sequence[-1][1]
)
last_current_better = favored_modifiers(
current_sequence[0][1], new_sequence[0][1]
)
# Favor key sequence with best modifiers (avoids a short sequence with awful modifiers)
# e.g. ABC Extended wants ¯ from (⌘⌥a,) instead of the saner (⌥a, space)
if first_current_better == last_current_better != 0:
if first_current_better < 0:
char_to_key_sequence[character] = new_sequence
# Favor shortest sequence (fewer separate key presses)
elif len(current_sequence) < len(new_sequence):
pass
elif len(new_sequence) < len(current_sequence):
char_to_key_sequence[character] = new_sequence[0]
# Favor fewer modifiers on last item
elif last_current_better < 0:
char_to_key_sequence[character] = new_sequence
# Favor lower modifiers on first item if last item is the same
elif last_current_better == 0 and first_current_better < 0:
char_to_key_sequence[character] = new_sequence
def lookup_and_add(key, j, mod):
ch = lookupseq(key)
save_shortest_key_sequence(ch, (j, mod))
key_sequence_to_char[j, mod] = ch
cf, csize, ccount = struct.unpack_from('HHI', buf, charoff)
chartableoff = charoff + struct.calcsize('HHI')
for i, table_offset in enumerate(
struct.unpack_from('I' * ccount, buf, chartableoff)):
mod = modifier_masks[i]
for j, key in enumerate(
struct.unpack_from('H' * csize, buf, table_offset)):
if key == 65535:
key_sequence_to_char[j, mod] = 'mod'
elif key >= 0xFFFE:
key_sequence_to_char[j, mod] = '<{}>'.format(key)
elif key & 0x0C000 == 0x4000:
dead = key & ~0xC000
if dead < len(deadkeys):
cdata, nextstate, ecount, eformat, edata = deadkeys[dead]
if eformat == 0 and nextstate:
# Initial: symbols, e.g. `, ~, ^
new_deadkey = (j, mod)
current_deadkey = deadkey_state_to_key_sequence.setdefault(
nextstate, new_deadkey
)
if new_deadkey != current_deadkey:
if favored_modifiers(current_deadkey[1],
new_deadkey[1]) < 0:
deadkey_state_to_key_sequence[nextstate] = new_deadkey
if nextstate - 1 < len(dkterms):
base_key = lookupseq(dkterms[nextstate - 1])
dead_key_name = 'dk{}'.format(base_key)
else:
dead_key_name = 'dk#{}'.format(nextstate)
key_sequence_to_char[j, mod] = dead_key_name
save_shortest_key_sequence(dead_key_name, (j, mod))
elif eformat == 1 or (eformat == 0 and not nextstate):
# Terminal: letters, e.g. a, e, o, A, E, O
key_sequence_to_deadkey_state[j, mod] = deadkeys[dead]
lookup_and_add(cdata, j, mod)
elif eformat == 2: # range
# TODO!
pass
else:
lookup_and_add(key, j, mod)
for key, dead in key_sequence_to_deadkey_state.items():
j, mod = key
cdata, nextstate, ecount, eformat, edata = dead
entries = [struct.unpack_from('HH', edata, i * 4) for i in
range(ecount)]
for state, key in entries:
# Ignore if unknown state...
if state in deadkey_state_to_key_sequence:
dj, dmod = deadkey_state_to_key_sequence[state]
ch = lookupseq(key)
save_shortest_key_sequence(ch, ((dj, dmod), (j, mod)))
key_sequence_to_char[(dj, dmod), (j, mod)] = ch
char_to_key_sequence['\n'] = (36, 0)
char_to_key_sequence['\r'] = (36, 0)
char_to_key_sequence['\t'] = (48, 0)
return char_to_key_sequence, key_sequence_to_char, modifier_masks
if __name__ == '__main__':
layout = KeyboardLayout(False)
print(KEY_CODE_VISUALIZATION)
print()
print(layout.format_modifier_header())
for keycode in range(127):
print(layout.format_keycode_keys(keycode))
print()
unmapped_characters = []
for char, keyname in sorted(CHAR_TO_KEYNAME.items()):
sequence = []
for code, mod in layout.char_to_key_sequence(char):
if code is not None:
sequence.append(
(code, '{}{}'.format(
layout._modifier_string(mod),
layout.key_code_to_char(code, 0)
))
)
else:
unmapped_characters.append(char)
if sequence:
print('Name:\t\t{}\nCharacter:\t{}\nSequence:\t‘{}’\nBase codes:\t‘{}’\n'.format(
keyname, char, '’, ‘'.join(combo[1] for combo in sequence),
'’, ‘'.join(str(combo[0]) for combo in sequence)
))
print('No mapping on this layout for characters: ‘{}’'.format(
'’, ‘'.join(unmapped_characters)
))
|
RGreinacher/bachelor-thesis
|
refs/heads/master
|
Apps/Vorverarbeitung/create_annotated_corpus.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import re
import json
import argparse
import random
import math
import os
import copy
import sys
from os import listdir
from os.path import isfile, join
from pprint import pprint as pp
# import project libs
sys.path.append('../Auswertung')
import compare_annotations
# defining globals & constants
SUBJECTS_TABEL_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/subjects_tabel.json'
QUESTIONNAIRE_DOCUMENT_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/questionnaire_document.json'
# PLAIN_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Auswertung/test_text/raw/'
# GOLD_ANNOTATED_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Auswertung/test_text/gold-annotiert/'
PLAIN_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/Implisense/json/'
GOLD_ANNOTATED_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/Implisense/json gold/'
SUBJECT_CORPUS_FOLDER = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/VP vorbereitet'
CORPUS_SUFFIX = 'preannotated_subject_corpus_document'
# methods
def read_subject_table():
def flatten_annotations_per_block(blocks_per_subject):
# return a list of paragraphs for each subject
subjects = []
for blocks_for_subject in blocks_per_subject:
paragraphs = []
for block in blocks_for_subject:
for paragraph in block:
paragraphs.append(paragraph)
subjects.append(paragraphs)
return subjects
file_handler = open(SUBJECTS_TABEL_JSON, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
blocks = json.JSONDecoder().decode(raw_content)
return flatten_annotations_per_block(blocks)
def read_questionnaire_document():
file_handler = open(QUESTIONNAIRE_DOCUMENT_JSON, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
return json.JSONDecoder().decode(raw_content)
def read_corpus_files(path):
def clean_unrecognized_labels(corpus):
for document in corpus:
for paragraph in document['data']:
for sentence in paragraph:
for token in sentence:
if 'annotation' in token and token['annotation']['label'] not in LABELS:
del token['annotation']
corpus = []
for file_name in sorted(listdir(path)):
if not (isfile(join(path, file_name)) and file_name.endswith('.json')): continue
file_handler = open(path + file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
decoded_content = json.JSONDecoder().decode(raw_content)
corpus.append(decoded_content)
# clean_unrecognized_labels(corpus)
return corpus
def create_annotated_corpus(subject_corpus, gold_annotated_corpus, subject_annotation_classes):
total_paragraph_index = -1
for document_index, document in enumerate(gold_annotated_corpus):
for paragraph_index, paragraph in enumerate(document['data']):
total_paragraph_index += 1
for sentence_index, sentence in enumerate(paragraph):
annotation_classes_of_sentence = subject_annotation_classes[total_paragraph_index][sentence_index]
if len(annotation_classes_of_sentence) == 0: continue
annotation_number = 0
for token_index, token in enumerate(sentence):
next_annotation_class = annotation_classes_of_sentence[annotation_number]
subject_sentence = subject_corpus[document_index]['data'][paragraph_index][sentence_index]
# look ahead annotation for class 4 annotations
if next_annotation_class == 4:
(new_subject_sentence, _) = unnecessary_annotation(subject_sentence, token_index, sentence)
# skip if token not annotated
elif not ('annotation' in token):
continue
# if the current token is annotated, apply the specified annotation class
else:
manipulator = ANNOTATION_CLASSES[next_annotation_class]
(new_subject_sentence, _) = manipulator(subject_sentence, token_index, sentence)
subject_corpus[document_index]['data'][paragraph_index][sentence_index] = new_subject_sentence
annotation_number += 1
if annotation_number >= len(annotation_classes_of_sentence): break
return subject_corpus
# annotation manipulation
def manipulate_span(sentence, token_index, reference_sentence):
length = reference_sentence[token_index]['annotation']['length']
new_start_index = token_index
index_offset = 0
possible_offsets_reference = possible_start_index_offsets(reference_sentence, token_index)
possible_offsets_sentence = possible_start_index_offsets(sentence, token_index)
possible_offsets = list(set(possible_offsets_reference).intersection(possible_offsets_sentence))
if not (len(possible_offsets) == 0):
index_offset = random.choice(possible_offsets)
new_start_index += index_offset
sentence[new_start_index]['annotation'] = copy.deepcopy(reference_sentence[token_index]['annotation'])
# chunk has a new beginning but still the same length - let's change that!
length_with_offset = (length - index_offset)
if index_offset < 0:
min_length = length_with_offset - (length - 1)
if min_length < 1: min_length = 1
max_length = maximum_chunk_length(reference_sentence, token_index + 1) - index_offset
else:
min_length = length_with_offset
max_length = maximum_chunk_length(reference_sentence, new_start_index)
if min_length == length and length + 1 <= max_length:
min_length += 1
length = random.choice(range(min_length, max_length + 1))
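# cap manipulated chunks at four tokens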
if length > 4:
length = 4
sentence[new_start_index]['annotation']['length'] = length
return (sentence, new_start_index)
def manipulate_cat(sentence, token_index, reference_sentence):
annotated_token = reference_sentence[token_index]
new_label = change_label(annotated_token['annotation']['label'])
annotated_token['annotation']['label'] = new_label
sentence[token_index] = annotated_token
return (sentence, token_index)
def manipulate_cat_span(sentence, token_index, reference_sentence):
(sentence, token_index) = manipulate_span(sentence, token_index, reference_sentence)
return manipulate_cat(sentence, token_index, sentence)
def correct_annotation(sentence, token_index, reference_sentence):
sentence[token_index] = copy.deepcopy(reference_sentence[token_index])
return (sentence, token_index)
def no_annotation(sentence, token_index, reference_sentence):
return (sentence, token_index)
def unnecessary_annotation(sentence, token_index, reference_sentence):
if token_index == 0:
begin = 0
else:
if not 'annotation' in reference_sentence[token_index - 1]: # DEBUG!
print('DEBUG token_index', token_index, 'sentence:')
pp(reference_sentence)
current_annotation_length = reference_sentence[token_index - 1]['annotation']['length']
begin = token_index - 1 + current_annotation_length
end = begin + maximum_chunk_length(reference_sentence, begin)
annotation_index = random.choice(range(begin, end))
max_length = maximum_chunk_length(reference_sentence, annotation_index)
annotation_label = random.choice(LABELS)
annotation_length = 1
if max_length >= 3:
annotation_length = random.choice([1, 2, 3])
elif max_length == 2:
annotation_length = random.choice([1, 2])
sentence[annotation_index]['annotation'] = {
'label': annotation_label,
'length': annotation_length
}
return (sentence, annotation_index)
# helpers
def possible_start_index_offsets(sentence, token_index):
possible_offsets = []
length = 3 # >= 2
if 'annotation' in sentence[token_index]:
length = sentence[token_index]['annotation']['length']
max_left_shift = token_index - earliest_chunk_start_index(sentence, token_index)
max_right_shift = maximum_chunk_length(sentence, token_index) - 1
if max_left_shift >= 2 and length > 2:
possible_offsets = [-2, -1]
elif max_left_shift >= 1:
possible_offsets = [-1]
if max_right_shift >= 2 and length > 2:
possible_offsets += [1, 2]
elif max_right_shift >= 1 and length >= 2:
possible_offsets += [1]
return possible_offsets
def earliest_chunk_start_index(sentence, start_index):
sentence_length = len(sentence)
if start_index == 0:
return 0
earliest_index = start_index
for iteration_index in range((start_index - 1), -1, -1):
earliest_index -= 1
if 'annotation' in sentence[iteration_index]:
length = sentence[iteration_index]['annotation']['length']
earliest_index += length
break
return earliest_index
def maximum_chunk_length(sentence, start_index):
sentence_length = len(sentence)
if start_index == sentence_length - 1:
return 1
max_chunk_length = 1
for iteration_index in range((start_index + 1), sentence_length):
if 'annotation' in sentence[iteration_index]:
break
max_chunk_length = max_chunk_length + 1
return max_chunk_length
def change_label(label):
index = LABELS.index(label)
if index > 0: return LABELS[0]
return LABELS[1]
def save_document_to_file(corpus, subject_id):
for document_index, document in enumerate(corpus):
prefix = chr(97 + document_index)
folder_postfix = subject_id
if subject_id < 10:
folder_postfix = "%s%s" % (0, subject_id)
subject_folder = "%s/VP%s" % (SUBJECT_CORPUS_FOLDER, folder_postfix)
file_name = "%s_%s.json" % (prefix, CORPUS_SUFFIX)
file_path = "%s/%s" % (subject_folder, file_name)
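# e.g. subject 3, second document -> ".../VP vorbereitet/VP03/b_preannotated_subject_corpus_document.json"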
if not os.path.exists(subject_folder):
os.makedirs(subject_folder)
json_encoded_document = json.dumps(document)
file_handler = open(file_path, 'w')
file_handler.write(json_encoded_document)
file_handler.close()
# experiment definitions
ANNOTATION_CLASSES = {
0 : correct_annotation,
1 : manipulate_span,
2 : manipulate_cat,
3 : manipulate_cat_span,
4 : unnecessary_annotation,
5 : no_annotation
}
LABELS = [
'PER', 'COM'
]
# entry point as a stand alone script
if __name__ == '__main__':
subjects_table = read_subject_table()
questionnaire_document = read_questionnaire_document()
for subject_id, subject_annotation_classes in enumerate(subjects_table):
# for i in range(0, 20):
# subject_id = i
# subject_annotation_classes = subjects_table[i]
print('create corpus for subject #', subject_id)
gold_annotated_corpus = read_corpus_files(GOLD_ANNOTATED_CORPUS_FILES)
plain_corpus = read_corpus_files(PLAIN_CORPUS_FILES)
subject_corpus = create_annotated_corpus(plain_corpus, gold_annotated_corpus, subject_annotation_classes)
subject_corpus.insert(0, questionnaire_document)
save_document_to_file(subject_corpus, subject_id)
|
moijes12/oh-mainline
|
refs/heads/master
|
vendor/packages/distribute/setuptools/tests/server.py
|
62
|
"""Basic http server for tests to simulate PyPI or custom indexes
"""
import urllib2
import sys
from threading import Thread
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self):
HTTPServer.__init__(self, ('', 0), SimpleHTTPRequestHandler)
self._run = True
def serve(self):
while True:
self.handle_request()
if not self._run: break
def start(self):
self.thread = Thread(target=self.serve)
self.thread.start()
def stop(self):
"""self.shutdown is not supported on python < 2.6"""
self._run = False
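# fire one last request so the blocking handle_request() call in serve()
# returns and the loop can observe that _run is now False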
try:
if sys.version > '2.6':
urllib2.urlopen('http://127.0.0.1:%s/' % self.server_port,
None, 5)
else:
urllib2.urlopen('http://127.0.0.1:%s/' % self.server_port)
except urllib2.URLError:
pass
self.thread.join()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
|
edmundgentle/schoolscript
|
refs/heads/master
|
SchoolScript/bin/Debug/pythonlib/Lib/urllib/response.py
|
3
|
"""Response classes used by urllib.
The base class, addbase, defines a minimal file-like interface,
including read() and readline(). The typical response object is an
addinfourl instance, which defines an info() method that returns
headers and a geturl() method that returns the url.
"""
class addbase(object):
"""Base class for addinfo and addclosehook."""
# XXX Add a method to expose the timeout on the underlying socket?
def __init__(self, fp):
# TODO(jhylton): Is there a better way to delegate using io?
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
# TODO(jhylton): Make sure an object with readlines() is also iterable
if hasattr(self.fp, "readlines"):
self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
def __iter__(self):
# Assigning `__iter__` to the instance doesn't work as intended
# because the iter builtin does something like `cls.__iter__(obj)`
# and thus fails to find the _bound_ method `obj.__iter__`.
# Returning just `self.fp` works for built-in file objects but
# might not work for general file-like objects.
return iter(self.fp)
def __repr__(self):
return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
id(self), self.fp)
def close(self):
self.read = None
self.readline = None
self.readlines = None
self.fileno = None
if self.fp: self.fp.close()
self.fp = None
def __enter__(self):
if self.fp is None:
raise ValueError("I/O operation on closed file")
return self
def __exit__(self, type, value, traceback):
self.close()
class addclosehook(addbase):
"""Class to add a close hook to an open file."""
def __init__(self, fp, closehook, *hookargs):
addbase.__init__(self, fp)
self.closehook = closehook
self.hookargs = hookargs
def close(self):
addbase.close(self)
if self.closehook:
self.closehook(*self.hookargs)
self.closehook = None
self.hookargs = None
class addinfo(addbase):
"""class to add an info() method to an open file."""
def __init__(self, fp, headers):
addbase.__init__(self, fp)
self.headers = headers
def info(self):
return self.headers
class addinfourl(addbase):
"""class to add info() and geturl() methods to an open file."""
def __init__(self, fp, headers, url, code=None):
addbase.__init__(self, fp)
self.headers = headers
self.url = url
self.code = code
def info(self):
return self.headers
def getcode(self):
return self.code
def geturl(self):
return self.url
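# A minimal usage sketch (not part of the original module): wrap an
# in-memory stream the way urlopen() wraps a socket's file object. The
# URL, headers and status code below are illustrative assumptions.
def _addinfourl_demo():
    import io
    resp = addinfourl(io.BytesIO(b'hello'), {'Content-Type': 'text/plain'},
                      'http://example.com/', code=200)
    assert resp.read() == b'hello'
    assert resp.info() == {'Content-Type': 'text/plain'}
    assert resp.geturl() == 'http://example.com/'
    assert resp.getcode() == 200
    resp.close()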
|
MjAbuz/watchdog
|
refs/heads/master
|
vendor/rdflib-2.4.0/test/sparql/testSPARQL.py
|
4
|
#!/d/Bin/Python/python.exe
# -*- coding: utf-8 -*-
#
#
# $Date: 2005/04/02 07:29:30 $, by $Author: ivan $, $Revision: 1.1 $
#
"""
"""
import sys, os, time, datetime
from rdflib.constants import RDFNS as ns_rdf
from rdflib.constants import RDFSNS as ns_rdfs
#from rdflib.sparql import ns_dc as ns_dc
#from rdflib.sparql import ns_owl as ns_owl
from rdflib.sparql.sparql import type_integer
from rdflib.sparql.sparql import type_double
from rdflib.sparql.sparql import type_float
from rdflib.sparql.sparql import type_decimal
from rdflib.sparql.sparql import type_dateTime
from rdflib.Namespace import Namespace
ns_foaf = Namespace("http://xmlns.com/foaf/0.1/")
ns_ns = Namespace("http://example.org/ns#")
ns_book = Namespace("http://example.org/book")
ns_person = Namespace("http://example.org/person#")
ns_dt = Namespace("http://example.org/datatype#")
ns_dc0 = Namespace("http://purl.org/dc/elements/1.0/")
ns_dc = Namespace("http://purl.org/dc/elements/1.1/")
ns_vcard = Namespace("http://www.w3.org/2001/vcard-rdf/3.0#")
|
xdatravelbug/N909D_Kernel_JB_4.1.2
|
refs/heads/master
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
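# both variables are normally exported by the perf Makefile before this
# script is invoked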
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
sgraham/nope
|
refs/heads/master
|
tools/gyp/test/win/gyptest-system-include.py
|
120
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that msvs_system_include_dirs works.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'system-include'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
vCentre/vFRP-6233
|
refs/heads/master
|
frappe/workflow/doctype/workflow_document_state/workflow_document_state.py
|
73
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class WorkflowDocumentState(Document):
pass
|
dezynetechnologies/odoo
|
refs/heads/8.0
|
addons/hr_recruitment/report/hr_recruitment_report.py
|
325
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_recruitment_report(osv.Model):
_name = "hr.recruitment.report"
_description = "Recruitments Statistics"
_auto = False
_rec_name = 'date_create'
_order = 'date_create desc'
_columns = {
'user_id': fields.many2one('res.users', 'User', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'date_create': fields.datetime('Create Date', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'date_closed': fields.date('Closed', readonly=True),
'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True),
'stage_id': fields.many2one('hr.recruitment.stage', 'Stage'),
'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
'department_id': fields.many2one('hr.department','Department',readonly=True),
'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'partner_id': fields.many2one('res.partner', 'Partner',readonly=True),
'available': fields.float("Availability"),
'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the project issue"),
'last_stage_id': fields.many2one('hr.recruitment.stage', 'Last Stage'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_recruitment_report')
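# the report is backed by this SQL view (hence _auto = False above),
# recreated on every module init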
cr.execute("""
create or replace view hr_recruitment_report as (
select
min(s.id) as id,
s.create_date as date_create,
date(s.date_closed) as date_closed,
s.date_last_stage_update as date_last_stage_update,
s.partner_id,
s.company_id,
s.user_id,
s.job_id,
s.type_id,
sum(s.availability) as available,
s.department_id,
s.priority,
s.stage_id,
s.last_stage_id,
sum(salary_proposed) as salary_prop,
(sum(salary_proposed)/count(*)) as salary_prop_avg,
sum(salary_expected) as salary_exp,
(sum(salary_expected)/count(*)) as salary_exp_avg,
extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close,
count(*) as nbr
from hr_applicant s
group by
s.date_open,
s.create_date,
s.write_date,
s.date_closed,
s.date_last_stage_update,
s.partner_id,
s.company_id,
s.user_id,
s.stage_id,
s.last_stage_id,
s.type_id,
s.priority,
s.job_id,
s.department_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zspotter/dream-link
|
refs/heads/master
|
src/app/models.py
|
1
|
from google.appengine.ext import ndb
class Dream( ndb.Model ):
tags = ndb.StringProperty(repeated=True)
def to_dict( self ):
return { 'key' : self.key.urlsafe(),
'tags' : self.tags }
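# Usage sketch (illustrative; assumes a configured App Engine ndb context):
# Dream(tags=['flying']).put() stores an entity, and to_dict() then yields
# {'key': <urlsafe key string>, 'tags': ['flying']} for a JSON response.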
|
luzi82/HiSocial
|
refs/heads/master
|
core/user/Permission.py
|
1
|
from user.GroupPermission import GroupPermission
from user.UserGroup import UserGroup
from sqlalchemy.sql.expression import desc
def get_user_permission(session,user_id,permission_name):
'''
Get permission of a user
:type session: sqlalchemy.orm.session.Session
:param session: sqlalchemy DB Session
:type user_id: str
:param user_id: The user id
:type permission_name: str
:param permission_name: The permission id
:rtype: boolean
:return: permission enabled
'''
q=session.\
query(GroupPermission.enable).\
filter(UserGroup.user_id==user_id).\
filter(GroupPermission.permission_name==permission_name).\
filter(GroupPermission.group_id==UserGroup.group_id).\
order_by(desc(GroupPermission.order)).\
first()
if q is None: return False
return q[0]
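# A short usage sketch (illustrative; the engine/session setup and the
# 'user-42'/'post_message' ids below are assumptions, not part of this
# module):
#
# from sqlalchemy.orm import sessionmaker
# session = sessionmaker(bind=engine)()
# if get_user_permission(session, 'user-42', 'post_message'):
#     ...  # allowed by the highest-order matching group rule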
|
0x0all/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/tests/test_pca.py
|
25
|
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise std of the first 50 features is 3 times the
# mean component-wise std of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# sanity check: we can recover the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# sanity check: we can recover the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
"""Test that probabilistic PCA scoring yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
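# h is the expected log-likelihood per sample under the true isotropic
# Gaussian (sigma = 0.1), so the model's average score should be close to it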
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
"""Check that probabilistic PCA selects the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
UTSA-ICS/python-keystoneclient-SID
|
refs/heads/master
|
keystoneclient/v3/credentials.py
|
3
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import utils
class Credential(base.Resource):
"""Represents an Identity credential.
Attributes:
* id: a uuid that identifies the credential
"""
pass
class CredentialManager(base.CrudManager):
"""Manager class for manipulating Identity credentials."""
resource_class = Credential
collection_key = 'credentials'
key = 'credential'
def _get_data_blob(self, blob, data):
# Ref bug #1259461, the <= 0.4.1 keystoneclient calling convention was
# to pass "data", but the underlying API expects "blob", so
# support both in the python API for backwards compatibility
if blob is not None:
return blob
elif data is not None:
# FIXME(shardy): Passing data is deprecated. Provide an
# appropriate warning.
return data
else:
raise ValueError(
"Credential requires blob to be specified")
@utils.positional(1, enforcement=utils.positional.WARN)
def create(self, user, type, blob=None, data=None, project=None, **kwargs):
return super(CredentialManager, self).create(
user_id=base.getid(user),
type=type,
blob=self._get_data_blob(blob, data),
project_id=base.getid(project),
**kwargs)
def get(self, credential):
return super(CredentialManager, self).get(
credential_id=base.getid(credential))
def list(self, **kwargs):
"""List credentials.
If ``**kwargs`` are provided, then filter credentials with
attributes matching ``**kwargs``.
"""
return super(CredentialManager, self).list(**kwargs)
@utils.positional(2, enforcement=utils.positional.WARN)
def update(self, credential, user, type=None, blob=None, data=None,
project=None, **kwargs):
return super(CredentialManager, self).update(
credential_id=base.getid(credential),
user_id=base.getid(user),
type=type,
blob=self._get_data_blob(blob, data),
project_id=base.getid(project),
**kwargs)
def delete(self, credential):
return super(CredentialManager, self).delete(
credential_id=base.getid(credential))
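# A short usage sketch (illustrative; the client construction and the ec2
# blob below are assumptions, not part of this module):
#
# from keystoneclient.v3 import client
# keystone = client.Client(session=sess)
# cred = keystone.credentials.create(user=user_id, type='ec2',
#                                    blob='{"access": "...", "secret": "..."}')
# keystone.credentials.get(cred)     # fetch it back
# keystone.credentials.delete(cred)  # clean up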
|
stewartpark/django
|
refs/heads/master
|
django/db/backends/mysql/features.py
|
1
|
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_autofield = True
can_introspect_binary_field = False
can_introspect_small_integer_field = True
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
uses_savepoints = True
can_release_savepoints = True
atomic_transactions = False
supports_column_check_constraints = False
can_clone_databases = True
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
with self.connection.cursor() as cursor:
cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'")
result = cursor.fetchone()
return result[0]
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def can_return_last_inserted_id_with_auto_is_null(self):
with self.connection.cursor() as cursor:
cursor.execute("SELECT @@SQL_AUTO_IS_NULL")
return cursor.fetchone()[0] == 1
@cached_property
def supports_microsecond_precision(self):
# See https://github.com/farcepest/MySQLdb1/issues/24 for the reason
# about requiring MySQLdb 1.2.5
return self.connection.mysql_version >= (5, 6, 4) and Database.version_info >= (1, 2, 5)
@cached_property
def has_zoneinfo_database(self):
# MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
# abbreviations (eg. EAT). When pytz isn't installed and the current
# time zone is LocalTimezone (the only sensible value in this
# context), the current time zone name will be an abbreviation. As a
# consequence, MySQL cannot perform time zone conversions reliably.
if pytz is None:
return False
# Test if the time zone definitions are installed.
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
return cursor.fetchone() is not None
def introspected_boolean_field_type(self, *args, **kwargs):
return 'IntegerField'
|
takis/django
|
refs/heads/master
|
tests/validation/test_custom_messages.py
|
519
|
from . import ValidationTestCase
from .models import CustomMessagesModel
class CustomMessagesTest(ValidationTestCase):
def test_custom_simple_validator_message(self):
cmm = CustomMessagesModel(number=12)
self.assertFieldFailsValidationWithMessage(cmm.full_clean, 'number', ['AAARGH'])
def test_custom_null_message(self):
cmm = CustomMessagesModel()
self.assertFieldFailsValidationWithMessage(cmm.full_clean, 'number', ['NULL'])
|
valrus/mingus3
|
refs/heads/python3
|
unittest/test_Instrument.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path += ['../']
from mingus.containers.Instrument import Instrument, Piano, Guitar
from mingus.containers.NoteContainer import NoteContainer
import unittest
class test_Instrument(unittest.TestCase):
def setUp(self):
self.i = Instrument()
self.p = Piano()
self.g = Guitar()
self.notes = NoteContainer(['A', 'B', 'C', 'D', 'E'])
self.noteslow = NoteContainer(['C-0', 'D-0', 'E-0'])
self.noteshigh = NoteContainer(['A-12', 'B-12', 'C-12', 'D-12', 'E-12'])
def test_note_in_range(self):
for x in self.notes:
self.assert_(self.i.note_in_range(x))
self.assert_(self.p.note_in_range(x))
self.assert_(self.g.note_in_range(x))
for x in self.noteslow + self.noteshigh:
self.assertEqual(False, self.p.note_in_range(x),
'%s should not be able to be played by a Piano'
% x)
self.assertEqual(False, self.g.note_in_range(x),
'%s should not be able to be played by a Guitar'
% x)
def test_can_play_notes(self):
self.assert_(self.i.can_play_notes(self.notes))
self.assert_(self.p.can_play_notes(self.notes))
self.assert_(self.g.can_play_notes(self.notes))
self.assertEqual(False, self.p.can_play_notes(self.noteslow))
self.assertEqual(False, self.g.can_play_notes(self.noteslow))
self.assertEqual(False, self.p.can_play_notes(self.noteshigh))
self.assertEqual(False, self.g.can_play_notes(self.noteshigh))
self.assertEqual(False, self.g.can_play_notes(NoteContainer([
'A',
'B',
'C',
'D',
'E',
'F',
'G',
])))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_Instrument)
|
jimberlage/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/python.py
|
32
|
""" Python test discovery, setup and run of test functions. """
from __future__ import absolute_import, division, print_function
import fnmatch
import inspect
import sys
import os
import collections
import warnings
from textwrap import dedent
from itertools import count
import py
import six
from _pytest.mark import MarkerError
from _pytest.config import hookimpl
import _pytest
import pluggy
from _pytest import fixtures
from _pytest import nodes
from _pytest import deprecated
from _pytest.compat import (
isclass,
isfunction,
is_generator,
ascii_escaped,
REGEX_TYPE,
STRING_TYPES,
NoneType,
NOTSET,
get_real_func,
getfslineno,
safe_getattr,
safe_str,
getlocation,
enum,
get_default_arg_names,
)
from _pytest.outcomes import fail
from _pytest.mark.structures import transfer_markers, get_unpacked_marks
# relative paths that we use to filter traceback entries from appearing to the user;
# see filter_traceback
# note: if we need to add more paths than what we have now we should probably use a list
# for better maintenance
_pluggy_dir = py.path.local(pluggy.__file__.rstrip("oc"))
# pluggy is either a package or a single module depending on the version
if _pluggy_dir.basename == "__init__.py":
_pluggy_dir = _pluggy_dir.dirpath()
_pytest_dir = py.path.local(_pytest.__file__).dirpath()
_py_dir = py.path.local(py.__file__).dirpath()
def filter_traceback(entry):
"""Return True if a TracebackEntry instance should be removed from tracebacks:
* dynamically generated code (no code to show up for it);
* internal traceback from pytest or its internal libraries, py and pluggy.
"""
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = "<" in raw_filename and ">" in raw_filename
if is_generated:
return False
# entry.path might point to a non-existing file, in which case it will
# also return a str object. see #1133
p = py.path.local(entry.path)
return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto(
_py_dir
)
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(__import__("pytest"), name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),
)
return property(get, None, None, doc)
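# e.g. pyobj_property("Module") builds a property that walks up the
# collection tree and returns the enclosing Module's python object (used
# by PyobjContext below)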
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--fixtures",
"--funcargs",
action="store_true",
dest="showfixtures",
default=False,
help="show available fixtures, sorted by plugin appearance "
"(fixtures with leading '_' are only shown with '-v')",
)
group.addoption(
"--fixtures-per-test",
action="store_true",
dest="show_fixtures_per_test",
default=False,
help="show fixtures per test",
)
parser.addini(
"usefixtures",
type="args",
default=[],
help="list of default fixtures to be used with this project",
)
parser.addini(
"python_files",
type="args",
default=["test_*.py", "*_test.py"],
help="glob-style file patterns for Python test module discovery",
)
parser.addini(
"python_classes",
type="args",
default=["Test"],
help="prefixes or glob names for Python test class discovery",
)
parser.addini(
"python_functions",
type="args",
default=["test"],
help="prefixes or glob names for Python test function and " "method discovery",
)
group.addoption(
"--import-mode",
default="prepend",
choices=["prepend", "append"],
dest="importmode",
help="prepend/append to sys.path when importing test modules, "
"default is to prepend.",
)
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
if config.option.show_fixtures_per_test:
show_fixtures_per_test(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ["parameterize", "parametrise", "parameterise"]
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
for marker in metafunc.definition.iter_markers(name="parametrize"):
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples.",
)
config.addinivalue_line(
"markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures ",
)
@hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini("python_files"):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
return
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
# or a functools.wraps-decorated function.
# We mustn't if it's been wrapped with mock.patch (python 2 only)
if not (isfunction(obj) or isfunction(get_real_func(obj))):
collector.warn(
code="C2",
message="cannot collect %r because it is not a function." % name,
)
elif getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def pytest_make_parametrize_id(config, val, argname=None):
return None
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
_ALLOW_MARKERS = True
def __init__(self, *k, **kw):
super(PyobjMixin, self).__init__(*k, **kw)
def obj():
def fget(self):
obj = getattr(self, "_obj", None)
if obj is None:
self._obj = obj = self._getobj()
# XXX evil hack
# used to avoid Instance collector marker duplication
if self._ALLOW_MARKERS:
self.own_markers.extend(get_unpacked_marks(self.obj))
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
name = os.path.splitext(name)[0]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
if isinstance(compat_co_firstlineno, int):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, nodes.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option("python_functions", name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, "__test__", False) is True
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option("python_classes", name)
def istestfunction(self, obj, name):
if self.funcnamefilter(name) or self.isnosetest(obj):
if isinstance(obj, staticmethod):
# static methods need to be unwrapped
obj = safe_getattr(obj, "__func__", False)
if obj is False:
# Python 2.6 wraps in a different way that we won't try to handle
msg = "cannot collect static method %r because it is not a function"
self.warn(code="C2", message=msg % name)
return False
return (
safe_getattr(obj, "__call__", False)
and fixtures.getfixturemarker(obj) is None
)
else:
return False
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
name, option
):
return True
return False
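# e.g. with python_functions = ["test", "check_*"] in the ini file, the
# method above accepts both "test_foo" (prefix) and "check_bar" (glob)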
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, "__dict__", {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
values = []
for dic in dicts:
for name, obj in list(dic.items()):
if name in seen:
continue
seen[name] = True
res = self._makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
values.extend(res)
values.sort(key=lambda item: item.reportinfo()[:2])
return values
def makeitem(self, name, obj):
warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2)
self._makeitem(name, obj)
def _makeitem(self, name, obj):
# assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
definition = FunctionDefinition(name=name, parent=self, callobj=funcobj)
fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls)
metafunc = Metafunc(
definition, fixtureinfo, self.config, cls=cls, module=module
)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(
methods, dict(metafunc=metafunc)
)
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" % (name, callspec.id)
yield Function(
name=subname,
parent=self,
callspec=callspec,
callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id: True},
originalname=name,
)
class Module(nodes.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._importtestmodule()
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
importmode = self.config.getoption("--import-mode")
try:
mod = self.fspath.pyimport(ensuresyspath=importmode)
except SyntaxError:
raise self.CollectError(
_pytest._code.ExceptionInfo().getrepr(style="short")
)
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules" % e.args
)
except ImportError:
from _pytest._code.code import ExceptionInfo
exc_info = ExceptionInfo()
if self.config.getoption("verbose") < 2:
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
exc_repr = exc_info.getrepr(
style="short"
) if exc_info.traceback else exc_info.exconly()
formatted_tb = safe_str(exc_repr)
raise self.CollectError(
"ImportError while importing test module '{fspath}'.\n"
"Hint: make sure your test modules/packages have valid Python names.\n"
"Traceback:\n"
"{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
)
except _pytest.runner.Skipped as e:
if e.allow_module_level:
raise
raise self.CollectError(
"Using pytest.skip outside of a test is not allowed. "
"To decorate a test function, use the @pytest.mark.skip "
"or @pytest.mark.skipif decorators instead, and to skip a "
"module use `pytestmark = pytest.mark.{skip,skipif}."
)
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
if setup_module is None:
setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
if setup_module is not None:
setup_module()
teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule")
if teardown_module is None:
teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module")
if teardown_module is not None:
self.addfinalizer(teardown_module)
def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
"""
Return a callable to perform xunit-style setup or teardown if
the function exists in the ``holder`` object.
The ``param_obj`` parameter is the parameter which will be passed to the function
when the callable is called without arguments, defaults to the ``holder`` object.
Return ``None`` if a suitable callable is not found.
"""
param_obj = param_obj if param_obj is not None else holder
result = _get_xunit_func(holder, attr_name)
if result is not None:
arg_count = result.__code__.co_argcount
if inspect.ismethod(result):
arg_count -= 1
if arg_count:
return lambda: result(param_obj)
else:
return result
def _get_xunit_func(obj, name):
"""Return the attribute from the given object to be used as a setup/teardown
xunit-style function, but only if not marked as a fixture to
avoid calling it twice.
"""
meth = getattr(obj, name, None)
if fixtures.getfixturemarker(meth) is None:
return meth
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if not safe_getattr(self.obj, "__test__", True):
return []
if hasinit(self.obj):
self.warn(
"C1",
"cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__,
)
return []
elif hasnew(self.obj):
self.warn(
"C1",
"cannot collect test class %r because it has a "
"__new__ constructor" % self.obj.__name__,
)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = _get_xunit_func(self.obj, "setup_class")
if setup_class is not None:
setup_class = getattr(setup_class, "im_func", setup_class)
setup_class = getattr(setup_class, "__func__", setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, "teardown_class", None)
if fin_class is not None:
fin_class = getattr(fin_class, "im_func", fin_class)
fin_class = getattr(fin_class, "__func__", fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
_ALLOW_MARKERS = False # hack, destroy later
# instances share the object with their parents in a way
# that duplicates markers instances if not taken out
# can be removed at node structure reorganization time
def _getobj(self):
return self.parent.obj()
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, "_preservedparent"):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = "setup_method"
teardown_name = "teardown_method"
else:
setup_name = "setup_function"
teardown_name = "teardown_function"
setup_func_or_method = _get_xunit_setup_teardown(
obj, setup_name, param_obj=self.obj
)
if setup_func_or_method is not None:
setup_func_or_method()
teardown_func_or_method = _get_xunit_setup_teardown(
obj, teardown_name, param_obj=self.obj
)
if teardown_func_or_method is not None:
self.addfinalizer(teardown_func_or_method)
def _prunetraceback(self, excinfo):
if hasattr(self, "_obj") and not self.config.option.fulltrace:
code = _pytest._code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style("short")
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(fail.Exception):
if not excinfo.value.pytrace:
return py._builtin._totext(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo, style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
from _pytest import deprecated
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
values = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" % (self.obj, call))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError(
"%r generated tests with non-unique name %r" % (self, name)
)
seen[name] = True
values.append(self.Function(name, self, args=args, callobj=call))
self.warn("C1", deprecated.YIELD_TESTS)
return values
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
# explicit naming
if isinstance(obj[0], six.string_types):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
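# --- Editor's illustrative sketch (not part of this module): the deprecated
# yield-test style that this collector consumes; each yielded tuple becomes
# one Function item via getcallargs() above.
#
#     def test_evens():
#         def check(x):
#             assert x % 2 == 0
#         for i in (0, 2, 4):
#             yield check, i               # auto names "[0]", "[1]", "[2]"
#         yield "named-case", check, 6     # explicit name -> "['named-case']"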
def hasinit(obj):
init = getattr(obj, "__init__", None)
if init:
return init != object.__init__
def hasnew(obj):
new = getattr(obj, "__new__", None)
if new:
return new != object.__new__
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = NOTSET
self._globalid_args = set()
self._globalparam = NOTSET
self._arg2scopenum = {} # used for sorting parametrized resources
self.marks = []
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.marks.extend(self.marks)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" % (arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is NOTSET:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index):
for arg, val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
self._idlist.append(id)
self.marks.extend(marks)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not NOTSET:
self._idlist.append(id)
if param is not NOTSET:
assert self._globalparam is NOTSET
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = fixtures.scopenum_function
class Metafunc(fixtures.FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
"""
def __init__(self, definition, fixtureinfo, config, cls=None, module=None):
assert (
isinstance(definition, FunctionDefinition)
or type(definition).__name__ == "DefinitionMock"
)
self.definition = definition
#: access to the :class:`_pytest.config.Config` object for the test session
self.config = config
#: the module object where the test function is defined in.
self.module = module
#: underlying python test function
self.function = definition.obj
#: set of fixture names required by the test function
self.fixturenames = fixtureinfo.names_closure
#: class object where the test function is defined in or ``None``.
self.cls = cls
self._calls = []
self._ids = set()
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to set up expensive resources,
consider setting ``indirect`` so it happens at test setup time instead.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: A list of argument names (a subset of argnames), or a
boolean. If True, it is treated as the full list of argnames. Each
argvalue corresponding to an argname in this list will be passed as
request.param to its respective argname fixture function, so that it
can perform more expensive setups during the setup phase of a test
rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each corresponds to an argvalue so that it becomes part
of the test id. If None is given as the id of a specific test, the
automatically generated id for that argument will be used.
If callable, it should take one argument (a single argvalue) and return
a string or None; if it returns None, the automatically generated id
for that argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified, it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function-defined scope, allowing
a dynamic scope to be set using test context or configuration.
"""
from _pytest.fixtures import scope2index
from _pytest.mark import ParameterSet
from py.io import saferepr
argnames, parameters = ParameterSet._for_parametrize(
argnames, argvalues, self.function, self.config
)
del argvalues
default_arg_names = set(get_default_arg_names(self.function))
if scope is None:
scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
scopenum = scope2index(scope, descr="call to {}".format(self.parametrize))
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
if arg in default_arg_names:
raise ValueError(
"%r already takes an argument %r with a default value"
% (self.function, arg)
)
else:
if isinstance(indirect, (tuple, list)):
name = "fixture" if arg in indirect else "argument"
else:
name = "fixture" if indirect else "argument"
raise ValueError("%r uses no %s %r" % (self.function, name, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError(
"indirect given to %r: fixture %r doesn't exist"
% (self.function, arg)
)
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids:
if len(ids) != len(parameters):
raise ValueError(
"%d tests specified with %d ids" % (len(parameters), len(ids))
)
for id_value in ids:
if id_value is not None and not isinstance(id_value, six.string_types):
msg = "ids must be list of strings, found: %s (type: %s)"
raise ValueError(
msg % (saferepr(id_value), type(id_value).__name__)
)
ids = idmaker(argnames, parameters, idfn, ids, self.config)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
elements = zip(ids, parameters, count())
for a_id, param, param_index in elements:
if len(param.values) != len(argnames):
raise ValueError(
'In "parametrize" the number of values ({}) must be '
"equal to the number of names ({})".format(
param.values, argnames
)
)
newcallspec = callspec.copy(self)
newcallspec.setmulti2(
valtypes,
argnames,
param.values,
a_id,
param.marks,
scopenum,
param_index,
)
newcalls.append(newcallspec)
self._calls = newcalls
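# --- Editor's illustrative sketch (not part of this module): a typical
# caller of Metafunc.parametrize() from the pytest_generate_tests hook in a
# conftest.py; the fixture name and values are assumptions.
#
#     def pytest_generate_tests(metafunc):
#         if "db_backend" in metafunc.fixturenames:
#             metafunc.parametrize(
#                 "db_backend",
#                 ["sqlite", "postgres"],
#                 indirect=True,    # values reach the fixture as request.param
#                 ids=["lite", "pg"],
#             )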
def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
""" Add a new call to the underlying test function during the collection phase of a test run.
.. deprecated:: 3.3
Use :meth:`parametrize` instead.
Note that addcall() is invoked during the test collection phase, prior
to and independently of actual test execution. You should only use
addcall() if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
if self.config:
self.config.warn(
"C1", message=deprecated.METAFUNC_ADD_CALL, fslocation=None
)
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is NOTSET:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
"""Find the most appropriate scope for a parametrized call based on its arguments.
When there's at least one direct argument, always use "function" scope.
When a test function is parametrized and all its arguments are indirect
(e.g. fixtures), return the most narrow scope based on the fixtures used.
Related to issue #1832, based on code posted by @Kingdread.
"""
from _pytest.fixtures import scopes
indirect_as_list = isinstance(indirect, (list, tuple))
all_arguments_are_fixtures = indirect is True or (
indirect_as_list and len(indirect) == len(argnames)
)
if all_arguments_are_fixtures:
fixturedefs = arg2fixturedefs or {}
used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()]
if used_scopes:
# Takes the most narrow scope from used fixtures
for scope in reversed(scopes):
if scope in used_scopes:
return scope
return "function"
def _idval(val, argname, idx, idfn, config=None):
if idfn:
s = None
try:
s = idfn(val)
except Exception:
# See issue https://github.com/pytest-dev/pytest/issues/2169
import warnings
msg = "Raised while trying to determine id of parameter %s at position %d." % (
argname, idx
)
msg += "\nUpdate your code as this will raise an error in pytest-4.0."
warnings.warn(msg, DeprecationWarning)
if s:
return ascii_escaped(s)
if config:
hook_id = config.hook.pytest_make_parametrize_id(
config=config, val=val, argname=argname
)
if hook_id:
return hook_id
if isinstance(val, STRING_TYPES):
return ascii_escaped(val)
elif isinstance(val, (float, int, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return ascii_escaped(val.pattern)
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"):
return val.__name__
return str(argname) + str(idx)
def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
if parameterset.id is not None:
return parameterset.id
if ids is None or (idx >= len(ids) or ids[idx] is None):
this_id = [
_idval(val, argname, idx, idfn, config)
for val, argname in zip(parameterset.values, argnames)
]
return "-".join(this_id)
else:
return ascii_escaped(ids[idx])
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config)
for valindex, parameterset in enumerate(parametersets)
]
if len(set(ids)) != len(ids):
# The ids are not unique
duplicates = [testid for testid in ids if ids.count(testid) > 1]
counters = collections.defaultdict(lambda: 0)
for index, testid in enumerate(ids):
if testid in duplicates:
ids[index] = testid + str(counters[testid])
counters[testid] += 1
return ids
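# --- Editor's worked example (not part of this module): for
# argnames=("n", "flag") and two parameter sets both holding (1, True), the
# raw ids are both "1-True"; the dedup pass above rewrites them to
# ["1-True0", "1-True1"] by appending per-id counters.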
def show_fixtures_per_test(config):
from _pytest.main import wrap_session
return wrap_session(config, _show_fixtures_per_test)
def _show_fixtures_per_test(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
def get_best_relpath(func):
loc = getlocation(func, curdir)
return curdir.bestrelpath(loc)
def write_fixture(fixture_def):
argname = fixture_def.argname
if verbose <= 0 and argname.startswith("_"):
return
if verbose > 0:
bestrel = get_best_relpath(fixture_def.func)
funcargspec = "{} -- {}".format(argname, bestrel)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
fixture_doc = fixture_def.func.__doc__
if fixture_doc:
write_docstring(tw, fixture_doc)
else:
tw.line(" no docstring available", red=True)
def write_item(item):
try:
info = item._fixtureinfo
except AttributeError:
# doctests items have no _fixtureinfo attribute
return
if not info.name2fixturedefs:
# this test item does not use any fixtures
return
tw.line()
tw.sep("-", "fixtures used by {}".format(item.name))
tw.sep("-", "({})".format(get_best_relpath(item.function)))
# dict key not used in loop but needed for sorting
for _, fixturedefs in sorted(info.name2fixturedefs.items()):
assert fixturedefs is not None
if not fixturedefs:
continue
# last item is expected to be the one used by the test item
write_fixture(fixturedefs[-1])
for session_item in session.items:
write_item(session_item)
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
seen = set()
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
for fixturedef in fixturedefs:
loc = getlocation(fixturedef.func, curdir)
if (fixturedef.argname, loc) in seen:
continue
seen.add((fixturedef.argname, loc))
available.append(
(
len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname,
fixturedef,
)
)
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" % (module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" % (argname, bestrel)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
write_docstring(tw, doc)
else:
tw.line(" %s: no docstring available" % (loc,), red=True)
def write_docstring(tw, doc):
INDENT = " "
doc = doc.rstrip()
if "\n" in doc:
firstline, rest = doc.split("\n", 1)
else:
firstline, rest = doc, ""
if firstline.strip():
tw.line(INDENT + firstline.strip())
if rest:
for line in dedent(rest).split("\n"):
tw.write(INDENT + line + "\n")
class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
# disable since functions handle it themselves
_ALLOW_MARKERS = False
def __init__(
self,
name,
parent,
args=None,
config=None,
callspec=None,
callobj=NOTSET,
keywords=None,
session=None,
fixtureinfo=None,
originalname=None,
):
super(Function, self).__init__(name, parent, config=config, session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
self.own_markers.extend(get_unpacked_marks(self.obj))
if callspec:
self.callspec = callspec
# this is totally hostile and a mess
# keywords are broken by design by now
# this will be redeemed later
for mark in callspec.marks:
# feel free to cry, this was broken for years before
# and keywords can't fix it per design
self.keywords[mark.name] = mark
self.own_markers.extend(callspec.marks)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self, self.obj, self.cls, funcargs=not self._isyieldedfunction()
)
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
#: original function name, without any decorations (for example
#: parametrization adds a ``"[...]"`` suffix to function names).
#:
#: .. versionadded:: 3.0
self.originalname = originalname
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(
self, "callspec"
), "yielded functions (deprecated) cannot have funcargs"
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = fixtures.FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, "im_func", self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
super(Function, self).setup()
fixtures.fillfixtures(self)
class FunctionDefinition(Function):
"""
internal hack until we get actual definition nodes instead of the
crappy metafunc hack
"""
def runtest(self):
raise RuntimeError("function definitions are not supposed to be used")
setup = runtest
|
yangkf1985/tornado
|
refs/heads/master
|
tornado/test/testing_test.py
|
144
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen, ioloop
from tornado.log import app_log
from tornado.testing import AsyncTestCase, gen_test, ExpectLog
from tornado.test.util import unittest
import contextlib
import os
import traceback
@contextlib.contextmanager
def set_environ(name, value):
old_value = os.environ.get(name)
os.environ[name] = value
try:
yield
finally:
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
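# --- Editor's illustrative usage sketch: set_environ restores the previous
# value (or removes the key) even if the body raises.
#
#     with set_environ('ASYNC_TEST_TIMEOUT', '0.01'):
#         ...  # code under test reads os.environ['ASYNC_TEST_TIMEOUT']
#     # on exit the variable is back in its original state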
class AsyncTestCaseTest(AsyncTestCase):
def test_exception_in_callback(self):
self.io_loop.add_callback(lambda: 1 / 0)
try:
self.wait()
self.fail("did not get expected exception")
except ZeroDivisionError:
pass
def test_wait_timeout(self):
time = self.io_loop.time
# Accept default 5-second timeout, no error
self.io_loop.add_timeout(time() + 0.01, self.stop)
self.wait()
# Timeout passed to wait()
self.io_loop.add_timeout(time() + 1, self.stop)
with self.assertRaises(self.failureException):
self.wait(timeout=0.01)
# Timeout set with environment variable
self.io_loop.add_timeout(time() + 1, self.stop)
with set_environ('ASYNC_TEST_TIMEOUT', '0.01'):
with self.assertRaises(self.failureException):
self.wait()
def test_subsequent_wait_calls(self):
"""
This test makes sure that a second call to wait()
clears the first timeout.
"""
self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
self.wait(timeout=0.02)
self.io_loop.add_timeout(self.io_loop.time() + 0.03, self.stop)
self.wait(timeout=0.15)
def test_multiple_errors(self):
def fail(message):
raise Exception(message)
self.io_loop.add_callback(lambda: fail("error one"))
self.io_loop.add_callback(lambda: fail("error two"))
# The first error gets raised; the second gets logged.
with ExpectLog(app_log, "multiple unhandled exceptions"):
with self.assertRaises(Exception) as cm:
self.wait()
self.assertEqual(str(cm.exception), "error one")
class AsyncTestCaseWrapperTest(unittest.TestCase):
def test_undecorated_generator(self):
class Test(AsyncTestCase):
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
def test_undecorated_generator_with_skip(self):
class Test(AsyncTestCase):
@unittest.skip("don't run this")
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
def test_other_return(self):
class Test(AsyncTestCase):
def test_other_return(self):
return 42
test = Test('test_other_return')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("Return value from test method ignored", result.errors[0][1])
class SetUpTearDownTest(unittest.TestCase):
def test_set_up_tear_down(self):
"""
This test makes sure that AsyncTestCase calls super methods for
setUp and tearDown.
InheritBoth is a subclass of both AsyncTestCase and
SetUpTearDown, with the ordering so that the super of
AsyncTestCase will be SetUpTearDown.
"""
events = []
result = unittest.TestResult()
class SetUpTearDown(unittest.TestCase):
def setUp(self):
events.append('setUp')
def tearDown(self):
events.append('tearDown')
class InheritBoth(AsyncTestCase, SetUpTearDown):
def test(self):
events.append('test')
InheritBoth('test').run(result)
expected = ['setUp', 'test', 'tearDown']
self.assertEqual(expected, events)
class GenTest(AsyncTestCase):
def setUp(self):
super(GenTest, self).setUp()
self.finished = False
def tearDown(self):
self.assertTrue(self.finished)
super(GenTest, self).tearDown()
@gen_test
def test_sync(self):
self.finished = True
@gen_test
def test_async(self):
yield gen.Task(self.io_loop.add_callback)
self.finished = True
def test_timeout(self):
# Set a short timeout and exceed it.
@gen_test(timeout=0.1)
def test(self):
yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
# This can't use assertRaises because we need to inspect the
# exc_info triple (and not just the exception object)
try:
test(self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
# The stack trace should blame the add_timeout line, not just
# unrelated IOLoop/testing internals.
self.assertIn(
"gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)",
traceback.format_exc())
self.finished = True
def test_no_timeout(self):
# A test that does not exceed its timeout should succeed.
@gen_test(timeout=1)
def test(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.1)
test(self)
self.finished = True
def test_timeout_environment_variable(self):
@gen_test(timeout=0.5)
def test_long_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.25)
# Uses provided timeout of 0.5 seconds, doesn't time out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
test_long_timeout(self)
self.finished = True
def test_no_timeout_environment_variable(self):
@gen_test(timeout=0.01)
def test_short_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 1)
# Uses environment-variable timeout of 0.1, times out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
with self.assertRaises(ioloop.TimeoutError):
test_short_timeout(self)
self.finished = True
def test_with_method_args(self):
@gen_test
def test_with_args(self, *args):
self.assertEqual(args, ('test',))
yield gen.Task(self.io_loop.add_callback)
test_with_args(self, 'test')
self.finished = True
def test_with_method_kwargs(self):
@gen_test
def test_with_kwargs(self, **kwargs):
self.assertDictEqual(kwargs, {'test': 'test'})
yield gen.Task(self.io_loop.add_callback)
test_with_kwargs(self, test='test')
self.finished = True
if __name__ == '__main__':
unittest.main()
|
jesramirez/odoo
|
refs/heads/8.0
|
addons/website_hr_recruitment/__openerp__.py
|
370
|
{
'name': 'Jobs',
'category': 'Website',
'version': '1.0',
'summary': 'Job Descriptions And Application Forms',
'description': """
OpenERP Contact Form
====================
""",
'author': 'OpenERP SA',
'depends': ['website_partner', 'hr_recruitment', 'website_mail'],
'data': [
'security/ir.model.access.csv',
'security/website_hr_recruitment_security.xml',
'data/config_data.xml',
'views/hr_job_views.xml',
'views/templates.xml',
],
'demo': [
'data/hr_job_demo.xml',
],
'installable': True,
}
|
zubron/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/cookie_wsh.py
|
451
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
import urlparse
def _add_set_cookie(request, value):
request.extra_headers.append(('Set-Cookie', value))
def web_socket_do_extra_handshake(request):
components = urlparse.urlparse(request.uri)
command = components[4]
ONE_DAY_LIFE = 'Max-Age=86400'
if command == 'set':
_add_set_cookie(request, '; '.join(['foo=bar', ONE_DAY_LIFE]))
elif command == 'set_httponly':
_add_set_cookie(request,
'; '.join(['httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly']))
elif command == 'clear':
_add_set_cookie(request, 'foo=0; Max-Age=0')
_add_set_cookie(request, 'httpOnlyFoo=0; Max-Age=0')
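# --- Editor's illustrative sketch: the command above is the query string of
# the handshake request URI, e.g. for "ws://host/cookie_wsh?set":
#
#     components = urlparse.urlparse('/cookie_wsh?set')
#     components[4]   # -> 'set', so 'foo=bar; Max-Age=86400' is sent back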
def web_socket_transfer_data(request):
pass
|
praneethkumarpidugu/matchmaking
|
refs/heads/master
|
lib/python2.7/site-packages/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
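# --- Editor's illustrative usage sketch, following the common chardet prober
# protocol; the byte string fed below is an assumption for demonstration.
#
#     prober = SJISProber()
#     prober.feed(some_shift_jis_bytes)
#     if prober.get_state() == constants.eFoundIt:
#         print prober.get_charset_name(), prober.get_confidence()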
|
gilsondev/balin
|
refs/heads/master
|
balin/auth_balin/forms.py
|
1
|
# -*- coding: utf-8 -*-
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Field
from crispy_forms.bootstrap import FormActions
class CreateUserForm(UserCreationForm):
"""
Form that inherits the implementation of `UserCreationForm`
"""
email = forms.EmailField(
label=_("Email address")
)
is_superuser = forms.BooleanField(
label=_("Set as Administrator"),
required=False,
help_text=_("Set the user as Administrator with access to all "
"functionalities of the system")
)
helper = FormHelper()
helper.form_method = 'POST'
helper.layout = Layout(
Div(
Field('first_name', css_class='form-control'),
css_class='form-group'
),
Div(
Field('last_name', css_class='form-control'),
css_class='form-group'
),
Div(
Field('email', css_class='form-control'),
css_class='form-group'
),
Div(
Field('username', css_class='form-control'),
css_class='form-group'
),
Div(
Field('password1', css_class='form-control'),
css_class='form-group'
),
Div(
Field('password2', css_class='form-control'),
css_class='form-group'
),
Div(
Field('is_superuser'),
css_class='form-group checkbox'
),
Div(
FormActions(
Submit('save', 'Salvar', css_class="btn-lg btn-primary"),
),
css_class='well'
)
)
class Meta(UserCreationForm.Meta):
fields = ("first_name", "last_name", "username", "email",)
class ChangeUserForm(UserChangeForm):
"""
Form that inherits the implementation of `UserChangeForm`
"""
email = forms.EmailField(
label=_("Email address")
)
is_superuser = forms.BooleanField(
label=_("Set as Administrator"),
required=False,
help_text=_("Set the user as Administrator with access to all "
"functionalities of the system")
)
helper = FormHelper()
helper.form_method = 'POST'
helper.layout = Layout(
Div(
Field('first_name', css_class='form-control'),
css_class='form-group'
),
Div(
Field('last_name', css_class='form-control'),
css_class='form-group'
),
Div(
Field('email', css_class='form-control'),
css_class='form-group'
),
Div(
Field('username', css_class='form-control'),
css_class='form-group'
),
Div(
Field('is_superuser'),
css_class='form-group checkbox'
),
Div(
FormActions(
Submit('update', 'Atualizar', css_class="btn-lg btn-primary")
),
css_class='well'
)
)
class Meta(UserChangeForm.Meta):
fields = ("first_name", "last_name", "username", "email",)
exclude = ("password",)
def clean_password(self):
pass
|
davidam/python-examples
|
refs/heads/master
|
pandas/jsonpandas.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import pandas as pd
df = pd.DataFrame([['a', 'b'], ['c', 'd']], index=['row 1', 'row 2'], columns=['col 1', 'col 2'])
print(df.to_json(orient='split'))
print(df.to_json(orient='index'))
print(df.to_json(orient='records'))
print(df.to_json(orient='table'))
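# For reference, orient='split' keeps columns/index/data apart:
# {"columns":["col 1","col 2"],"index":["row 1","row 2"],"data":[["a","b"],["c","d"]]}
# while orient='records' yields row dicts:
# [{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]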
|
vllab/TSMC_DL
|
refs/heads/master
|
DCGAN/image_utils.py
|
2
|
from scipy import misc
import numpy as np
# inverse_transform: transform image value from [-1, 1] to [0, 1]
def inverse_transform(images):
return (images + 1.) / 2.
# Do inverse_transform before saving the grid image
def save_images(image_path, images, grid_size):
return imsave(image_path, inverse_transform(images), grid_size)
# Save the grid image
def imsave(image_path, images, grid_size):
return misc.toimage(merge(images, grid_size), cmin=0, cmax=1).save(image_path)
# merge images to a grid image
def merge(images, grid_size):
h, w = images.shape[1], images.shape[2]
if len(images.shape) == 3: #batch, row, col
c = 1
img = np.zeros((h * grid_size[0], w * grid_size[1]))
else:
c = images.shape[3]
img = np.zeros((h * grid_size[0], w * grid_size[1], c))
for idx, image in enumerate(images):
i = idx % grid_size[0]
j = idx // grid_size[0]
# index rows by image height and columns by image width; the original
# h/w mix-up here only worked for square images
if c == 1:
img[i*h:i*h+h, j*w:j*w+w] = image
else:
img[i*h:i*h+h, j*w:j*w+w, :] = image
return img
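# --- Editor's illustrative usage sketch; the array shape and output path are
# assumptions for demonstration.
#
#     batch = np.random.uniform(-1., 1., size=(64, 28, 28))  # 64 fake samples
#     save_images('samples/grid.png', batch, grid_size=(8, 8))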
|
ajbc/lda-svi
|
refs/heads/master
|
process_to_tmv_db.py
|
1
|
#!/usr/bin/python
# Copyright (C) 2014 Allison Chaney
import cPickle, sys
from os.path import join
import onlineldavb
import generalrandom
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = \
'Fit LDA to a set of documents with online VB.')
parser.add_argument('fit_path', type=str, \
help = 'path to the fit directory')
parser.add_argument('tmv_path', type=str, \
help = 'path to tmv source template (e.g. \'../tmv/BasicBrowser\')')
# parse the arguments
args = parser.parse_args()
sys.path.append(args.tmv_path)
import db
# load model settings: vocab, K, docgen
print "loading model settings"
f = open(join(args.fit_path, 'settings.pickle'))
vocab, K, docgen = cPickle.load(f)
f.close()
# load model itself, the olda object
print "loading model"
f = open(join(args.fit_path, 'olda.pickle'))
olda = cPickle.load(f)
f.close()
# Add terms and topics to the DB
print "initializing db"
db.init()
print "adding vocab terms"
db.add_terms(vocab)
print "adding",K,"topics"
db.add_topics(K)
# write out the final topics to the db
print "writing out final topics to tmv db"
for topic in range(len(olda._lambda)):
topic_terms_array = []
lambda_sum = sum(olda._lambda[topic])
for term in range(len(olda._lambda[topic])):
topic_terms_array.append((term, \
olda._lambda[topic][term]/lambda_sum))
db.update_topic_terms(topic, topic_terms_array)
# do a final pass over all documents
print "doing a final E step over all documents"
per_time = dict()
i = 0
import time
s = time.time()
D = 1850000 #TODO: this should be read in from settings
for filename, alltxt, title, subtitle in docgen:
length = 0
for word in alltxt.split():
if word in vocab:
length += 1
# TODO: this should be done less hackishly
t = int(filename.split('/')[6])
if length == 0:
continue
db.add_doc(title, subtitle, length, filename, t)
(gamma, ss) = olda.do_e_step(alltxt)
if t not in per_time:
per_time[t] = ss
else:
per_time[t] += ss
db.add_doc_topics(filename, gamma.tolist()[0])
if i % 100 == 0:
tn = (time.time() - s) / 3600
rem = D - i
time_rem = rem * (tn) / (i+1)
print "doc %d (%d)" % (i, t), tn, str(time_rem)+'h', \
str(time_rem/24)+'d'
i += 1
# slice up topics by time
print "calculating time-slice topics"
for t in per_time:
per_time[t] += olda._eta
db.add_time_topics(t, per_time[t])
|
Lh4cKg/sl4a
|
refs/heads/master
|
python/src/Lib/test/test_asynchat.py
|
58
|
# test asynchat -- requires threading
import thread # If this fails, we can't test this module
import asyncore, asynchat, socket, threading, time
import unittest
import sys
from test import test_support
HOST = test_support.HOST
SERVER_QUIT = 'QUIT\n'
class echo_server(threading.Thread):
# parameter to determine the number of bytes passed back to the
# client each send
chunk_size = 1
def __init__(self, event):
threading.Thread.__init__(self)
self.event = event
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.sock)
def run(self):
self.sock.listen(1)
self.event.set()
conn, client = self.sock.accept()
self.buffer = ""
# collect data until quit message is seen
while SERVER_QUIT not in self.buffer:
data = conn.recv(1)
if not data:
break
self.buffer = self.buffer + data
# remove the SERVER_QUIT message
self.buffer = self.buffer.replace(SERVER_QUIT, '')
# re-send entire set of collected data
try:
# this may fail on some tests, such as test_close_when_done, since
# the client closes the channel when it's done sending
while self.buffer:
n = conn.send(self.buffer[:self.chunk_size])
time.sleep(0.001)
self.buffer = self.buffer[n:]
except:
pass
conn.close()
self.sock.close()
class echo_client(asynchat.async_chat):
def __init__(self, terminator, server_port):
asynchat.async_chat.__init__(self)
self.contents = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, server_port))
self.set_terminator(terminator)
self.buffer = ''
def handle_connect(self):
pass
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
def collect_incoming_data(self, data):
self.buffer += data
def found_terminator(self):
self.contents.append(self.buffer)
self.buffer = ""
def start_echo_server():
event = threading.Event()
s = echo_server(event)
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
return s, event
class TestAsynchat(unittest.TestCase):
usepoll = False
def setUp (self):
pass
def tearDown (self):
pass
def line_terminator_check(self, term, server_chunk):
event = threading.Event()
s = echo_server(event)
s.chunk_size = server_chunk
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
c = echo_client(term, s.port)
c.push("hello ")
c.push("world%s" % term)
c.push("I'm not dead yet!%s" % term)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
# the line terminator tests below check receiving variously-sized
# chunks back from the server in order to exercise all branches of
# async_chat.handle_read
def test_line_terminator1(self):
# test one-character terminator
for l in (1,2,3):
self.line_terminator_check('\n', l)
def test_line_terminator2(self):
# test two-character terminator
for l in (1,2,3):
self.line_terminator_check('\r\n', l)
def test_line_terminator3(self):
# test three-character terminator
for l in (1,2,3):
self.line_terminator_check('qqq', l)
def numeric_terminator_check(self, termlen):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(termlen, s.port)
data = "hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, [data[:termlen]])
def test_numeric_terminator1(self):
# check that ints & longs both work (since type is
# explicitly checked in async_chat.handle_read)
self.numeric_terminator_check(1)
self.numeric_terminator_check(1L)
def test_numeric_terminator2(self):
self.numeric_terminator_check(6L)
def test_none_terminator(self):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(None, s.port)
data = "hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, [])
self.assertEqual(c.buffer, data)
def test_simple_producer(self):
s, event = start_echo_server()
c = echo_client('\n', s.port)
data = "hello world\nI'm not dead yet!\n"
p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8)
c.push_with_producer(p)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
def test_string_producer(self):
s, event = start_echo_server()
c = echo_client('\n', s.port)
data = "hello world\nI'm not dead yet!\n"
c.push_with_producer(data+SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
def test_empty_line(self):
# checks that empty lines are handled correctly
s, event = start_echo_server()
c = echo_client('\n', s.port)
c.push("hello world\n\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "", "I'm not dead yet!"])
def test_close_when_done(self):
s, event = start_echo_server()
c = echo_client('\n', s.port)
c.push("hello world\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
c.close_when_done()
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, [])
# the server might have been able to send a byte or two back, but this
# at least checks that it received something and didn't just fail
# (which could still result in the client not having received anything)
self.assertTrue(len(s.buffer) > 0)
class TestAsynchat_WithPoll(TestAsynchat):
usepoll = True
class TestHelperFunctions(unittest.TestCase):
def test_find_prefix_at_end(self):
self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1)
self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0)
class TestFifo(unittest.TestCase):
def test_basic(self):
f = asynchat.fifo()
f.push(7)
f.push('a')
self.assertEqual(len(f), 2)
self.assertEqual(f.first(), 7)
self.assertEqual(f.pop(), (1, 7))
self.assertEqual(len(f), 1)
self.assertEqual(f.first(), 'a')
self.assertEqual(f.is_empty(), False)
self.assertEqual(f.pop(), (1, 'a'))
self.assertEqual(len(f), 0)
self.assertEqual(f.is_empty(), True)
self.assertEqual(f.pop(), (0, None))
def test_given_list(self):
f = asynchat.fifo(['x', 17, 3])
self.assertEqual(len(f), 3)
self.assertEqual(f.pop(), (1, 'x'))
self.assertEqual(f.pop(), (1, 17))
self.assertEqual(f.pop(), (1, 3))
self.assertEqual(f.pop(), (0, None))
def test_main(verbose=None):
test_support.run_unittest(TestAsynchat, TestAsynchat_WithPoll,
TestHelperFunctions, TestFifo)
if __name__ == "__main__":
test_main(verbose=True)
|
shakalaca/ASUS_ZenFone_A400CG
|
refs/heads/android-5.0
|
prebuilts/gcc/darwin-x86/x86/x86_64-linux-android-4.8/share/gdb/python/gdb/command/prompt.py
|
137
|
# Extended prompt.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB command for working with extended prompts."""
import gdb
import gdb.prompt
class _ExtendedPrompt(gdb.Parameter):
"""Set the extended prompt.
Usage: set extended-prompt VALUE
Substitutions are applied to VALUE to compute the real prompt.
The currently defined substitutions are:
"""
# Add the prompt library's dynamically generated help to the
# __doc__ string.
__doc__ = __doc__ + gdb.prompt.prompt_help()
set_doc = "Set the extended prompt."
show_doc = "Show the extended prompt."
def __init__(self):
super(_ExtendedPrompt, self).__init__("extended-prompt",
gdb.COMMAND_SUPPORT,
gdb.PARAM_STRING_NOESCAPE)
self.value = ''
self.hook_set = False
def get_show_string(self, pvalue):
if self.value != '':
return "The extended prompt is: " + self.value
else:
return "The extended prompt is not set."
def get_set_string(self):
if not self.hook_set:
gdb.prompt_hook = self.before_prompt_hook
self.hook_set = True
return ""
def before_prompt_hook(self, current):
if self.value != '':
newprompt = gdb.prompt.substitute_prompt(self.value)
return newprompt.replace('\\', '\\\\')
else:
return None
_ExtendedPrompt()
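# --- Editor's illustrative usage sketch, typed at the gdb prompt; \w is one
# of the substitutions listed by gdb.prompt.prompt_help().
#
#     (gdb) set extended-prompt \w >
#     (gdb) show extended-prompt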
|
erinspace/osf.io
|
refs/heads/develop
|
api/addons/views.py
|
5
|
import re
from django.apps import apps
from rest_framework.exceptions import NotFound, PermissionDenied
from rest_framework import generics, permissions as drf_permissions
from framework.auth.oauth_scopes import CoreScopes
from api.addons.serializers import AddonSerializer
from api.base.filters import ListFilterMixin
from api.base.pagination import MaxSizePagination
from api.base.permissions import TokenHasScope
from api.base.settings import ADDONS_OAUTH
from api.base.views import JSONAPIBaseView
from website import settings as osf_settings
class AddonSettingsMixin(object):
"""Mixin with convenience method for retrieving the current <Addon><Node|User>Settings based on the
current URL. By default, fetches the settings based on the user or node available in self context.
"""
def get_addon_settings(self, provider=None, fail_if_absent=True, check_object_permissions=True):
owner = None
provider = provider or self.kwargs['provider']
if hasattr(self, 'get_user'):
owner = self.get_user()
owner_type = 'user'
elif hasattr(self, 'get_node'):
owner = self.get_node()
owner_type = 'node'
try:
addon_module = apps.get_app_config('addons_{}'.format(provider))
except LookupError:
raise NotFound('Requested addon unrecognized')
if not owner or provider not in ADDONS_OAUTH or owner_type not in addon_module.owners:
raise NotFound('Requested addon unavailable')
addon_settings = owner.get_addon(provider)
if not addon_settings and fail_if_absent:
raise NotFound('Requested addon not enabled')
if not addon_settings or addon_settings.deleted:
return None
if addon_settings and check_object_permissions:
authorizer = None
if owner_type == 'user':
authorizer = addon_settings.owner
elif hasattr(addon_settings, 'user_settings'):
authorizer = addon_settings.user_settings.owner
if authorizer and authorizer != self.request.user:
raise PermissionDenied('Must be addon authorizer to list folders')
return addon_settings
class AddonList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/addons_list).
"""
permission_classes = (
drf_permissions.AllowAny,
drf_permissions.IsAuthenticatedOrReadOnly,
TokenHasScope, )
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
required_write_scopes = [CoreScopes.NULL]
pagination_class = MaxSizePagination
serializer_class = AddonSerializer
view_category = 'addons'
view_name = 'addon-list'
ordering = ()
def get_default_queryset(self):
return [conf for conf in osf_settings.ADDONS_AVAILABLE_DICT.itervalues() if 'accounts' in conf.configs]
def get_queryset(self):
return self.get_queryset_from_request()
def param_queryset(self, query_params, default_queryset):
"""filters default queryset based on query parameters"""
filters = self.parse_query_params(query_params)
queryset = set(default_queryset)
if filters:
for key, field_names in filters.iteritems():
match = self.QUERY_PATTERN.match(key)
fields = match.groupdict()['fields']
statement = len(re.findall(self.FILTER_FIELDS, fields)) > 1 # This indicates an OR statement
sub_query = set() if statement else set(default_queryset)
for field_name, data in field_names.iteritems():
operations = data if isinstance(data, list) else [data]
for operation in operations:
if statement:
sub_query = sub_query.union(set(self.get_filtered_queryset(field_name, operation, list(default_queryset))))
else:
sub_query = sub_query.intersection(set(self.get_filtered_queryset(field_name, operation, list(default_queryset))))
queryset = sub_query.intersection(queryset)
return list(queryset)
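# --- Editor's illustrative sketch; the field names below are assumptions.
# A single-field filter intersects with the running queryset, while a
# comma-grouped filter takes the OR branch above:
#
#     GET /v2/addons/?filter[name]=github
#     GET /v2/addons/?filter[name,categories]=storage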
|
possoumous/Watchers
|
refs/heads/master
|
seleniumbase/plugins/page_source.py
|
4
|
"""
The plugin for capturing and storing the page source on errors and failures.
"""
import os
import codecs
from nose.plugins import Plugin
from seleniumbase.config import settings
from seleniumbase.core import log_helper
class PageSource(Plugin):
"""
This plugin will capture the page source when a test fails
or raises an error. It will store the page source in the
logs file specified, along with default test information.
"""
name = "page_source" # Usage: --with-page_source
logfile_name = settings.PAGE_SOURCE_NAME
def options(self, parser, env):
super(PageSource, self).options(parser, env=env)
def configure(self, options, conf):
super(PageSource, self).configure(options, conf)
if not self.enabled:
return
self.options = options
def _save_page_source(self, test):
# shared by addError/addFailure below; behavior is unchanged
try:
page_source = test.driver.page_source
except Exception:
# Since we can't get the page source from here, skip saving it
return
test_logpath = self.options.log_path + "/" + test.id()
if not os.path.exists(test_logpath):
os.makedirs(test_logpath)
html_file_name = "%s/%s" % (test_logpath, self.logfile_name)
html_file = codecs.open(html_file_name, "w+", "utf-8")
rendered_source = log_helper.get_html_source_with_base_href(
test.driver, page_source)
html_file.write(rendered_source)
html_file.close()
def addError(self, test, err, capt=None):
self._save_page_source(test)
def addFailure(self, test, err, capt=None, tbinfo=None):
self._save_page_source(test)
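# --- Editor's illustrative usage sketch; the log path value is an assumption.
#
#     nosetests my_test_suite.py --with-page_source --log_path=latest_logs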
|
jolevq/odoopub
|
refs/heads/master
|
openerp/report/render/simple.py
|
324
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
RafiKueng/SteMM
|
refs/heads/master
|
controller.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PHOTOMETRYDEMO - controller.py
basically the interface to galfit and fits files
Created on Tue Sep 23 12:09:45 2014
@author: rafik
"""
import time
import subprocess
templates = {
'header' : '''
================================================================================
# IMAGE and GALFIT CONTROL PARAMETERS
A) {A:<20} # Input data image (FITS file)
B) {B:<20} # Output data image block
C) {C:<20} # Sigma image name (made from data if blank or "none")
D) {D:<20} # Input PSF image and (optional) diffusion kernel
E) {E:<20} # PSF fine sampling factor relative to data
F) {F:<20} # Bad pixel mask (FITS image or ASCII coord list)
G) {G:<20} # File with parameter constraints (ASCII file)
H) {H:<20} # Image region to fit (xmin xmax ymin ymax)
I) {I:<20} # Size of the convolution box (x y)
J) {J:<20} # Magnitude photometric zeropoint
K) {K:<20} # Plate scale (dx dy) [arcsec per pixel]
O) {O:<20} # Display type (regular, curses, both)
P) {P:<20} # Options: 0=normal run; 1,2=make model/imgblock & quit
''',
'sersic' : '''
# Sersic function
0) sersic # Object type
1) {p1:<16} {p1t} # position x, y [pixel]
3) {p3:<18} {p3t} # total magnitude
4) {p4:<18} {p4t} # R_e [Pixels]
5) {p5:<18} {p5t} # Sersic exponent (deVauc=4, expdisk=1)
9) {p9:<18} {p9t} # axis ratio (b/a)
10) {p10:<18} {p10t} # position angle (PA) [Degrees: Up=0, Left=90]
Z) {pZ:<20} # Skip this model in output image? (yes=1, no=0)
''',
'sky' : '''
# sky
0) sky
1) {p1:<18} {p1t} # sky background [ADU counts]
2) {p2:<18} {p2t} # dsky/dx (sky gradient in x)
3) {p3:<18} {p3t} # dsky/dy (sky gradient in y)
Z) {pZ:<20} # Skip this model in output image? (yes=1, no=0)
''',
}
ParamsAvail = { # don't list the Z
'sersic': ['1','3','4','5','9','10'],
'sky': ['1','2','3']
}
#defaults = {
#'header': {
# 'A': 'gal.fits',
# 'B': 'imgblock.fits',
# 'C': 'none',
# 'D': 'psf.fits',
# 'E': '1',
# 'F': 'none',
# 'G': 'none',
# 'H': '1 93 1 93',
# 'I': '100 100',
# 'J': '26.563',
# 'K': '0.038 0.038',
# 'O': 'regular',
# 'P': '0',
# },
#
#'sersic': {
# 'p1': '',
# 'p1t': '1',
# 'p2': '',
# 'p2t': '1',
# 'p3': '',
# 'p3t': '1',
# 'p4': '',
# 'p4t': '1',
# 'p5': '',
# 'p5t': '1',
# 'p9': '',
# 'p9t': '1',
# 'p10': '',
# 'p10t': '1',
# 'pZ': '0',
# },
#}
class Controller(object):
def __init__(self, model, view=None):
self.model = model
self.view = view
self.prefix = '_' # prefix for paths for generated files
self.configfn = None # filename of config file .galfit / .feedme ...
def setView(self, V):
self.view = V
def galfit(self):
print 'create config file'
self.view.msg('create config file')
self.createConfigFile()
print 'config file done'
self.view.msg('running galfit')
# process = sp.Popen('./galfit '+self.model.name+'.galfit', shell=True, stdout=sp.PIPE)
# process.wait()
# print process.returncode
# cmd = ['./galfit', self.model.name+'.galfit']
# p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# for line in p.stdout:
# print line
# p.wait()
# print p.returncode
rc = subprocess.call('./galfit '+self.configfn, shell=True)
self.view.msg('done')
return "success"
def createConfigFile(self):
fn = self.prefix + self.model.name + '.galfit'
#create header
params = {}
for c in 'ABCDEFGHIJKOP':
params[c] = self.getParam(c)
head = templates['header'].format(**params)
# create all object entries
objstxt = ''
for obj in ['sersic', 'sky']: #TODO make this more dynamic
oparams = {}
for c in ParamsAvail[obj]:
print obj, c
p, a = self.getObjParams(obj, c)
oparams['p%s'%c] = p
oparams['p%st'%c] = a
oparams['pZ'] = 0
print oparams
objtxt = templates[obj].format(**oparams)
objstxt += objtxt
txt = head + objstxt
with open(fn, 'w') as f:
f.write(txt)
self.configfn = fn
return fn
def getParam(self, p):
if p=="A":
return self.model.filename
elif p=='B':
#return self.view.askOutfileName()
return self.prefix + 'out.fits'
elif p=='C':
return 'none'
elif p=='D':
if not self.model.psf:
self.model.createPSF(pfx=self.prefix)
return self.prefix+self.model.psf.getFileName()
elif p=='E':
return '1'
elif p=='F':
maskfn = self.model.getMaskFilename(pfx=self.prefix)
return maskfn
elif p=='G':
            return self.generateConstraintsFile(self.prefix)
elif p=='H':
xmin, ymin, xmax, ymax = self.model.getRegionCoords()
return '%.1f %.1f %.1f %.1f' % (xmin, xmax, ymin, ymax)
elif p=='I':
if not self.model.psf:
self.model.createPSF(pfx=self.prefix)
return '%.1f %.1f' % self.model.psf.getBoxSize()
elif p=='J':
return self.model.getPhotometricZeropoint()
elif p=='K':
return '%.5f %.5f' % self.model.getPlateScale()
elif p=='O':
return 'regular'
elif p=='P':
return 0
def getObjParams(self, typ, p):
if typ == 'sersic':
if p=='1':
return ("%i %i" % self.model.ellipse.getCoords(), '1 1')
elif p=='3':
#TODO
return (12, 1)
elif p=='4':
return (self.model.ellipse.getRe(), 1)
elif p=='5':
return (4, 1)
elif p=='9':
return (self.model.ellipse.getAxisRatio(), 1)
elif p=='10':
return (self.model.ellipse.getPositionAngle(), 1)
elif typ == 'sky':
if p=='1':
#TODO
return (1.3, '1')
elif p=='2':
#TODO
return (0, 1)
elif p=='3':
return (0, 1)
    def generateConstraintsFile(self, pfx=''):
filename = pfx + 'constr.txt'
#TODO
txt = '''
# Component/ parameter constraint Comment
# operation (see below) range
1 n 3.5 to 6 # sersic index
'''
with open(filename, 'w') as f:
f.write(txt)
return filename
    def sendToGalfit(self):
        pass
    def openFitsFile(self, filename):
        pass
    def printdata(self):
        print self.model.masks
# testing
if __name__ == "__main__":
pass
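# A hypothetical wiring sketch (Model and View are assumed to be defined
# elsewhere in the package; names here are illustrative only):
#
#     model = Model('gal.fits')
#     ctrl = Controller(model)
#     ctrl.setView(View(ctrl))
#     ctrl.galfit()   # writes the .galfit config and shells out to ./galfit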
|
sk364/chat-api
|
refs/heads/master
|
chat/backend/main/utils/constants.py
|
1
|
IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'gif']
|
jfpla/odoo
|
refs/heads/8.0
|
addons/hr_gamification/__openerp__.py
|
320
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'website': 'https://www.odoo.com/page/employees',
'depends': ['gamification', 'hr'],
'description': """Use the HR ressources for the gamification process.
The HR officer can now manage challenges and badges.
This allow the user to send badges to employees instead of simple users.
Badge received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
|
wfxiang08/django190
|
refs/heads/master
|
tests/template_tests/filter_tests/test_slugify.py
|
324
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class SlugifyTests(SimpleTestCase):
"""
Running slugify on a pre-escaped string leads to odd behavior,
but the result is still safe.
"""
@setup({'slugify01': '{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}'})
def test_slugify01(self):
output = self.engine.render_to_string('slugify01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a-b a-amp-b')
@setup({'slugify02': '{{ a|slugify }} {{ b|slugify }}'})
def test_slugify02(self):
output = self.engine.render_to_string('slugify02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a-b a-amp-b')
class FunctionTests(SimpleTestCase):
def test_slugify(self):
self.assertEqual(
slugify(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/'),
'jack-jill-like-numbers-123-and-4-and-silly-characters',
)
def test_unicode(self):
self.assertEqual(
slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
'un-elephant-a-loree-du-bois',
)
def test_non_string_input(self):
self.assertEqual(slugify(123), '123')
|
SirAnthony/marvin-xmpp
|
refs/heads/master
|
plugins/fortune.py
|
1
|
import random
class Fortune:
_marvinModule = True
public = ['fortune', 'testm']
def fortune(self, message):
fortunes = ['The gene pool could use a little chlorine.',
'Make it idiot proof and someone will make a better idiot.',
'He who laughs last thinks slowest.',
'A flashlight is a case for holding dead batteries.',
'Lottery: A tax on people who are bad at math.',
'I wouldnt be caught dead with a necrophiliac.',
'Consciousness: That annoying time between naps.',
                    'I dont suffer from insanity. I enjoy every minute of it.',
'All of the books in the world contain no more information than is broadcast as video in a single large American city in a single year. Not all bits have equal value.',
'And, for an instant, she stared directly into those soft blue eyes and knew, with an instinctive mammalian certainty, that the exceedingly rich were no longer even remotely human.',
'Any sufficiently advanced technology is indistinguishable from magic.',
'Bill Gates is a very rich man today... and do you want to know why? The answer is one word: versions.',
'Champagne, if you are seeking the truth, is better than a lie detector. It encourages a man to be expansive, even reckless, while lie detectors are only a challenge to tell lies successfully.',
'Civilization advances by extending the number of important operations which we can perform without thinking of them.',
'Congress will pass a law restricting public comment on the Internet to individuals who have spent a minimum of one hour actually accomplishing a specific task while on line.',
'Cyberspace. A consensual hallucination experienced daily by billions of legitimate operators, in every nation, by children being taught mathematical concepts.',
'Do you realize if it werent for Edison we\'d be watching TV by candlelight?',
'Doing linear scans over an associative array is like trying to club someone to death with a loaded Uzi.',
'Dreaming in public is an important part of our job description, as science writers, but there are bad dreams as well as good dreams. We are dreamers, you see, but we are also realists, of a sort.',
'Everybody gets so much information all day long that they lose their common sense.',
'For a successful technology, reality must take precedence over public relations, for Nature cannot be fooled.',
'For my confirmation, I didnt get a watch and my first pair of long pants, like most Lutheran boys. I got a telescope. My mother thought it would make the best gift.',
'For years I have been mourning and not for my dead, it is for this boy for whatever corner in my heart died when his childhood slid out of my arms.',
'Gates is the ultimate programming machine. He believes everything can be defined, examined, reduced to essentials, and rearranged into a logical sequence that will achieve a particular goal.',
'Getting information off the Internet is like taking a drink from a fire hydrant.',
'Globalization, as defined by rich people like us, is a very nice thing... you are talking about the Internet, you are talking about cell phones, you are talking about computers. This doesnt affect two-thirds of the people of the world.',
'Humanity is acquiring all the right technology for all the wrong reasons.',
'I am sorry to say that there is too much point to the wisecrack that life is extinct on other planets because their scientists were more advanced than ours.'
]
        quote = random.choice(fortunes)  # random.choice avoids the off-by-one IndexError of randint(1, len(fortunes))
message.reply(quote)
    def testm(self, message):
message.reply('It is a test module.')
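# A minimal invocation sketch (the bot framework is assumed to pass in a
# message object exposing reply(); FakeMessage is hypothetical):
#
#     class FakeMessage(object):
#         def reply(self, text): print text
#
#     Fortune().fortune(FakeMessage())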
|
faribas/RMG-Py
|
refs/heads/master
|
rmgpy/kinetics/arrheniusTest.py
|
4
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.kinetics.arrhenius` module.
"""
import unittest
import math
import numpy
from rmgpy.kinetics.arrhenius import Arrhenius, ArrheniusEP, PDepArrhenius, MultiArrhenius, MultiPDepArrhenius
import rmgpy.constants as constants
################################################################################
class TestArrhenius(unittest.TestCase):
"""
Contains unit tests of the :class:`Arrhenius` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.A = 1.0e12
self.n = 0.5
self.Ea = 41.84
self.T0 = 1.
self.Tmin = 300.
self.Tmax = 3000.
self.comment = 'C2H6'
self.arrhenius = Arrhenius(
A = (self.A,"cm^3/(mol*s)"),
n = self.n,
Ea = (self.Ea,"kJ/mol"),
T0 = (self.T0,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
)
def test_A(self):
"""
Test that the Arrhenius A property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.A.value_si * 1e6, self.A, delta=1e0)
def test_n(self):
"""
Test that the Arrhenius n property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.n.value_si, self.n, 6)
def test_Ea(self):
"""
Test that the Arrhenius Ea property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.Ea.value_si * 0.001, self.Ea, 6)
def test_T0(self):
"""
Test that the Arrhenius T0 property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.T0.value_si, self.T0, 6)
def test_Tmin(self):
"""
Test that the Arrhenius Tmin property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the Arrhenius Tmax property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.Tmax.value_si, self.Tmax, 6)
def test_comment(self):
"""
Test that the Arrhenius comment property was properly set.
"""
self.assertEqual(self.arrhenius.comment, self.comment)
def test_isTemperatureValid(self):
"""
Test the Arrhenius.isTemperatureValid() method.
"""
Tdata = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
validdata = numpy.array([False,True,True,True,True,True,True,True,True,True], numpy.bool)
for T, valid in zip(Tdata, validdata):
valid0 = self.arrhenius.isTemperatureValid(T)
self.assertEqual(valid0, valid)
def test_getRateCoefficient(self):
"""
Test the Arrhenius.getRateCoefficient() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
kexplist = numpy.array([1.6721e-4, 6.8770e1, 5.5803e3, 5.2448e4, 2.0632e5, 5.2285e5, 1.0281e6, 1.7225e6, 2.5912e6, 3.6123e6])
for T, kexp in zip(Tlist, kexplist):
kact = self.arrhenius.getRateCoefficient(T)
self.assertAlmostEqual(kexp, kact, delta=1e-4*kexp)
def test_changeT0(self):
"""
Test the Arrhenius.changeT0() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.arrhenius.getRateCoefficient(T) for T in Tlist])
self.arrhenius.changeT0(300)
self.assertEqual(self.arrhenius.T0.value_si, 300)
for T, kexp in zip(Tlist, k0list):
kact = self.arrhenius.getRateCoefficient(T)
self.assertAlmostEqual(kexp, kact, delta=1e-6*kexp)
def test_fitToData(self):
"""
Test the Arrhenius.fitToData() method.
"""
Tdata = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
kdata = numpy.array([self.arrhenius.getRateCoefficient(T) for T in Tdata])
arrhenius = Arrhenius().fitToData(Tdata, kdata, kunits="m^3/(mol*s)")
        self.assertEqual(float(arrhenius.T0.value_si), 1)
for T, k in zip(Tdata, kdata):
self.assertAlmostEqual(k, arrhenius.getRateCoefficient(T), delta=1e-6*k)
self.assertAlmostEqual(arrhenius.A.value_si, self.arrhenius.A.value_si, delta=1e0)
        self.assertAlmostEqual(arrhenius.n.value_si, self.arrhenius.n.value_si, 4)
self.assertAlmostEqual(arrhenius.Ea.value_si, self.arrhenius.Ea.value_si, 2)
self.assertAlmostEqual(arrhenius.T0.value_si, self.arrhenius.T0.value_si, 4)
def test_pickle(self):
"""
Test that an Arrhenius object can be pickled and unpickled with no loss
of information.
"""
import cPickle
arrhenius = cPickle.loads(cPickle.dumps(self.arrhenius,-1))
self.assertAlmostEqual(self.arrhenius.A.value, arrhenius.A.value, delta=1e0)
self.assertEqual(self.arrhenius.A.units, arrhenius.A.units)
self.assertAlmostEqual(self.arrhenius.n.value, arrhenius.n.value, 4)
self.assertAlmostEqual(self.arrhenius.Ea.value, arrhenius.Ea.value, 4)
self.assertEqual(self.arrhenius.Ea.units, arrhenius.Ea.units)
self.assertAlmostEqual(self.arrhenius.T0.value, arrhenius.T0.value, 4)
self.assertEqual(self.arrhenius.T0.units, arrhenius.T0.units)
self.assertAlmostEqual(self.arrhenius.Tmin.value, arrhenius.Tmin.value, 4)
self.assertEqual(self.arrhenius.Tmin.units, arrhenius.Tmin.units)
self.assertAlmostEqual(self.arrhenius.Tmax.value, arrhenius.Tmax.value, 4)
self.assertEqual(self.arrhenius.Tmax.units, arrhenius.Tmax.units)
self.assertEqual(self.arrhenius.comment, arrhenius.comment)
def test_repr(self):
"""
Test that an Arrhenius object can be reconstructed from its repr()
output with no loss of information.
"""
arrhenius = None
exec('arrhenius = {0!r}'.format(self.arrhenius))
self.assertAlmostEqual(self.arrhenius.A.value, arrhenius.A.value, delta=1e0)
self.assertEqual(self.arrhenius.A.units, arrhenius.A.units)
self.assertAlmostEqual(self.arrhenius.n.value, arrhenius.n.value, 4)
self.assertAlmostEqual(self.arrhenius.Ea.value, arrhenius.Ea.value, 4)
self.assertEqual(self.arrhenius.Ea.units, arrhenius.Ea.units)
self.assertAlmostEqual(self.arrhenius.T0.value, arrhenius.T0.value, 4)
self.assertEqual(self.arrhenius.T0.units, arrhenius.T0.units)
self.assertAlmostEqual(self.arrhenius.Tmin.value, arrhenius.Tmin.value, 4)
self.assertEqual(self.arrhenius.Tmin.units, arrhenius.Tmin.units)
self.assertAlmostEqual(self.arrhenius.Tmax.value, arrhenius.Tmax.value, 4)
self.assertEqual(self.arrhenius.Tmax.units, arrhenius.Tmax.units)
self.assertEqual(self.arrhenius.comment, arrhenius.comment)
def test_changeRate(self):
"""
Test the Arrhenius.changeRate() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.arrhenius.getRateCoefficient(T) for T in Tlist])
self.arrhenius.changeRate(2)
for T, kexp in zip(Tlist, k0list):
kact = self.arrhenius.getRateCoefficient(T)
self.assertAlmostEqual(2*kexp, kact, delta=1e-6*kexp)
################################################################################
class TestArrheniusEP(unittest.TestCase):
"""
Contains unit tests of the :class:`ArrheniusEP` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.A = 1.0e12
self.n = 0.5
self.alpha = 0.5
self.E0 = 41.84
self.Tmin = 300.
self.Tmax = 3000.
self.comment = 'C2H6'
self.arrhenius = ArrheniusEP(
A = (self.A,"cm^3/(mol*s)"),
n = self.n,
alpha = self.alpha,
E0 = (self.E0,"kJ/mol"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
)
def test_A(self):
"""
Test that the ArrheniusEP A property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.A.value_si * 1e6, self.A, delta=1e0)
def test_n(self):
"""
Test that the ArrheniusEP n property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.n.value_si, self.n, 6)
def test_alpha(self):
"""
Test that the ArrheniusEP alpha property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.alpha.value_si, self.alpha, 6)
def test_E0(self):
"""
Test that the ArrheniusEP E0 property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.E0.value_si * 0.001, self.E0, 6)
def test_Tmin(self):
"""
Test that the ArrheniusEP Tmin property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the ArrheniusEP Tmax property was properly set.
"""
self.assertAlmostEqual(self.arrhenius.Tmax.value_si, self.Tmax, 6)
def test_comment(self):
"""
Test that the ArrheniusEP comment property was properly set.
"""
self.assertEqual(self.arrhenius.comment, self.comment)
def test_isTemperatureValid(self):
"""
Test the ArrheniusEP.isTemperatureValid() method.
"""
Tdata = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
validdata = numpy.array([False,True,True,True,True,True,True,True,True,True], numpy.bool)
for T, valid in zip(Tdata, validdata):
valid0 = self.arrhenius.isTemperatureValid(T)
self.assertEqual(valid0, valid)
def test_getRateCoefficient(self):
"""
Test the ArrheniusEP.getRateCoefficient() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
kexplist = numpy.array([1.6721e-4, 6.8770e1, 5.5803e3, 5.2448e4, 2.0632e5, 5.2285e5, 1.0281e6, 1.7225e6, 2.5912e6, 3.6123e6])
for T, kexp in zip(Tlist, kexplist):
            kact = self.arrhenius.getRateCoefficient(T)
self.assertAlmostEqual(kexp, kact, delta=1e-4*kexp)
def test_pickle(self):
"""
Test that an ArrheniusEP object can be pickled and unpickled with no loss
of information.
"""
import cPickle
arrhenius = cPickle.loads(cPickle.dumps(self.arrhenius, -1))
self.assertAlmostEqual(self.arrhenius.A.value, arrhenius.A.value, delta=1e0)
self.assertEqual(self.arrhenius.A.units, arrhenius.A.units)
self.assertAlmostEqual(self.arrhenius.n.value, arrhenius.n.value, 4)
self.assertAlmostEqual(self.arrhenius.alpha.value, arrhenius.alpha.value, 4)
self.assertAlmostEqual(self.arrhenius.E0.value, arrhenius.E0.value, 4)
self.assertEqual(self.arrhenius.E0.units, arrhenius.E0.units)
self.assertAlmostEqual(self.arrhenius.Tmin.value, arrhenius.Tmin.value, 4)
self.assertEqual(self.arrhenius.Tmin.units, arrhenius.Tmin.units)
self.assertAlmostEqual(self.arrhenius.Tmax.value, arrhenius.Tmax.value, 4)
self.assertEqual(self.arrhenius.Tmax.units, arrhenius.Tmax.units)
self.assertEqual(self.arrhenius.comment, arrhenius.comment)
def test_repr(self):
"""
Test that an ArrheniusEP object can be reconstructed from its repr()
output with no loss of information.
"""
arrhenius = None
exec('arrhenius = {0!r}'.format(self.arrhenius))
self.assertAlmostEqual(self.arrhenius.A.value, arrhenius.A.value, delta=1e0)
self.assertEqual(self.arrhenius.A.units, arrhenius.A.units)
self.assertAlmostEqual(self.arrhenius.n.value, arrhenius.n.value, 4)
self.assertAlmostEqual(self.arrhenius.alpha.value, arrhenius.alpha.value, 4)
self.assertAlmostEqual(self.arrhenius.E0.value, arrhenius.E0.value, 4)
self.assertEqual(self.arrhenius.E0.units, arrhenius.E0.units)
self.assertAlmostEqual(self.arrhenius.Tmin.value, arrhenius.Tmin.value, 4)
self.assertEqual(self.arrhenius.Tmin.units, arrhenius.Tmin.units)
self.assertAlmostEqual(self.arrhenius.Tmax.value, arrhenius.Tmax.value, 4)
self.assertEqual(self.arrhenius.Tmax.units, arrhenius.Tmax.units)
self.assertEqual(self.arrhenius.comment, arrhenius.comment)
def test_changeRate(self):
"""
Test the ArrheniusEP.changeRate() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.arrhenius.getRateCoefficient(T) for T in Tlist])
self.arrhenius.changeRate(2)
for T, kexp in zip(Tlist, k0list):
kact = self.arrhenius.getRateCoefficient(T)
self.assertAlmostEqual(2*kexp, kact, delta=1e-6*kexp)
################################################################################
class TestPDepArrhenius(unittest.TestCase):
"""
Contains unit tests of the :class:`PDepArrhenius` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.arrhenius0 = Arrhenius(
A = (1.0e6,"s^-1"),
n = 1.0,
Ea = (10.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
self.arrhenius1 = Arrhenius(
A = (1.0e12,"s^-1"),
n = 1.0,
Ea = (20.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
self.pressures = numpy.array([0.1, 10.0])
self.arrhenius = [self.arrhenius0, self.arrhenius1]
self.Tmin = 300.0
self.Tmax = 2000.0
self.Pmin = 0.1
self.Pmax = 10.0
self.comment = """This data is completely made up"""
self.kinetics = PDepArrhenius(
pressures = (self.pressures,"bar"),
arrhenius = self.arrhenius,
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
Pmin = (self.Pmin,"bar"),
Pmax = (self.Pmax,"bar"),
comment = self.comment,
)
def test_pressures(self):
"""
Test that the PDepArrhenius pressures property was properly set.
"""
self.assertEqual(len(self.kinetics.pressures.value_si), 2)
for i in range(2):
self.assertAlmostEqual(self.kinetics.pressures.value_si[i] * 1e-5, self.pressures[i], 4)
def test_arrhenius(self):
"""
Test that the PDepArrhenius arrhenius property was properly set.
"""
self.assertEqual(len(self.kinetics.arrhenius), 2)
for i in range(2):
self.assertAlmostEqual(self.kinetics.arrhenius[i].A.value, self.arrhenius[i].A.value, delta=1e0)
self.assertEqual(self.kinetics.arrhenius[i].A.units, self.arrhenius[i].A.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].n.value, self.arrhenius[i].n.value, 4)
self.assertAlmostEqual(self.kinetics.arrhenius[i].Ea.value, self.arrhenius[i].Ea.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].Ea.units, self.arrhenius[i].Ea.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].T0.value, self.arrhenius[i].T0.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].T0.units, self.arrhenius[i].T0.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].Tmin.value, self.arrhenius[i].Tmin.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].Tmin.units, self.arrhenius[i].Tmin.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].Tmax.value, self.arrhenius[i].Tmax.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].Tmax.units, self.arrhenius[i].Tmax.units)
self.assertEqual(self.kinetics.arrhenius[i].comment, self.arrhenius[i].comment)
def test_Tmin(self):
"""
Test that the PDepArrhenius Tmin property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the PDepArrhenius Tmax property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)
def test_Pmin(self):
"""
Test that the PDepArrhenius Pmin property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Pmin.value_si*1e-5, self.Pmin, 6)
def test_Pmax(self):
"""
Test that the PDepArrhenius Pmax property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Pmax.value_si*1e-5, self.Pmax, 6)
def test_comment(self):
"""
Test that the PDepArrhenius comment property was properly set.
"""
self.assertEqual(self.kinetics.comment, self.comment)
def test_isPressureDependent(self):
"""
Test the PDepArrhenius.isPressureDependent() method.
"""
self.assertTrue(self.kinetics.isPressureDependent())
def test_getRateCoefficient(self):
"""
Test the PDepArrhenius.getRateCoefficient() method.
"""
P = 1e4
for T in [300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500]:
k0 = self.kinetics.getRateCoefficient(T, P)
k1 = self.arrhenius0.getRateCoefficient(T)
self.assertAlmostEqual(k0, k1, delta=1e-6*k1)
P = 1e6
for T in [300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500]:
k0 = self.kinetics.getRateCoefficient(T, P)
k1 = self.arrhenius1.getRateCoefficient(T)
self.assertAlmostEqual(k0, k1, delta=1e-6*k1)
P = 1e5
for T in [300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500]:
k0 = self.kinetics.getRateCoefficient(T, P)
k1 = math.sqrt(self.arrhenius0.getRateCoefficient(T) * self.arrhenius1.getRateCoefficient(T))
self.assertAlmostEqual(k0, k1, delta=1e-6*k1)
def test_fitToData(self):
"""
Test the PDepArrhenius.fitToData() method.
"""
Tdata = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500], numpy.float)
Pdata = numpy.array([1e4,3e4,1e5,3e5,1e6], numpy.float)
kdata = numpy.zeros([len(Tdata),len(Pdata)], numpy.float)
for t in range(len(Tdata)):
for p in range(len(Pdata)):
kdata[t,p] = self.kinetics.getRateCoefficient(Tdata[t], Pdata[p])
kinetics = PDepArrhenius().fitToData(Tdata, Pdata, kdata, kunits="s^-1")
for t in range(len(Tdata)):
for p in range(len(Pdata)):
self.assertAlmostEqual(kinetics.getRateCoefficient(Tdata[t], Pdata[p]), kdata[t,p], delta=1e-6*kdata[t,p])
def test_pickle(self):
"""
Test that a PDepArrhenius object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
kinetics = cPickle.loads(cPickle.dumps(self.kinetics,-1))
Narrh = 2
self.assertEqual(len(self.kinetics.pressures.value), Narrh)
self.assertEqual(len(kinetics.pressures.value), Narrh)
self.assertEqual(len(self.kinetics.arrhenius), Narrh)
self.assertEqual(len(kinetics.arrhenius), Narrh)
for i in range(Narrh):
self.assertAlmostEqual(self.kinetics.pressures.value[i], kinetics.pressures.value[i], 4)
self.assertAlmostEqual(self.kinetics.arrhenius[i].A.value, kinetics.arrhenius[i].A.value, delta=1e0)
self.assertEqual(self.kinetics.arrhenius[i].A.units, kinetics.arrhenius[i].A.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].n.value, kinetics.arrhenius[i].n.value)
self.assertAlmostEqual(self.kinetics.arrhenius[i].T0.value, kinetics.arrhenius[i].T0.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].T0.units, kinetics.arrhenius[i].T0.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].Ea.value, kinetics.arrhenius[i].Ea.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].Ea.units, kinetics.arrhenius[i].Ea.units)
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_repr(self):
"""
Test that a PDepArrhenius object can be successfully reconstructed
from its repr() output with no loss of information.
"""
kinetics = None
exec('kinetics = {0!r}'.format(self.kinetics))
Narrh = 2
self.assertEqual(len(self.kinetics.pressures.value), Narrh)
self.assertEqual(len(kinetics.pressures.value), Narrh)
self.assertEqual(len(self.kinetics.arrhenius), Narrh)
self.assertEqual(len(kinetics.arrhenius), Narrh)
for i in range(Narrh):
self.assertAlmostEqual(self.kinetics.pressures.value[i], kinetics.pressures.value[i], 4)
self.assertAlmostEqual(self.kinetics.arrhenius[i].A.value, kinetics.arrhenius[i].A.value, delta=1e0)
self.assertEqual(self.kinetics.arrhenius[i].A.units, kinetics.arrhenius[i].A.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].n.value, kinetics.arrhenius[i].n.value)
self.assertAlmostEqual(self.kinetics.arrhenius[i].T0.value, kinetics.arrhenius[i].T0.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].T0.units, kinetics.arrhenius[i].T0.units)
self.assertAlmostEqual(self.kinetics.arrhenius[i].Ea.value, kinetics.arrhenius[i].Ea.value, 4)
self.assertEqual(self.kinetics.arrhenius[i].Ea.units, kinetics.arrhenius[i].Ea.units)
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_changeRate(self):
"""
Test the PDepArrhenius.changeRate() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.kinetics.getRateCoefficient(T, 1e5) for T in Tlist])
self.kinetics.changeRate(2)
for T, kexp in zip(Tlist, k0list):
kact = self.kinetics.getRateCoefficient(T, 1e5)
self.assertAlmostEqual(2*kexp, kact, delta=1e-6*kexp)
################################################################################
class TestMultiArrhenius(unittest.TestCase):
"""
Contains unit tests of the :class:`MultiArrhenius` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.Tmin = 350.
self.Tmax = 1500.
self.comment = 'Comment'
self.arrhenius = [
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
]
self.kinetics = MultiArrhenius(
arrhenius = self.arrhenius,
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
)
self.single_kinetics = MultiArrhenius(
arrhenius = self.arrhenius[:1],
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
)
def test_arrhenius(self):
"""
Test that the MultiArrhenius A property was properly set.
"""
self.assertEqual(self.kinetics.arrhenius, self.arrhenius)
def test_Tmin(self):
"""
Test that the MultiArrhenius Tmin property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the MultiArrhenius Tmax property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)
def test_comment(self):
"""
Test that the MultiArrhenius comment property was properly set.
"""
self.assertEqual(self.kinetics.comment, self.comment)
def test_isTemperatureValid(self):
"""
Test the MultiArrhenius.isTemperatureValid() method.
"""
Tdata = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
validdata = numpy.array([False,True,True,True,True,True,True,False,False,False], numpy.bool)
for T, valid in zip(Tdata, validdata):
valid0 = self.kinetics.isTemperatureValid(T)
self.assertEqual(valid0, valid)
def test_getRateCoefficient(self):
"""
Test the MultiArrhenius.getRateCoefficient() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
kexplist = numpy.array([2.85400e-06, 4.00384e-01, 2.73563e+01, 8.50699e+02, 1.20181e+04, 7.56312e+04, 2.84724e+05, 7.71702e+05, 1.67743e+06, 3.12290e+06])
for T, kexp in zip(Tlist, kexplist):
kact = self.kinetics.getRateCoefficient(T)
self.assertAlmostEqual(kexp, kact, delta=1e-4*kexp)
def test_pickle(self):
"""
Test that a MultiArrhenius object can be pickled and unpickled with no loss
of information.
"""
import cPickle
kinetics = cPickle.loads(cPickle.dumps(self.kinetics,-1))
self.assertEqual(len(self.kinetics.arrhenius), len(kinetics.arrhenius))
for arrh0, arrh in zip(self.kinetics.arrhenius, kinetics.arrhenius):
self.assertAlmostEqual(arrh0.A.value, arrh.A.value, delta=1e-18)
self.assertEqual(arrh0.A.units, arrh.A.units)
self.assertAlmostEqual(arrh0.n.value, arrh.n.value, 4)
self.assertAlmostEqual(arrh0.Ea.value, arrh.Ea.value, 4)
self.assertEqual(arrh0.Ea.units, arrh.Ea.units)
self.assertAlmostEqual(arrh0.T0.value, arrh.T0.value, 4)
self.assertEqual(arrh0.T0.units, arrh.T0.units)
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_repr(self):
"""
Test that a MultiArrhenius object can be reconstructed from its repr()
output with no loss of information.
"""
kinetics = None
exec('kinetics = {0!r}'.format(self.kinetics))
self.assertEqual(len(self.kinetics.arrhenius), len(kinetics.arrhenius))
for arrh0, arrh in zip(self.kinetics.arrhenius, kinetics.arrhenius):
self.assertAlmostEqual(arrh0.A.value, arrh.A.value, delta=1e-18)
self.assertEqual(arrh0.A.units, arrh.A.units)
self.assertAlmostEqual(arrh0.n.value, arrh.n.value, 4)
self.assertAlmostEqual(arrh0.Ea.value, arrh.Ea.value, 4)
self.assertEqual(arrh0.Ea.units, arrh.Ea.units)
self.assertAlmostEqual(arrh0.T0.value, arrh.T0.value, 4)
self.assertEqual(arrh0.T0.units, arrh.T0.units)
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_toArrhenius(self):
"""
Test that we can convert to an Arrhenius
"""
answer = self.single_kinetics.arrhenius[0]
fitted = self.single_kinetics.toArrhenius()
self.assertAlmostEqual(fitted.A.value_si, answer.A.value_si, delta=1e0)
        self.assertAlmostEqual(fitted.n.value_si, answer.n.value_si, 4)
self.assertAlmostEqual(fitted.Ea.value_si, answer.Ea.value_si, 2)
self.assertAlmostEqual(fitted.T0.value_si, answer.T0.value_si, 4)
def test_toArrheniusTrange(self):
"""
Test the toArrhenius temperature range is set correctly.
"""
answer = self.single_kinetics.arrhenius[0]
fitted = self.single_kinetics.toArrhenius(Tmin=800, Tmax=1200)
self.assertAlmostEqual(fitted.Tmin.value_si, 800.0)
self.assertAlmostEqual(fitted.Tmax.value_si, 1200.0)
for T in [800,1000,1200]:
self.assertAlmostEqual(fitted.getRateCoefficient(T) / answer.getRateCoefficient(T), 1.0)
def test_toArrheniusMultiple(self):
"""
        Test that toArrhenius fits multiple kinetics over a small range, checking we're within 5% at a few points.
"""
answer = self.kinetics
fitted = self.kinetics.toArrhenius(Tmin=800, Tmax=1200)
self.assertAlmostEqual(fitted.Tmin.value_si, 800.0)
self.assertAlmostEqual(fitted.Tmax.value_si, 1200.0)
for T in [800,1000,1200]:
self.assertAlmostEqual(fitted.getRateCoefficient(T) / answer.getRateCoefficient(T), 1.0, delta=0.05)
def test_changeRate(self):
"""
Test the MultiArrhenius.changeRate() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.kinetics.getRateCoefficient(T) for T in Tlist])
self.kinetics.changeRate(2)
for T, kexp in zip(Tlist, k0list):
kact = self.kinetics.getRateCoefficient(T)
self.assertAlmostEqual(2*kexp, kact, delta=1e-6*kexp)
################################################################################
class TestMultiPDepArrhenius(unittest.TestCase):
"""
Contains unit tests of the :class:`MultiPDepArrhenius` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.Tmin = 350.
self.Tmax = 1500.
self.Pmin = 1e-1
self.Pmax = 1e1
self.pressures = numpy.array([1e-1,1e1])
self.comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)'
self.arrhenius = [
PDepArrhenius(
pressures = (self.pressures,"bar"),
arrhenius = [
Arrhenius(
A = (9.3e-16,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
],
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
Pmin = (self.Pmin,"bar"),
Pmax = (self.Pmax,"bar"),
comment = self.comment,
),
PDepArrhenius(
pressures = (self.pressures,"bar"),
arrhenius = [
Arrhenius(
A = (1.4e-11,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
comment = self.comment,
),
],
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
Pmin = (self.Pmin,"bar"),
Pmax = (self.Pmax,"bar"),
comment = self.comment,
),
]
self.kinetics = MultiPDepArrhenius(
arrhenius = self.arrhenius,
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
Pmin = (self.Pmin,"bar"),
Pmax = (self.Pmax,"bar"),
comment = self.comment,
)
def test_arrhenius(self):
"""
Test that the MultiPDepArrhenius arrhenius property was properly set.
"""
self.assertEqual(self.kinetics.arrhenius, self.arrhenius)
def test_Tmin(self):
"""
Test that the MultiPDepArrhenius Tmin property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the MultiPDepArrhenius Tmax property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)
def test_Pmin(self):
"""
Test that the MultiPDepArrhenius Pmin property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Pmin.value_si*1e-5, self.Pmin, 6)
def test_Pmax(self):
"""
Test that the MultiPDepArrhenius Pmax property was properly set.
"""
self.assertAlmostEqual(self.kinetics.Pmax.value_si*1e-5, self.Pmax, 6)
def test_comment(self):
"""
Test that the MultiPDepArrhenius comment property was properly set.
"""
self.assertEqual(self.kinetics.comment, self.comment)
def test_isTemperatureValid(self):
"""
Test the MultiPDepArrhenius.isTemperatureValid() method.
"""
Tdata = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
validdata = numpy.array([False,True,True,True,True,True,True,False,False,False], numpy.bool)
for T, valid in zip(Tdata, validdata):
valid0 = self.kinetics.isTemperatureValid(T)
self.assertEqual(valid0, valid)
def test_isPressureValid(self):
"""
Test the MultiPDepArrhenius.isPressureValid() method.
"""
Pdata = numpy.array([1e3,1e4,1e5,1e6,1e7])
validdata = numpy.array([False,True,True,True,False], numpy.bool)
for P, valid in zip(Pdata, validdata):
valid0 = self.kinetics.isPressureValid(P)
self.assertEqual(valid0, valid)
def test_getRateCoefficient(self):
"""
Test the MultiPDepArrhenius.getRateCoefficient() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
Plist = numpy.array([1e4,1e5,1e6])
kexplist = numpy.array([
[2.85400e-08, 4.00384e-03, 2.73563e-01, 8.50699e+00, 1.20181e+02, 7.56312e+02, 2.84724e+03, 7.71702e+03, 1.67743e+04, 3.12290e+04],
[2.85400e-07, 4.00384e-02, 2.73563e+00, 8.50699e+01, 1.20181e+03, 7.56312e+03, 2.84724e+04, 7.71702e+04, 1.67743e+05, 3.12290e+05],
[2.85400e-06, 4.00384e-01, 2.73563e+01, 8.50699e+02, 1.20181e+04, 7.56312e+04, 2.84724e+05, 7.71702e+05, 1.67743e+06, 3.12290e+06],
]).T
for i in range(Tlist.shape[0]):
for j in range(Plist.shape[0]):
kexp = kexplist[i,j]
kact = self.kinetics.getRateCoefficient(Tlist[i], Plist[j])
self.assertAlmostEqual(kexp, kact, delta=1e-4*kexp)
def test_pickle(self):
"""
Test that a MultiPDepArrhenius object can be pickled and unpickled with
no loss of information.
"""
import cPickle
kinetics = cPickle.loads(cPickle.dumps(self.kinetics,-1))
self.assertEqual(len(self.kinetics.arrhenius), len(kinetics.arrhenius))
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_repr(self):
"""
Test that a MultiPDepArrhenius object can be reconstructed from its
repr() output with no loss of information.
"""
kinetics = None
exec('kinetics = {0!r}'.format(self.kinetics))
self.assertEqual(len(self.kinetics.arrhenius), len(kinetics.arrhenius))
self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
self.assertEqual(self.kinetics.comment, kinetics.comment)
def test_changeRate(self):
"""
Test the PDepMultiArrhenius.changeRate() method.
"""
Tlist = numpy.array([300,400,500,600,700,800,900,1000,1100,1200,1300,1400,1500])
k0list = numpy.array([self.kinetics.getRateCoefficient(T,1e5) for T in Tlist])
self.kinetics.changeRate(2)
for T, kexp in zip(Tlist, k0list):
kact = self.kinetics.getRateCoefficient(T,1e5)
self.assertAlmostEqual(2*kexp, kact, delta=1e-6*kexp)
|
jacklee0810/QMarkdowner
|
refs/heads/master
|
tftpy/TftpServer.py
|
17
|
"""This module implements the TFTP Server functionality. Instantiate an
instance of the server, and then run the listen() method to listen for client
requests. Logging is performed via a standard logging object set in
TftpShared."""
import socket, os, time
import select
from TftpShared import *
from TftpPacketTypes import *
from TftpPacketFactory import TftpPacketFactory
from TftpContexts import TftpContextServer
class TftpServer(TftpSession):
"""This class implements a tftp server object. Run the listen() method to
listen for client requests. It takes two optional arguments. tftproot is
the path to the tftproot directory to serve files from and/or write them
to. dyn_file_func is a callable that must return a file-like object to
read from during downloads. This permits the serving of dynamic
content."""
def __init__(self, tftproot='/tftpboot', dyn_file_func=None):
self.listenip = None
self.listenport = None
self.sock = None
# FIXME: What about multiple roots?
self.root = os.path.abspath(tftproot)
self.dyn_file_func = dyn_file_func
# A dict of sessions, where each session is keyed by a string like
# ip:tid for the remote end.
self.sessions = {}
if os.path.exists(self.root):
log.debug("tftproot %s does exist" % self.root)
if not os.path.isdir(self.root):
raise TftpException, "The tftproot must be a directory."
else:
log.debug("tftproot %s is a directory" % self.root)
if os.access(self.root, os.R_OK):
log.debug("tftproot %s is readable" % self.root)
else:
raise TftpException, "The tftproot must be readable"
if os.access(self.root, os.W_OK):
log.debug("tftproot %s is writable" % self.root)
else:
log.warning("The tftproot %s is not writable" % self.root)
else:
raise TftpException, "The tftproot does not exist."
def listen(self,
listenip="",
listenport=DEF_TFTP_PORT,
timeout=SOCK_TIMEOUT):
"""Start a server listening on the supplied interface and port. This
defaults to INADDR_ANY (all interfaces) and UDP port 69. You can also
supply a different socket timeout value, if desired."""
tftp_factory = TftpPacketFactory()
# Don't use new 2.5 ternary operator yet
# listenip = listenip if listenip else '0.0.0.0'
if not listenip: listenip = '0.0.0.0'
log.info("Server requested on ip %s, port %s"
% (listenip, listenport))
try:
# FIXME - sockets should be non-blocking
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((listenip, listenport))
except socket.error, err:
# Reraise it for now.
raise
log.info("Starting receive loop...")
while True:
# Build the inputlist array of sockets to select() on.
inputlist = []
inputlist.append(self.sock)
for key in self.sessions:
inputlist.append(self.sessions[key].sock)
# Block until some socket has input on it.
log.debug("Performing select on this inputlist: %s" % inputlist)
readyinput, readyoutput, readyspecial = select.select(inputlist,
[],
[],
SOCK_TIMEOUT)
deletion_list = []
# Handle the available data, if any. Maybe we timed-out.
for readysock in readyinput:
# Is the traffic on the main server socket? ie. new session?
if readysock == self.sock:
log.debug("Data ready on our main socket")
buffer, (raddress, rport) = self.sock.recvfrom(MAX_BLKSIZE)
log.debug("Read %d bytes" % len(buffer))
# Forge a session key based on the client's IP and port,
# which should safely work through NAT.
key = "%s:%s" % (raddress, rport)
if not self.sessions.has_key(key):
log.debug("Creating new server context for "
"session key = %s" % key)
self.sessions[key] = TftpContextServer(raddress,
rport,
timeout,
self.root,
self.dyn_file_func)
try:
self.sessions[key].start(buffer)
except TftpException, err:
deletion_list.append(key)
log.error("Fatal exception thrown from "
"session %s: %s" % (key, str(err)))
else:
log.warn("received traffic on main socket for "
"existing session??")
log.info("Currently handling these sessions:")
for session_key, session in self.sessions.items():
log.info(" %s" % session)
else:
# Must find the owner of this traffic.
for key in self.sessions:
if readysock == self.sessions[key].sock:
log.info("Matched input to session key %s"
% key)
try:
self.sessions[key].cycle()
                            if self.sessions[key].state is None:
log.info("Successful transfer.")
deletion_list.append(key)
except TftpException, err:
deletion_list.append(key)
log.error("Fatal exception thrown from "
"session %s: %s"
% (key, str(err)))
# Break out of for loop since we found the correct
# session.
break
else:
log.error("Can't find the owner for this packet. "
"Discarding.")
log.debug("Looping on all sessions to check for timeouts")
now = time.time()
for key in self.sessions:
try:
self.sessions[key].checkTimeout(now)
except TftpTimeout, err:
log.error(str(err))
self.sessions[key].retry_count += 1
if self.sessions[key].retry_count >= TIMEOUT_RETRIES:
log.debug("hit max retries on %s, giving up"
% self.sessions[key])
deletion_list.append(key)
else:
log.debug("resending on session %s"
% self.sessions[key])
self.sessions[key].state.resendLast()
log.debug("Iterating deletion list.")
for key in deletion_list:
log.info('')
log.info("Session %s complete" % key)
if self.sessions.has_key(key):
log.debug("Gathering up metrics from session before deleting")
self.sessions[key].end()
metrics = self.sessions[key].metrics
if metrics.duration == 0:
log.info("Duration too short, rate undetermined")
else:
log.info("Transferred %d bytes in %.2f seconds"
% (metrics.bytes, metrics.duration))
log.info("Average rate: %.2f kbps" % metrics.kbps)
log.info("%.2f bytes in resent data" % metrics.resent_bytes)
log.info("%d duplicate packets" % metrics.dupcount)
log.debug("Deleting session %s" % key)
del self.sessions[key]
log.debug("Session list is now %s" % self.sessions)
else:
log.warn("Strange, session %s is not on the deletion list"
% key)
|
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnboundLocalVariableInspection/UnboundAugmentedAssignment.py
|
83
|
def f(c):
if c:
x = 1
<warning descr="Local variable 'x' might be referenced before assignment">x</warning> += 1 #fail
return x
|
cursesun/zhihu-python
|
refs/heads/master
|
auth.py
|
10
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Build-in / Std
import os, sys, time, platform, random
import re, json, cookielib
# requirements
import requests, termcolor
requests = requests.Session()
requests.cookies = cookielib.LWPCookieJar('cookies')
try:
requests.cookies.load(ignore_discard=True)
except:
pass
class Logging:
flag = True
@staticmethod
def error(msg):
if Logging.flag == True:
print "".join( [ termcolor.colored("ERROR", "red"), ": ", termcolor.colored(msg, "white") ] )
@staticmethod
def warn(msg):
if Logging.flag == True:
print "".join( [ termcolor.colored("WARN", "yellow"), ": ", termcolor.colored(msg, "white") ] )
@staticmethod
def info(msg):
# attrs=['reverse', 'blink']
if Logging.flag == True:
print "".join( [ termcolor.colored("INFO", "magenta"), ": ", termcolor.colored(msg, "white") ] )
@staticmethod
def debug(msg):
if Logging.flag == True:
print "".join( [ termcolor.colored("DEBUG", "magenta"), ": ", termcolor.colored(msg, "white") ] )
@staticmethod
def success(msg):
if Logging.flag == True:
print "".join( [ termcolor.colored("SUCCES", "green"), ": ", termcolor.colored(msg, "white") ] )
# Setting Logging
Logging.flag = True
class LoginPasswordError(Exception):
def __init__(self, message):
if type(message) != type("") or message == "": self.message = u"帐号密码错误"
else: self.message = message
Logging.error(self.message)
class NetworkError(Exception):
def __init__(self, message):
if type(message) != type("") or message == "": self.message = u"网络异常"
else: self.message = message
Logging.error(self.message)
class AccountError(Exception):
def __init__(self, message):
if type(message) != type("") or message == "": self.message = u"帐号类型错误"
else: self.message = message
Logging.error(self.message)
def download_captcha():
url = "http://www.zhihu.com/captcha.gif"
r = requests.get(url, params={"r": random.random()} )
if int(r.status_code) != 200:
raise NetworkError(u"验证码请求失败")
image_name = u"verify." + r.headers['content-type'].split("/")[1]
open( image_name, "wb").write(r.content)
"""
System platform: https://docs.python.org/2/library/platform.html
"""
Logging.info(u"正在调用外部程序渲染验证码 ... ")
    if platform.system() == "Linux":
        Logging.info(u"Command: xdg-open %s &" % image_name )
        os.system("xdg-open %s &" % image_name )
    elif platform.system() == "Darwin":
        Logging.info(u"Command: open %s &" % image_name )
        os.system("open %s &" % image_name )
    elif platform.system() in ("SunOS", "FreeBSD", "Unix", "OpenBSD", "NetBSD"):
        os.system("open %s &" % image_name )
    elif platform.system() == "Windows":
        # "open" is not a Windows shell command; os.startfile opens the file
        # with the default image viewer instead.
        os.startfile(image_name)
else:
Logging.info(u"我们无法探测你的作业系统,请自行打开验证码 %s 文件,并输入验证码。" % os.path.join(os.getcwd(), image_name) )
captcha_code = raw_input( termcolor.colored("请输入验证码: ", "cyan") )
return captcha_code
def search_xsrf():
url = "http://www.zhihu.com/"
r = requests.get(url)
if int(r.status_code) != 200:
        raise NetworkError(u"知乎首页请求失败")
results = re.compile(r"\<input\stype=\"hidden\"\sname=\"_xsrf\"\svalue=\"(\S+)\"", re.DOTALL).findall(r.text)
if len(results) < 1:
Logging.info(u"提取XSRF 代码失败" )
return None
return results[0]
def build_form(account, password):
account_type = "email"
if re.match(r"^\d{11}$", account): account_type = "phone"
elif re.match(r"^\S+\@\S+\.\S+$", account): account_type = "email"
else: raise AccountError(u"帐号类型错误")
form = {account_type: account, "password": password, "remember_me": True }
form['_xsrf'] = search_xsrf()
form['captcha'] = download_captcha()
return form
def upload_form(form):
url = "http://www.zhihu.com/login/email"
headers = {
'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36",
'Host': "www.zhihu.com",
'Origin': "http://www.zhihu.com",
'Pragma': "no-cache",
'Referer': "http://www.zhihu.com/",
'X-Requested-With': "XMLHttpRequest"
}
r = requests.post(url, data=form, headers=headers)
if int(r.status_code) != 200:
raise NetworkError(u"表单上传失败!")
if r.headers['content-type'].lower() == "application/json":
result = r.json()
if result["r"] == 0:
Logging.success(u"登录成功!" )
return {"result": True}
elif result["r"] == 1:
Logging.success(u"登录失败!" )
return {"error": {"code": int(result['errcode']), "message": result['msg'], "data": result['data'] } }
else:
Logging.warn(u"表单上传出现未知错误: \n \t %s )" % ( str(result) ) )
return {"error": {"code": -1, "message": u"unknow error"} }
else:
Logging.warn(u"无法解析服务器的响应内容: \n \t %s " % r.text )
return {"error": {"code": -2, "message": u"parse error"} }
def islogin():
# check session
url = "http://www.zhihu.com/settings/profile"
r = requests.get(url, allow_redirects=False)
status_code = int(r.status_code)
if status_code == 301 or status_code == 302:
# 未登录
return False
elif status_code == 200:
return True
else:
Logging.warn(u"网络故障")
return None
def read_account_from_config_file(config_file="config.ini"):
# NOTE: The ConfigParser module has been renamed to configparser in Python 3.
# The 2to3 tool will automatically adapt imports when converting your sources to Python 3.
# https://docs.python.org/2/library/configparser.html
from ConfigParser import ConfigParser
cf = ConfigParser()
if os.path.exists(config_file) and os.path.isfile(config_file):
Logging.info(u"正在加载配置文件 ...")
cf.read(config_file)
email = cf.get("info", "email")
password = cf.get("info", "password")
if email == "" or password == "":
Logging.warn(u"帐号信息无效")
return (None, None)
else: return (email, password)
else:
Logging.error(u"配置文件加载失败!")
return (None, None)
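# A sample config.ini this loader expects (values are placeholders):
#
#     [info]
#     email = user@example.com
#     password = secret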
def login(account=None, password=None):
if islogin() == True:
Logging.success(u"你已经登录过咯")
return True
if account == None:
(account, password) = read_account_from_config_file()
if account == None:
account = raw_input("请输入登录帐号: ")
password = raw_input("请输入登录密码: ")
form_data = build_form(account, password)
"""
result:
{"result": True}
{"error": {"code": 19855555, "message": "unknow.", "data": "data" } }
{"error": {"code": -1, "message": u"unknow error"} }
"""
result = upload_form(form_data)
if "error" in result:
if result["error"]['code'] == 1991829:
# 验证码错误
Logging.error(u"验证码输入错误,请准备重新输入。" )
return login()
else:
Logging.warn(u"unknow error." )
return False
elif "result" in result and result['result'] == True:
# 登录成功
Logging.success(u"登录成功!" )
requests.cookies.save()
return True
if __name__ == "__main__":
# login(account="xxxx@email.com", password="xxxxx")
login()
|
dulems/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/db/backends/oracle/base.py
|
623
|
from django.db.backends.oracle.base import *
from django.db.backends.oracle.base import DatabaseWrapper as OracleDatabaseWrapper
from django.contrib.gis.db.backends.oracle.creation import OracleCreation
from django.contrib.gis.db.backends.oracle.introspection import OracleIntrospection
from django.contrib.gis.db.backends.oracle.operations import OracleOperations
class DatabaseWrapper(OracleDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.ops = OracleOperations(self)
self.creation = OracleCreation(self)
self.introspection = OracleIntrospection(self)
|
prmtl/fuel-web
|
refs/heads/master
|
nailgun/nailgun/api/v1/validators/json_schema/node.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import consts
from nailgun.api.v1.validators.json_schema import base_types
# TODO(@ikalnitsky): add `required` properties to all needed objects
single_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Node",
"description": "Serialized Node object",
"type": "object",
"properties": {
"mac": base_types.MAC_ADDRESS,
"ip": base_types.IP_ADDRESS,
"meta": {
"type": "object",
"properties": {
# I guess the format schema below will be used somewhere else,
# so it would be great to move it out in the future.
"interfaces": {
"type": "array",
"items": {
"type": "object",
"properties": {
"ip": base_types.NULLABLE_IP_ADDRESS,
"netmask": base_types.NET_ADDRESS,
"mac": base_types.MAC_ADDRESS,
"state": {"type": "string"},
"name": {"type": "string"},
"driver": {"type": "string"},
"bus_info": {"type": "string"},
"offloading_modes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"state": {
"type": [
"boolean",
"null"
]
},
"sub": {
"$ref": "#/properties/meta/"
"properties/interfaces/"
"items/properties/"
"offloading_modes"
}
}
}
},
"pxe": {"type": "boolean"}
}
}
},
# I guess the format schema below will be used somewhere else,
# so it would be great to move it out in the future.
"disks": {
"type": "array",
"items": {
"type": "object",
"properties": {
"model": base_types.NULLABLE_STRING,
"disk": {"type": "string"},
"size": {"type": "number"},
"name": {"type": "string"},
}
}
},
"memory": {
"type": "object",
"properties": {
"total": {"type": "number"}
}
},
"cpu": {
"type": "object",
"properties": {
"spec": {
"type": "array",
"items": {
"type": "object",
"properties": {
"model": {"type": "string"},
"frequency": {"type": "number"}
}
}
},
"total": {"type": "integer"},
"real": {"type": "integer"},
}
},
"system": {
"type": "object",
"properties": {
"manufacturer": {"type": "string"},
"version": {"type": "string"},
"serial": {"type": "string"},
"family": {"type": "string"},
"fqdn": {"type": "string"},
}
},
}
},
"id": {"type": "integer"},
"status": {"enum": list(consts.NODE_STATUSES)},
"cluster_id": base_types.NULLABLE_ID,
"name": {"type": "string"},
"manufacturer": base_types.NULLABLE_STRING,
"os_platform": base_types.NULLABLE_STRING,
"is_agent": {"type": "boolean"},
"platform_name": base_types.NULLABLE_STRING,
"group_id": {"type": "number"},
"fqdn": base_types.NULLABLE_STRING,
"kernel_params": base_types.NULLABLE_STRING,
"progress": {"type": "number"},
"pending_addition": {"type": "boolean"},
"pending_deletion": {"type": "boolean"},
"error_type": base_types.NULLABLE_ENUM(list(consts.NODE_ERRORS)),
"error_msg": {"type": "string"},
"online": {"type": "boolean"},
"roles": {"type": "array"},
"pending_roles": {"type": "array"},
"agent_checksum": {"type": "string"}
},
}
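# A minimal payload accepted by single_schema above (a sketch: the exact
# formats behind base_types.MAC_ADDRESS and base_types.IP_ADDRESS are assumed
# to be the usual MAC/IPv4 notations, and every property is optional since no
# "required" list is set yet, per the TODO above):
#   {"mac": "00:25:90:6a:b1:10", "ip": "10.20.0.4", "name": "node-1"}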
|
ArianaGashi/Techstitution
|
refs/heads/master
|
venv/lib/python2.7/site-packages/flask/testsuite/test_apps/config_module_app.py
|
1257
|
import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
0x7678/nfi
|
refs/heads/master
|
DBHandler.py
|
2
|
'''
NFI -- Silensec's Nyuki Forensics Investigator
Copyright (C) 2014 George Nicolaou (george[at]silensec[dot]com)
Silensec Ltd.
This file is part of Nyuki Forensics Investigator (NFI).
NFI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
NFI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NFI. If not, see <http://www.gnu.org/licenses/>.
'''
import sqlite3,os
import Queue, time
from threading import Thread
dbschema = [
"""CREATE TABLE officers (
officer_id INTEGER PRIMARY KEY AUTOINCREMENT,
officer_name TEXT,
officer_badge TEXT
)""",
"CREATE INDEX officer_id ON officers ( officer_id )",
"""CREATE TABLE case_type (
type_id INTEGER PRIMARY KEY AUTOINCREMENT,
type_text TEXT
)""",
"CREATE INDEX type_id ON case_type ( type_id )",
"""INSERT INTO case_type ( type_id, type_text )
VALUES ( 1, 'Logical Image Dump' )""",
"""INSERT INTO case_type ( type_id, type_text )
VALUES ( 2, 'Device Backup Folder' )""",
"""CREATE TABLE cases (
case_id INTEGER PRIMARY KEY AUTOINCREMENT,
case_name TEXT,
case_date INTEGER,
case_comments TEXT,
type_id INTEGER,
case_appsmount TEXT,
case_sysmount TEXT,
case_scanned BOOLEAN
)""",
"CREATE INDEX case_id ON cases ( case_id )",
"""CREATE TABLE case_store (
cs_id INTEGER PRIMARY KEY AUTOINCREMENT,
case_id INTEGER,
cs_path TEXT
)""",
"""CREATE TABLE case_file (
cf_id INTEGER PRIMARY KEY AUTOINCREMENT,
cf_location TEXT,
cf_signature TEXT,
cf_date_saved INTEGER,
cf_date_accessed INTEGER,
cf_active INTEGER,
case_id INTEGER
)""",
"CREATE INDEX cf_id ON case_file ( cf_id )",
"""CREATE TABLE case_officers (
co_id INTEGER PRIMARY KEY AUTOINCREMENT,
officer_id INTEGER,
case_id INTEGER
)""",
"""CREATE TRIGGER on_case_delete BEFORE DELETE ON cases
BEGIN
DELETE FROM case_officers WHERE case_officers.case_id = old.case_id;
DELETE FROM case_file WHERE case_file.case_id = old.case_id;
DELETE FROM case_store WHERE case_store.case_id = old.case_id;
END;
""",
"""CREATE VIEW view_case_officers AS
SELECT
case_officers.case_id, case_officers.officer_id, officers.officer_name
FROM case_officers
INNER JOIN officers ON case_officers.officer_id = officers.officer_id
"""
]
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
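# With dict_factory installed as the connection's row_factory (see run()
# below), fetched rows come back as plain dicts keyed by column name, e.g.
# {'case_id': 1, 'case_name': 'demo'}, instead of positional tuples.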
class DBCmd(object):
cmd = None
params = None
result = None
def __init__(self, cmd, params=None ):
self.cmd = cmd
self.params = params
class DBHandler(Thread):
db = None
dbq = None
    def _populate_db(self, cursor):
        for statement in dbschema:
            cursor.execute(statement)
"""
def __del__(self,signal,frame):
if self.db != None:
print "Closing DB"
self.db.close()
"""
def __init__(self, dbfile):
Thread.__init__(self)
self.dbq = Queue.Queue()
self.dbfile = dbfile
self.daemon = True
self.start()
return
def run(self):
exists = os.path.exists(self.dbfile)
con = sqlite3.connect(self.dbfile)
con.row_factory = dict_factory
cur = con.cursor()
        if not exists:
self._populate_db(cur)
con.commit()
while True:
req = self.dbq.get()
            if req.params is not None:
print "Executing: " + str(req.cmd) + " PARAMS: " + str(req.params)
cur.execute(req.cmd, req.params)
else:
print "Executing: " + str(req.cmd)
cur.execute(req.cmd)
if req.cmd.upper().startswith("INSERT"):
lastid = cur.lastrowid
req.result.put(lastid)
else:
results = cur.fetchall()
req.result.put(results)
if not req.cmd.upper().startswith("SELECT"):
con.commit()
def _exec_sql(self, query, params=None):
qobj = DBCmd(query,params)
qobj.result = Queue.Queue()
self.dbq.put(qobj)
return qobj.result.get()
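    # Every public method below funnels its SQL through _exec_sql(), so all
    # statements execute on the single DBHandler thread (sqlite3 connections
    # must not be shared across threads) while the caller blocks on the
    # per-request result queue until run() posts the answer.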
def create_case(self, casename, officers, comments, ctype, apps, system):
q = """INSERT INTO cases (
case_name,
case_date,
case_comments,
type_id,
case_appsmount,
case_sysmount,
case_scanned
) VALUES (?, ?, ?, ?, ?, ?, ?)"""
cid = self._exec_sql(q, (casename, int(time.time()), comments, ctype,
apps, system, 0))
for officer in officers:
q = """INSERT INTO case_officers
( officer_id, case_id ) VALUES ( ?, ? )"""
self._exec_sql(q, (officer, cid))
return cid
"""
Update one or more fields from a table.
Args:
table: The table's name
fields: Dictionary containing columns and values to update. For example:
{'column_1': 1, 'column_2': 2}
where: The WHERE string. Can also be used in conjuction with
where_params for security. Eg: col_1 = ? AND col_2 = ?
where_params: An array containing the parameters to replace question
marks with to construct the SQL statement. eg: [1, 2]
"""
def update(self, table, fields, where=None, where_params=None ):
q = """UPDATE {tbl} SET """.format(tbl=table)
q += ', '.join( "{col} = ?".format(col=key) for key in fields.iterkeys())
params = fields.values()
        if where is not None:
            q += " WHERE " + where
            if where_params is not None:
params += where_params
return self._exec_sql(q, tuple(params))
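    # Usage sketch, mirroring the call in add_case_file() below:
    #   self.update("case_file", {"cf_active": 0}, "case_id = ?", [case_id])
    # builds "UPDATE case_file SET cf_active = ? WHERE case_id = ?" and
    # executes it with params (0, case_id).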
def add_case_file(self, case_id, case_filepath, file_signature):
tstamp = int(time.time())
self.update( "case_file", {"cf_active":0}, "case_id = ?", [case_id])
q = """INSERT INTO
case_file (
cf_location,
cf_signature,
cf_date_saved,
cf_date_accessed,
cf_active,
case_id
) VALUES
( ?, ?, '{t}', '{t}', '1', ? )""".format(t=tstamp)
return self._exec_sql(q, (case_filepath, file_signature, case_id))
def add_case_storepath(self, case_id, store_path):
q = """INSERT INTO
case_store (
case_id,
cs_path
) VALUES
( ?, ? )
"""
return self._exec_sql(q, ( case_id, store_path ) )
def get_case_storepath(self, case_id ):
q = """SELECT cs_path FROM case_store WHERE case_id = ?"""
result = self._exec_sql(q, (case_id,) )
if len(result) != 0:
return result[0]["cs_path"]
return None
def get_case_file(self, case_id, accessing=False, allfiles=None, cf_id=None):
        if allfiles is None: allfiles = False
q = """SELECT
cf_id,
cf_location,
cf_signature,
cf_date_saved,
cf_date_accessed
FROM case_file
WHERE case_id = ?"""
        if not allfiles:
            if cf_id is not None:
q += """ AND cf_id = ?"""
params = (case_id, cf_id)
else:
q += """ AND cf_active = 1"""
params = (case_id,)
else:
params = (case_id,)
case_files = self._exec_sql(q, params)
if accessing:
for case_file in case_files:
q = """UPDATE case_file SET cf_date_accessed = {t} WHERE
cf_id = ?""".format(t=int(time.time()))
self._exec_sql(q, (case_file["cf_id"],))
if len(case_files) == 0:
return None
elif allfiles:
return case_files
else:
return case_files[0]
def remove_case(self, case_id):
q = """DELETE FROM cases WHERE case_id = ?"""
return self._exec_sql(q, (case_id,))
def get_officers(self):
q = """SELECT officer_id, officer_name, officer_badge from officers"""
return self._exec_sql(q)
def add_officer(self, name, badge):
q = "INSERT INTO officers ( officer_name, officer_badge ) VALUES ( ?, ?)"
return self._exec_sql(q, (name, badge))
def get_cases(self, case_id=None):
query = """SELECT
cases.case_id as case_id,
case_name,
case_date,
case_comments,
case_appsmount,
case_sysmount,
case_scanned,
case_type.type_text as type_text,
GROUP_CONCAT(view_case_officers.officer_name, ', ') as officers
FROM cases
INNER JOIN case_type ON cases.type_id = case_type.type_id
INNER JOIN view_case_officers ON cases.case_id = view_case_officers.case_id"""
group = """
GROUP BY cases.case_id
"""
        if case_id is not None:
query += " WHERE cases.case_id = ? " + group
result = self._exec_sql(query, [case_id])
else:
query += group
result = self._exec_sql(query)
if len(result) != 0:
            if result[0]['case_name'] is None:
return []
return result
|
meabsence/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Tools/scripts/parseentities.py
|
46
|
#!/usr/bin/env python3
""" Utility for parsing HTML entity definitions available from:
http://www.w3.org/ as e.g.
http://www.w3.org/TR/REC-html40/HTMLlat1.ent
Input is read from stdin, output is written to stdout in form of a
Python snippet defining a dictionary "entitydefs" mapping literal
entity name to character or numeric entity.
Marc-Andre Lemburg, mal@lemburg.com, 1999.
Use as you like. NO WARRANTIES.
"""
import re,sys
import TextTools
entityRE = re.compile(r'<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
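# Example of a definition this pattern is meant to match (the entity line is
# illustrative, following the HTMLlat1.ent format cited in the docstring):
#   <!ENTITY nbsp   CDATA "&#160;" -- no-break space -->
# parse() below turns it into {'nbsp': ('&#160;', 'no-break space')}.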
def parse(text, pos=0, endpos=None):
if endpos is None:
endpos = len(text)
d = {}
while 1:
m = entityRE.search(text,pos,endpos)
if not m:
break
name,charcode,comment = m.groups()
d[name] = charcode,comment
pos = m.end()
return d
def writefile(f,defs):
f.write("entitydefs = {\n")
items = sorted(defs.items())
for name, (charcode,comment) in items:
if charcode[:2] == '&#':
code = int(charcode[2:-1])
if code < 256:
charcode = "'\%o'" % code
else:
charcode = repr(charcode)
else:
charcode = repr(charcode)
comment = TextTools.collapse(comment)
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
f.write('\n}\n')
if __name__ == '__main__':
if len(sys.argv) > 1:
infile = open(sys.argv[1])
else:
infile = sys.stdin
if len(sys.argv) > 2:
outfile = open(sys.argv[2],'w')
else:
outfile = sys.stdout
text = infile.read()
defs = parse(text)
writefile(outfile,defs)
|
gregorlarson/loxodo
|
refs/heads/cmdline1
|
src/__init__.py
|
28
|
#
# Loxodo -- Password Safe V3 compatible Password Vault
# Copyright (C) 2008 Christoph Sommer <mail@christoph-sommer.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
|
asacamano/keyczar
|
refs/heads/master
|
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/tar.py
|
19
|
"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tar.py 4043 2009/02/23 09:06:45 scons"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
TarBuilder = SCons.Builder.Builder(action = TarAction,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$TARSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for tar to an Environment."""
try:
bld = env['BUILDERS']['Tar']
except KeyError:
bld = TarBuilder
env['BUILDERS']['Tar'] = bld
env['TAR'] = env.Detect(tars) or 'gtar'
env['TARFLAGS'] = SCons.Util.CLVar('-c')
env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
env['TARSUFFIX'] = '.tar'
def exists(env):
return env.Detect(tars)
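# A minimal SConstruct sketch using this tool (assuming a tar binary is
# detected on the PATH):
#   env = Environment(tools=['tar'])
#   env.Tar('archive.tar', ['src', 'README'])
# which expands $TARCOM to "tar -c -f archive.tar src README".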
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
berndw1960/creategmap
|
refs/heads/master
|
mapdata.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import configparser
import time
WORK_DIR = (os.environ['HOME'] + "/map_build/")
def info(msg):
print("II: " + msg)
def warn(msg):
print("WW: " + msg)
def error(msg):
print("EE: " + msg)
# configparser
def write_config():
with open('pygmap3.cfg', 'w') as configfile:
config.write(configfile)
config = configparser.ConfigParser()
def create_o5m():
os.chdir(WORK_DIR)
config.read('pygmap3.cfg')
region = config['runtime']['region']
if os.path.exists("planet/planet-latest.osm.pbf"):
        os.rename("planet/planet-latest.osm.pbf", "planet/planet.osm.pbf")
if not os.path.exists("poly/" + region + ".poly"):
print()
error("No poly file for " + region + " found!")
print()
quit()
for planet in ["planet/planet.o5m", "planet/planet.osm.pbf"]:
if os.path.exists(planet):
ftime = os.path.getmtime(planet)
curtime = time.time()
difftime = curtime - ftime
if difftime > 1741800:
print()
warn("Your planet file is older then one month")
print(" You should update it.")
print()
info("now extracting " + region
+ ".o5m from Planet, please wait...")
os.system("osmconvert " + planet + " "
+ "--complete-ways "
+ "--complete-multipolygons "
+ "--complete-boundaries "
+ "--drop-version "
+ "--drop-author "
+ "-B=poly/" + region + ".poly "
+ " -o=o5m/" + region + ".o5m ")
break
else:
print()
error("No planet file found, couldn't extract the raw data!")
print()
quit()
def update_o5m():
os.chdir(WORK_DIR)
config.read('pygmap3.cfg')
region = config['runtime']['region']
if config.has_option('runtime', 'minutely'):
update_opts = " --hourly -- minutely "
elif config.has_option('runtime', 'hourly'):
update_opts = " --hourly "
else:
update_opts = " "
print()
info("updating " + region + ".o5m, please wait...")
poly = " "
if os.path.exists("poly/" + region + ".poly"):
poly = " -B=poly/" + region + ".poly "
os.system("osmupdate --daily "
+ "--drop-version "
+ "--drop-author "
+ update_opts
+ poly
+ "--keep-tempfiles "
+ "o5m/" + region + ".o5m o5m/" + region + "_new.o5m")
os.chdir("o5m")
if os.path.exists(region + "_new.o5m"):
os.rename(region + ".o5m", region + "_temp.o5m")
os.rename(region + "_new.o5m", region + ".o5m")
if os.path.exists(region + ".o5m"):
os.remove(region + "_temp.o5m")
os.chdir(WORK_DIR)
write_config()
|
Taapat/enigma2-openpli-vuplus
|
refs/heads/master
|
lib/python/Components/EpgList.py
|
5
|
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eEPGCache, eListbox, eListboxPythonMultiContent, gFont, \
RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER
from Tools.Alternatives import CompareWithAlternatives
from Tools.LoadPixmap import LoadPixmap
from time import localtime, time
from Components.config import config
from ServiceReference import ServiceReference
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from skin import parseFont
EPG_TYPE_SINGLE = 0
EPG_TYPE_MULTI = 1
EPG_TYPE_SIMILAR = 2
class Rect:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.w = width
self.h = height
# silly, but backward compatible
def left(self):
return self.x
def top(self):
return self.y
def height(self):
return self.h
def width(self):
return self.w
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, type=EPG_TYPE_SINGLE, selChangedCB=None, timer = None):
self.days = (_("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun"))
self.timer = timer
self.onSelChanged = [ ]
if selChangedCB is not None:
self.onSelChanged.append(selChangedCB)
GUIComponent.__init__(self)
self.type=type
self.l = eListboxPythonMultiContent()
self.eventItemFont = gFont("Regular", 22)
self.eventTimeFont = gFont("Regular", 16)
self.iconSize = 21
self.iconDistance = 2
self.colGap = 10
self.skinColumns = False
self.tw = 90
self.dy = 0
if type == EPG_TYPE_SINGLE:
self.l.setBuildFunc(self.buildSingleEntry)
elif type == EPG_TYPE_MULTI:
self.l.setBuildFunc(self.buildMultiEntry)
else:
assert(type == EPG_TYPE_SIMILAR)
self.l.setBuildFunc(self.buildSimilarEntry)
self.epgcache = eEPGCache.getInstance()
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_post.png')) ]
def getEventFromId(self, service, eventid):
event = None
if self.epgcache is not None and eventid is not None:
event = self.epgcache.lookupEventId(service.ref, eventid)
return event
def getCurrentChangeCount(self):
if self.type == EPG_TYPE_MULTI and self.l.getCurrentSelection() is not None:
return self.l.getCurrentSelection()[0]
return 0
def getCurrent(self):
idx=0
if self.type == EPG_TYPE_MULTI:
idx += 1
tmp = self.l.getCurrentSelection()
if tmp is None:
return ( None, None )
eventid = tmp[idx+1]
service = ServiceReference(tmp[idx])
event = self.getEventFromId(service, eventid)
return ( event, service )
def moveUp(self):
self.instance.moveSelection(self.instance.moveUp)
def moveDown(self):
self.instance.moveSelection(self.instance.moveDown)
	def connectSelectionChanged(self, func):
		if not self.onSelChanged.count(func):
			self.onSelChanged.append(func)
	def disconnectSelectionChanged(self, func):
		self.onSelChanged.remove(func)
def selectionChanged(self):
for x in self.onSelChanged:
if x is not None:
x()
# try:
# x()
# except: # FIXME!!!
# print "FIXME in EPGList.selectionChanged"
# pass
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setWrapAround(True)
instance.selectionChanged.get().append(self.selectionChanged)
instance.setContent(self.l)
def preWidgetRemove(self, instance):
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
def recalcEntrySize(self):
esize = self.l.getItemSize()
width = esize.width()
height = esize.height()
try:
self.iconSize = self.clocks[0].size().height()
except:
pass
self.space = self.iconSize + self.iconDistance
self.dy = int((height - self.iconSize)/2.)
if self.type == EPG_TYPE_SINGLE:
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.descr_rect = Rect(width/20*7, 0, width/20*13, height)
elif self.type == EPG_TYPE_MULTI:
if self.skinColumns:
x = 0
self.service_rect = Rect(x, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.progress_rect = Rect(x, 8, self.gap(self.col[1]), height-16)
self.start_end_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
xpos = 0
w = width/10*3
self.service_rect = Rect(xpos, 0, w-10, height)
xpos += w
w = width/10*2
self.start_end_rect = Rect(xpos, 0, w-10, height)
self.progress_rect = Rect(xpos, 4, w-10, height-8)
xpos += w
w = width/10*5
self.descr_rect = Rect(xpos, 0, width, height)
else: # EPG_TYPE_SIMILAR
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.service_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.service_rect = Rect(width/20*7, 0, width/20*13, height)
def gap(self, width):
return width - self.colGap
def getClockTypesForEntry(self, service, eventId, beginTime, duration):
if not beginTime:
return None
rec = self.timer.isInTimer(eventId, beginTime, duration, service)
if rec is not None:
return rec[1]
else:
return None
def buildSingleEntry(self, service, eventId, beginTime, duration, EventName):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.descr_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
			(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r2.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
return res
def buildSimilarEntry(self, service, eventId, beginTime, service_name, duration):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.service_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
			(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r2.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
return res
def buildMultiEntry(self, changecount, service, eventId, beginTime, duration, EventName, nowTime, service_name):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.service_rect
r2=self.progress_rect
r3=self.descr_rect
r4=self.start_end_rect
res = [ None ] # no private data needed
if clock_types:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w - self.space * len(clock_types), r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r1.x + r1.w - self.space * (i + 1), r1.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[len(clock_types) - 1 - i]]))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
if beginTime is not None:
if nowTime < beginTime:
begin = localtime(beginTime)
end = localtime(beginTime+duration)
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r4.x, r4.y, r4.w, r4.h, 1, RT_HALIGN_CENTER|RT_VALIGN_CENTER, "%02d.%02d - %02d.%02d"%(begin[3],begin[4],end[3],end[4])),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%d min") % (duration / 60)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
else:
percent = (nowTime - beginTime) * 100 / duration
prefix = "+"
remaining = ((beginTime+duration) - int(time())) / 60
if remaining <= 0:
prefix = ""
res.extend((
(eListboxPythonMultiContent.TYPE_PROGRESS, r2.x, r2.y, r2.w, r2.h, percent),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%s%d min") % (prefix, remaining)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
return res
def queryEPG(self, list, buildFunc=None):
if self.epgcache is not None:
if buildFunc is not None:
return self.epgcache.lookupEvent(list, buildFunc)
else:
return self.epgcache.lookupEvent(list)
return [ ]
def fillMultiEPG(self, services, stime=-1):
#t = time()
test = [ (service.ref.toString(), 0, stime) for service in services ]
test.insert(0, 'X0RIBDTCn')
self.list = self.queryEPG(test)
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def updateMultiEPG(self, direction):
#t = time()
test = [ x[3] and (x[1], direction, x[3]) or (x[1], direction, 0) for x in self.list ]
test.insert(0, 'XRIBDTCn')
tmp = self.queryEPG(test)
cnt=0
for x in tmp:
changecount = self.list[cnt][0] + direction
if changecount >= 0:
if x[2] is not None:
self.list[cnt]=(changecount, x[0], x[1], x[2], x[3], x[4], x[5], x[6])
cnt+=1
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def fillSingleEPG(self, service):
t = time()
epg_time = t - config.epg.histminutes.getValue()*60
test = [ 'RIBDT', (service.ref.toString(), 0, epg_time, -1) ]
self.list = self.queryEPG(test)
self.l.setList(self.list)
if t != epg_time:
idx = 0
for x in self.list:
idx += 1
if t < x[2]+x[3]:
break
self.instance.moveSelectionTo(idx-1)
self.selectionChanged()
def sortSingleEPG(self, type):
list = self.list
if list:
event_id = self.getSelectedEventId()
if type == 1:
list.sort(key=lambda x: (x[4] and x[4].lower(), x[2]))
else:
assert(type == 0)
list.sort(key=lambda x: x[2])
self.l.invalidate()
self.moveToEventId(event_id)
def getSelectedEventId(self):
x = self.l.getCurrentSelection()
return x and x[1]
def moveToService(self,serviceref):
if not serviceref:
return
index = 0
refstr = serviceref.toString()
for x in self.list:
if CompareWithAlternatives(x[1], refstr):
self.instance.moveSelectionTo(index)
break
index += 1
def moveToEventId(self, eventId):
if not eventId:
return
index = 0
for x in self.list:
if x[1] == eventId:
self.instance.moveSelectionTo(index)
break
index += 1
def fillSimilarList(self, refstr, event_id):
t = time()
# search similar broadcastings
if event_id is None:
return
l = self.epgcache.search(('RIBND', 1024, eEPGCache.SIMILAR_BROADCASTINGS_SEARCH, refstr, event_id))
if l and len(l):
l.sort(key=lambda x: x[2])
self.l.setList(l)
self.selectionChanged()
print time() - t
def applySkin(self, desktop, parent):
def warningWrongSkinParameter(string):
print "[EPGList] wrong '%s' skin parameters" % string
def setEventItemFont(value):
self.eventItemFont = parseFont(value, ((1,1),(1,1)))
def setEventTimeFont(value):
self.eventTimeFont = parseFont(value, ((1,1),(1,1)))
def setIconDistance(value):
self.iconDistance = int(value)
def setIconShift(value):
self.dy = int(value)
def setTimeWidth(value):
self.tw = int(value)
def setColWidths(value):
self.col = map(int, value.split(','))
if len(self.col) == 2:
self.skinColumns = True
else:
warningWrongSkinParameter(attrib)
def setColGap(value):
self.colGap = int(value)
for (attrib, value) in self.skinAttributes[:]:
try:
locals().get(attrib)(value)
self.skinAttributes.remove((attrib, value))
except:
pass
self.l.setFont(0, self.eventItemFont)
self.l.setFont(1, self.eventTimeFont)
return GUIComponent.applySkin(self, desktop, parent)
|
mogoweb/chromium-crosswalk
|
refs/heads/master
|
third_party/protobuf/python/google/protobuf/internal/service_reflection_test.py
|
559
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = 'petar@google.com (Petar Petrov)'
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
    # GetDescriptor is now static, but still works as an instance method for compatibility.
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertTrue(isinstance(self.callback_response,
unittest_pb2.FooResponse))
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
unittest.main()
|
mtscampagnolo/sistema_login
|
refs/heads/master
|
app/__init__.py
|
1
|
from bottle import Bottle
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
import bottle_session
Base = declarative_base()
engine = create_engine('sqlite:///database.db', echo=True)
app = Bottle()
plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword='db',
create=True,
commit=True,
use_kwargs=False
)
plugin_session = bottle_session.SessionPlugin(cookie_lifetime=300)
app.install(plugin)
app.install(plugin_session)
from app.controllers import default
from app.models import default
|
xbmc/xbmc-antiquated
|
refs/heads/master
|
xbmc/lib/libPython/Python/Lib/test/test_inspect.py
|
13
|
source = '''# line 1
'A module docstring.'
import sys, inspect
# line 5
# line 7
def spam(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
eggs(b + d, c + f)
# line 11
def eggs(x, y):
"A docstring."
global fr, st
fr = inspect.currentframe()
st = inspect.stack()
p = x
q = y / 0
# line 20
class StupidGit:
"""A longer,
indented
docstring."""
# line 27
def abuse(self, a, b, c): # a comment
"""Another
\tdocstring
containing
\ttabs
\t
"""
self.argue(a, b, c)
# line 40
def argue(self, a, b, c):
try:
spam(a, b, c)
except:
self.ex = sys.exc_info()
self.tr = inspect.trace()
# line 48
class MalodorousPervert(StupidGit):
pass
class ParrotDroppings:
pass
class FesteringGob(MalodorousPervert, ParrotDroppings):
pass
'''
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
# getsourcefile, getcomments, getsource, getclasstree, getargspec,
# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
# isdatadescriptor
from test.test_support import TestFailed, TESTFN
import sys, imp, os, string
def test(assertion, message, *args):
if not assertion:
raise TestFailed, message % args
import inspect
file = open(TESTFN, 'w')
file.write(source)
file.close()
# Note that load_source creates file TESTFN+'c' or TESTFN+'o'.
mod = imp.load_source('testmod', TESTFN)
files_to_clean_up = [TESTFN, TESTFN + 'c', TESTFN + 'o']
def istest(func, exp):
obj = eval(exp)
test(func(obj), '%s(%s)' % (func.__name__, exp))
for other in [inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback]:
if other is not func:
test(not other(obj), 'not %s(%s)' % (other.__name__, exp))
git = mod.StupidGit()
try:
1/0
except:
tb = sys.exc_traceback
istest(inspect.isbuiltin, 'sys.exit')
istest(inspect.isbuiltin, '[].append')
istest(inspect.isclass, 'mod.StupidGit')
istest(inspect.iscode, 'mod.spam.func_code')
istest(inspect.isframe, 'tb.tb_frame')
istest(inspect.isfunction, 'mod.spam')
istest(inspect.ismethod, 'mod.StupidGit.abuse')
istest(inspect.ismethod, 'git.argue')
istest(inspect.ismodule, 'mod')
istest(inspect.istraceback, 'tb')
import __builtin__
istest(inspect.isdatadescriptor, '__builtin__.file.closed')
istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
test(inspect.isroutine(mod.spam), 'isroutine(mod.spam)')
test(inspect.isroutine([].count), 'isroutine([].count)')
classes = inspect.getmembers(mod, inspect.isclass)
test(classes ==
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)], 'class list')
tree = inspect.getclasstree(map(lambda x: x[1], classes), 1)
test(tree ==
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert, mod.ParrotDroppings))
]
]
], 'class tree')
functions = inspect.getmembers(mod, inspect.isfunction)
test(functions == [('eggs', mod.eggs), ('spam', mod.spam)], 'function list')
test(inspect.getdoc(mod) == 'A module docstring.', 'getdoc(mod)')
test(inspect.getcomments(mod) == '# line 1\n', 'getcomments(mod)')
test(inspect.getmodule(mod.StupidGit) == mod, 'getmodule(mod.StupidGit)')
test(inspect.getfile(mod.StupidGit) == TESTFN, 'getfile(mod.StupidGit)')
test(inspect.getsourcefile(mod.spam) == TESTFN, 'getsourcefile(mod.spam)')
test(inspect.getsourcefile(git.abuse) == TESTFN, 'getsourcefile(git.abuse)')
def sourcerange(top, bottom):
lines = string.split(source, '\n')
return string.join(lines[top-1:bottom], '\n') + '\n'
test(inspect.getsource(git.abuse) == sourcerange(29, 39),
'getsource(git.abuse)')
test(inspect.getsource(mod.StupidGit) == sourcerange(21, 46),
'getsource(mod.StupidGit)')
test(inspect.getdoc(mod.StupidGit) ==
'A longer,\n\nindented\n\ndocstring.', 'getdoc(mod.StupidGit)')
test(inspect.getdoc(git.abuse) ==
'Another\n\ndocstring\n\ncontaining\n\ntabs', 'getdoc(git.abuse)')
test(inspect.getcomments(mod.StupidGit) == '# line 20\n',
'getcomments(mod.StupidGit)')
git.abuse(7, 8, 9)
istest(inspect.istraceback, 'git.ex[2]')
istest(inspect.isframe, 'mod.fr')
test(len(git.tr) == 3, 'trace() length')
test(git.tr[0][1:] == (TESTFN, 43, 'argue',
[' spam(a, b, c)\n'], 0),
     'trace() row 1')
test(git.tr[1][1:] == (TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
'trace() row 2')
test(git.tr[2][1:] == (TESTFN, 18, 'eggs', [' q = y / 0\n'], 0),
'trace() row 3')
test(len(mod.st) >= 5, 'stack() length')
test(mod.st[0][1:] ==
(TESTFN, 16, 'eggs', [' st = inspect.stack()\n'], 0),
'stack() row 1')
test(mod.st[1][1:] ==
(TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
'stack() row 2')
test(mod.st[2][1:] ==
(TESTFN, 43, 'argue', [' spam(a, b, c)\n'], 0),
'stack() row 3')
test(mod.st[3][1:] ==
(TESTFN, 39, 'abuse', [' self.argue(a, b, c)\n'], 0),
'stack() row 4')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
test(args == ['x', 'y'], 'mod.fr args')
test(varargs == None, 'mod.fr varargs')
test(varkw == None, 'mod.fr varkw')
test(locals == {'x': 11, 'p': 11, 'y': 14}, 'mod.fr locals')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
'(x=11, y=14)', 'mod.fr formatted argvalues')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.fr.f_back args')
test(varargs == 'g', 'mod.fr.f_back varargs')
test(varkw == 'h', 'mod.fr.f_back varkw')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})',
'mod.fr.f_back formatted argvalues')
for fname in files_to_clean_up:
try:
os.unlink(fname)
except:
pass
# Test for decorators as well.
source = r"""
def wrap(foo=None):
def wrapper(func):
return func
return wrapper
def replace(func):
def insteadfunc():
print 'hello'
return insteadfunc
# two decorators, one with argument
@wrap()
@wrap(wrap)
def wrapped():
pass
@replace
def gone():
pass"""
file = open(TESTFN + "2", "w")
file.write(source)
file.close()
files_to_clean_up = [TESTFN + "2", TESTFN + '2c', TESTFN + '2o']
mod2 = imp.load_source("testmod3", TESTFN + "2")
test(inspect.getsource(mod2.wrapped) == sourcerange(13, 16),
"inspect.getsource(mod.wrapped)")
test(inspect.getsource(mod2.gone) == sourcerange(8, 9),
"inspect.getsource(mod.gone)")
for fname in files_to_clean_up:
try:
os.unlink(fname)
except:
pass
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# Test classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class A:
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
args, varargs, varkw, defaults = inspect.getargspec(mod.eggs)
test(args == ['x', 'y'], 'mod.eggs args')
test(varargs == None, 'mod.eggs varargs')
test(varkw == None, 'mod.eggs varkw')
test(defaults == None, 'mod.eggs defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
'(x, y)', 'mod.eggs formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(mod.spam)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.spam args')
test(varargs == 'g', 'mod.spam varargs')
test(varkw == 'h', 'mod.spam varkw')
test(defaults == (3, (4, (5,))), 'mod.spam defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)',
'mod.spam formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(A.m)
test(args == ['self'], 'A.m args')
test(varargs is None, 'A.m varargs')
test(varkw is None, 'A.m varkw')
test(defaults is None, 'A.m defaults')
# Doc/lib/libinspect.tex claims there are 11 such functions
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
test(count == 11, "There are %d (not 11) is* functions", count)
def sublistOfOne((foo)): return 1
args, varargs, varkw, defaults = inspect.getargspec(sublistOfOne)
test(args == [['foo']], 'sublistOfOne args')
test(varargs is None, 'sublistOfOne varargs')
test(varkw is None, 'sublistOfOne varkw')
test(defaults is None, 'sublistOfOne defaults')
|
netease-youdao/hex
|
refs/heads/master
|
tools/make_hexium.py
|
5
|
# Copyright (c) 2012-2013 NetEase Youdao Inc. and other heX contributors. All
# rights reserved. Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file.
from date_util import *
from file_util import *
from gclient_util import *
from optparse import OptionParser
import os
import re
import shlex
import subprocess
from svn_util import *
import sys
import zipfile
def create_archive(input_dir, zip_file):
""" Creates a zip archive of the specified input directory. """
zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
def addDir(dir):
for f in os.listdir(dir):
full_path = os.path.join(dir, f)
if os.path.isdir(full_path):
addDir(full_path)
else:
zf.write(full_path, os.path.relpath(full_path, \
os.path.join(input_dir, os.pardir)))
addDir(input_dir)
zf.close()
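# Usage sketch (the paths are hypothetical): create_archive('out/dist', 'out/dist.zip')
# stores each file with its path relative to 'out', so the archive unpacks
# into a top-level 'dist' directory.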
def create_7z_archive(input_dir, zip_file):
""" Creates a 7z archive of the specified input directory. """
command = os.environ['CEF_COMMAND_7ZIP']
run('"' + command + '" a -y ' + zip_file + ' ' + input_dir, os.path.split(zip_file)[0])
def create_output_dir(name, parent_dir):
""" Creates an output directory and adds the path to the archive list. """
output_dir = os.path.abspath(os.path.join(parent_dir, name))
remove_dir(output_dir, options.quiet)
make_dir(output_dir, options.quiet)
archive_dirs.append(output_dir)
return output_dir
def eval_file(src):
""" Loads and evaluates the contents of the specified file. """
return eval(read_file(src), {'__builtins__': None}, None)
def run(command_line, working_dir):
""" Run a command. """
sys.stdout.write('-------- Running "'+command_line+'" in "'+\
working_dir+'"...'+"\n")
args = shlex.split(command_line.replace('\\', '\\\\'))
return subprocess.check_call(args, cwd=working_dir, env=os.environ,
shell=(sys.platform == 'win32'))
# cannot be loaded as a module
if __name__ != "__main__":
sys.stderr.write('This file cannot be loaded as a module!')
sys.exit()
# parse command-line options
disc = """
This utility builds the HeXium Binary Distribution.
"""
parser = OptionParser(description=disc)
parser.add_option('--output-dir', dest='outputdir', metavar='DIR',
help='output directory [required]')
parser.add_option('--no-archive',
action='store_true', dest='noarchive', default=False,
help='don\'t create archives for output directories')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='do not output detailed status information')
(options, args) = parser.parse_args()
# Test the operating system.
platform = ''
if sys.platform == 'win32':
platform = 'windows'
elif sys.platform == 'darwin':
platform = 'macosx'
elif sys.platform.startswith('linux'):
platform = 'linux'
# the outputdir option is required
if options.outputdir is None:
parser.print_help(sys.stderr)
sys.exit()
# script directory
script_dir = os.path.dirname(__file__)
# heX root directory
hex_dir = os.path.abspath(os.curdir)
# src directory
src_dir = os.path.abspath(os.path.join(hex_dir, os.pardir))
# build directory
if platform == 'windows':
build_dir = os.path.abspath(os.path.join(src_dir, 'build', 'Release'))
elif platform == 'macosx':
build_dir = os.path.abspath(os.path.join(src_dir, 'xcodebuild', 'Release'))
# retrieve url, revision and date information
chromium_info = get_svn_info(os.path.join(hex_dir, os.pardir))
chromium_url = chromium_info['url']
chromium_rev = chromium_info['revision']
date = get_date()
# Read and parse the version file (key=value pairs, one per line)
args = {}
read_version_file(os.path.join(hex_dir, '../chrome/VERSION'), args)
read_version_file(os.path.join(hex_dir, 'VERSION'), args)
hex_url = 'http://hex.youdao.com'
hex_ver = args['HEX_MAJOR']+'.'+args['HEX_MINOR']+'.'+args['HEX_BUILD']+'_'+args['BUILD']
chromium_ver = args['MAJOR']+'.'+args['MINOR']+'.'+args['BUILD']+'.'+args['PATCH']
# list of output directories to be archived
archive_dirs = []
platform_arch = '32'
# output directory
output_dir_base = 'hexium_' + hex_ver
output_dir_name = output_dir_base + '_' + platform + platform_arch
output_dir = create_output_dir(output_dir_name, options.outputdir)
# transfer the license, authors and readme files
copy_file(os.path.join(hex_dir, 'LICENSE'), output_dir, options.quiet)
copy_file(os.path.join(hex_dir, 'AUTHORS'), output_dir, options.quiet)
copy_file(os.path.join(hex_dir, 'README.md'), output_dir, options.quiet)
if platform == 'windows':
copy_file(os.path.join(build_dir, 'chrome.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'chrome.pak'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'chrome_100_percent.pak'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'chrome_child.dll'), output_dir, options.quiet)
copy_files(os.path.join(build_dir, 'd3dcompiler_*.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'ffmpegsumo.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'hex.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'hexium.exe'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'icudt.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'libEGL.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'libGLESv2.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'metro_driver.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'node.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'resources.pak'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'uv.dll'), output_dir, options.quiet)
copy_file(os.path.join(build_dir, 'v8.dll'), output_dir, options.quiet)
copy_dir(os.path.join(build_dir, 'locales'), os.path.join(output_dir, 'locales'), options.quiet)
elif platform == 'macosx':
copy_dir(os.path.join(build_dir, 'Chromium.app'), os.path.join(output_dir, 'Chromium.app'), options.quiet)
if not options.noarchive:
# create an archive for each output directory
archive_extension = '.zip'
if os.getenv('CEF_COMMAND_7ZIP', '') != '':
archive_extension = '.7z'
for archive_dir in archive_dirs:
zip_file = os.path.split(archive_dir)[1] + archive_extension
if not options.quiet:
sys.stdout.write('Creating '+zip_file+"...\n")
if archive_extension == '.zip':
create_archive(archive_dir, os.path.join(archive_dir, os.pardir, zip_file))
else:
create_7z_archive(archive_dir, os.path.join(archive_dir, os.pardir, zip_file))
|
veyesys/opencvr
|
refs/heads/master
|
3rdparty/protobuf/gmock/gtest/test/gtest_env_var_test.py
|
2408
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
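# For example, TestFlag('color', 'yes', 'auto') sets GTEST_COLOR=yes, expects
# GetFlag('color') to report 'yes', then unsets the variable and expects the
# built-in default 'auto'.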
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
realsobek/freeipa
|
refs/heads/master
|
ipalib/base.py
|
7
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Foundational classes and functions.
"""
import re
import six
from ipalib.constants import NAME_REGEX, NAME_ERROR
from ipalib.constants import TYPE_ERROR, SET_ERROR, DEL_ERROR, OVERRIDE_ERROR
class ReadOnly(object):
"""
Base class for classes that can be locked into a read-only state.
Be forewarned that Python does not offer true read-only attributes for
user-defined classes. Do *not* rely upon the read-only-ness of this
class for security purposes!
The point of this class is not to make it impossible to set or to delete
attributes after an instance is locked, but to make it impossible to do so
*accidentally*. Rather than constantly reminding our programmers of things
like, for example, "Don't set any attributes on this ``FooBar`` instance
because doing so won't be thread-safe", this class offers a real way to
enforce read-only attribute usage.
For example, before a `ReadOnly` instance is locked, you can set and delete
its attributes as normal:
>>> class Person(ReadOnly):
... pass
...
>>> p = Person()
>>> p.name = 'John Doe'
>>> p.phone = '123-456-7890'
>>> del p.phone
But after an instance is locked, you cannot set its attributes:
>>> p.__islocked__() # Is this instance locked?
False
>>> p.__lock__() # This will lock the instance
>>> p.__islocked__()
True
>>> p.department = 'Engineering'
Traceback (most recent call last):
...
AttributeError: locked: cannot set Person.department to 'Engineering'
Nor can you delete its attributes:
>>> del p.name
Traceback (most recent call last):
...
AttributeError: locked: cannot delete Person.name
However, as noted at the start, there are still obscure ways in which
attributes can be set or deleted on a locked `ReadOnly` instance. For
example:
>>> object.__setattr__(p, 'department', 'Engineering')
>>> p.department
'Engineering'
>>> object.__delattr__(p, 'name')
>>> hasattr(p, 'name')
False
But again, the point is that a programmer would never employ the above
techniques *accidentally*.
Lastly, this example aside, you should use the `lock()` function rather
than the `ReadOnly.__lock__()` method. And likewise, you should
use the `islocked()` function rather than the `ReadOnly.__islocked__()`
method. For example:
>>> readonly = ReadOnly()
>>> islocked(readonly)
False
>>> lock(readonly) is readonly # lock() returns the instance
True
>>> islocked(readonly)
True
"""
__locked = False
def __lock__(self):
"""
Put this instance into a read-only state.
After the instance has been locked, attempting to set or delete an
attribute will raise an AttributeError.
"""
assert self.__locked is False, '__lock__() can only be called once'
self.__locked = True
def __islocked__(self):
"""
Return True if instance is locked, otherwise False.
"""
return self.__locked
def __setattr__(self, name, value):
"""
If unlocked, set attribute named ``name`` to ``value``.
If this instance is locked, an AttributeError will be raised.
:param name: Name of attribute to set.
:param value: Value to assign to attribute.
"""
if self.__locked:
raise AttributeError(
SET_ERROR % (self.__class__.__name__, name, value)
)
return object.__setattr__(self, name, value)
def __delattr__(self, name):
"""
If unlocked, delete attribute named ``name``.
If this instance is locked, an AttributeError will be raised.
:param name: Name of attribute to delete.
"""
if self.__locked:
raise AttributeError(
DEL_ERROR % (self.__class__.__name__, name)
)
return object.__delattr__(self, name)
def lock(instance):
"""
Lock an instance of the `ReadOnly` class or similar.
This function can be used to lock instances of any class that implements
the same locking API as the `ReadOnly` class. For example, this function
can lock instances of the `config.Env` class.
So that this function can be easily used within an assignment, ``instance``
is returned after it is locked. For example:
>>> readonly = ReadOnly()
>>> readonly is lock(readonly)
True
>>> readonly.attr = 'This will not work'
Traceback (most recent call last):
...
AttributeError: locked: cannot set ReadOnly.attr to 'This will not work'
Also see the `islocked()` function.
:param instance: The instance of `ReadOnly` (or similar) to lock.
"""
assert instance.__islocked__() is False, 'already locked: %r' % instance
instance.__lock__()
assert instance.__islocked__() is True, 'failed to lock: %r' % instance
return instance
def islocked(instance):
"""
Return ``True`` if ``instance`` is locked.
This function can be used on an instance of the `ReadOnly` class or an
instance of any other class implementing the same locking API.
For example:
>>> readonly = ReadOnly()
>>> islocked(readonly)
False
>>> readonly.__lock__()
>>> islocked(readonly)
True
Also see the `lock()` function.
:param instance: The instance of `ReadOnly` (or similar) to interrogate.
"""
assert (
hasattr(instance, '__lock__') and callable(instance.__lock__)
), 'no __lock__() method: %r' % instance
return instance.__islocked__()
def check_name(name):
"""
Verify that ``name`` is suitable for a `NameSpace` member name.
In short, ``name`` must be a valid lower-case Python identifier that
neither starts nor ends with an underscore. Otherwise an exception is
raised.
This function will raise a ``ValueError`` if ``name`` does not match the
`constants.NAME_REGEX` regular expression. For example:
>>> check_name('MyName')
Traceback (most recent call last):
...
ValueError: name must match '^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'; got 'MyName'
Also, this function will raise a ``TypeError`` if ``name`` is not an
``str`` instance. For example:
>>> check_name(u'my_name')
Traceback (most recent call last):
...
TypeError: name: need a <type 'str'>; got u'my_name' (a <type 'unicode'>)
So that `check_name()` can be easily used within an assignment, ``name``
is returned unchanged if it passes the check. For example:
>>> n = check_name('my_name')
>>> n
'my_name'
:param name: Identifier to test.
"""
if type(name) is not str:
raise TypeError(
TYPE_ERROR % ('name', str, name, type(name))
)
if re.match(NAME_REGEX, name) is None:
raise ValueError(
NAME_ERROR % (NAME_REGEX, name)
)
return name
class NameSpace(ReadOnly):
"""
A read-only name-space with handy container behaviours.
A `NameSpace` instance is an ordered, immutable mapping object whose values
can also be accessed as attributes. A `NameSpace` instance is constructed
from an iterable providing its *members*, which are simply arbitrary objects
with a ``name`` attribute whose value:
1. Is unique among the members
2. Passes the `check_name()` function
Beyond that, no restrictions are placed on the members: they can be
classes or instances, and of any type.
The members can be accessed as attributes on the `NameSpace` instance or
through a dictionary interface. For example, say we create a `NameSpace`
instance from a list containing a single member, like this:
>>> class my_member(object):
... name = 'my_name'
...
>>> namespace = NameSpace([my_member])
>>> namespace
NameSpace(<1 member>, sort=True)
We can then access ``my_member`` both as an attribute and as a dictionary
item:
>>> my_member is namespace.my_name # As an attribute
True
>>> my_member is namespace['my_name'] # As dictionary item
True
For a more detailed example, say we create a `NameSpace` instance from a
generator like this:
>>> class Member(object):
... def __init__(self, i):
... self.i = i
... self.name = self.__name__ = 'member%d' % i
... def __repr__(self):
... return 'Member(%d)' % self.i
...
>>> ns = NameSpace(Member(i) for i in range(3))
>>> ns
NameSpace(<3 members>, sort=True)
As above, the members can be accessed as attributes and as dictionary items:
>>> ns.member0 is ns['member0']
True
>>> ns.member1 is ns['member1']
True
>>> ns.member2 is ns['member2']
True
Members can also be accessed by index and by slice. For example:
>>> ns[0]
Member(0)
>>> ns[-1]
Member(2)
>>> ns[1:]
(Member(1), Member(2))
(Note that slicing a `NameSpace` returns a ``tuple``.)
`NameSpace` instances provide standard container emulation for membership
testing, counting, and iteration. For example:
>>> 'member3' in ns # Is there a member named 'member3'?
False
>>> 'member2' in ns # But there is a member named 'member2'
True
>>> len(ns) # The number of members
3
>>> list(ns) # Iterate through the member names
['member0', 'member1', 'member2']
Although not a standard container feature, the `NameSpace.__call__()` method
provides a convenient (and efficient) way to iterate through the *members*
(as opposed to the member names). Think of it like an ordered version of
the ``dict.itervalues()`` method. For example:
>>> list(ns[name] for name in ns) # One way to do it
[Member(0), Member(1), Member(2)]
>>> list(ns()) # A more efficient, simpler way to do it
[Member(0), Member(1), Member(2)]
Another convenience method is `NameSpace.__todict__()`, which will return
a copy of the ``dict`` mapping the member names to the members.
For example:
>>> ns.__todict__()
{'member1': Member(1), 'member0': Member(0), 'member2': Member(2)}
As `NameSpace.__init__()` locks the instance, `NameSpace` instances are
read-only from the get-go. An ``AttributeError`` is raised if you try to
set *any* attribute on a `NameSpace` instance. For example:
>>> ns.member3 = Member(3) # Let's add that missing 'member3'
Traceback (most recent call last):
...
AttributeError: locked: cannot set NameSpace.member3 to Member(3)
(For information on the locking protocol, see the `ReadOnly` class, of which
`NameSpace` is a subclass.)
By default the members will be sorted alphabetically by the member name.
For example:
>>> sorted_ns = NameSpace([Member(7), Member(3), Member(5)])
>>> sorted_ns
NameSpace(<3 members>, sort=True)
>>> list(sorted_ns)
['member3', 'member5', 'member7']
>>> sorted_ns[0]
Member(3)
But if the instance is created with the ``sort=False`` keyword argument, the
original order of the members is preserved. For example:
>>> unsorted_ns = NameSpace([Member(7), Member(3), Member(5)], sort=False)
>>> unsorted_ns
NameSpace(<3 members>, sort=False)
>>> list(unsorted_ns)
['member7', 'member3', 'member5']
>>> unsorted_ns[0]
Member(7)
As a special extension, NameSpace objects can be indexed by objects that
have a "__name__" attribute (e.g. classes). These lookups are converted
to lookups on the name:
>>> class_ns = NameSpace([Member(7), Member(3), Member(5)], sort=False)
>>> class_ns[Member(3)]
Member(3)
The `NameSpace` class is used in many places throughout freeIPA. For a few
examples, see the `plugable.API` and the `frontend.Command` classes.
"""
def __init__(self, members, sort=True, name_attr='name'):
"""
:param members: An iterable providing the members.
:param sort: Whether to sort the members by member name.
:param name_attr: Name of the attribute that holds each member's name.
"""
if type(sort) is not bool:
raise TypeError(
TYPE_ERROR % ('sort', bool, sort, type(sort))
)
self.__sort = sort
if sort:
self.__members = tuple(
sorted(members, key=lambda m: getattr(m, name_attr))
)
else:
self.__members = tuple(members)
self.__names = tuple(getattr(m, name_attr) for m in self.__members)
self.__map = dict()
for member in self.__members:
name = check_name(getattr(member, name_attr))
if name in self.__map:
raise AttributeError(OVERRIDE_ERROR %
(self.__class__.__name__, name, self.__map[name], member)
)
assert not hasattr(self, name), 'Ouch! Has attribute %r' % name
self.__map[name] = member
setattr(self, name, member)
lock(self)
def __len__(self):
"""
Return the number of members.
"""
return len(self.__members)
def __iter__(self):
"""
Iterate through the member names.
If this instance was created with ``sort=False``, the names will be in
the same order as the members were passed to the constructor; otherwise
the names will be in alphabetical order (which is the default).
This method is like an ordered version of ``dict.iterkeys()``.
"""
for name in self.__names:
yield name
def __call__(self):
"""
Iterate through the members.
If this instance was created with ``sort=False``, the members will be
in the same order as they were passed to the constructor; otherwise the
members will be in alphabetical order by name (which is the default).
This method is like an ordered version of ``dict.itervalues()``.
"""
for member in self.__members:
yield member
def __contains__(self, name):
"""
Return ``True`` if namespace has a member named ``name``.
"""
name = getattr(name, '__name__', name)
return name in self.__map
def __getitem__(self, key):
"""
Return a member by name or index, or return a slice of members.
:param key: The name or index of a member, or a slice object.
"""
key = getattr(key, '__name__', key)
if isinstance(key, six.string_types):
return self.__map[key]
if type(key) in (int, slice):
return self.__members[key]
raise TypeError(
TYPE_ERROR % ('key', (str, int, slice, 'object with __name__'),
key, type(key))
)
def __repr__(self):
"""
Return a pseudo-valid expression that could create this instance.
"""
cnt = len(self)
if cnt == 1:
m = 'member'
else:
m = 'members'
return '%s(<%d %s>, sort=%r)' % (
self.__class__.__name__,
cnt,
m,
self.__sort,
)
def __todict__(self):
"""
Return a copy of the private dict mapping member name to member.
"""
return dict(self.__map)
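# Illustrative sketch (added for clarity, not part of the original module):
# the ``name_attr`` keyword lets a `NameSpace` key its members on an attribute
# other than ``name``. ``Param`` here is a hypothetical member class.
#
#   >>> class Param(object):
#   ...     def __init__(self, cli_name):
#   ...         self.cli_name = cli_name
#   >>> ns = NameSpace([Param('verbose'), Param('debug')], name_attr='cli_name')
#   >>> list(ns)
#   ['debug', 'verbose']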
|
wndhydrnt/airflow
|
refs/heads/master
|
airflow/utils/db.py
|
2
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import os
import contextlib
from airflow import settings
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
@contextlib.contextmanager
def create_session():
"""
Context manager that will create and tear down a session.
"""
session = settings.Session()
try:
yield session
session.expunge_all()
session.commit()
except:
session.rollback()
raise
finally:
session.close()
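# Illustrative sketch (added for clarity, not part of the original module):
# typical use of create_session(); TaskInstance is only an example model.
#
#   with create_session() as session:
#       running = session.query(TaskInstance).filter(
#           TaskInstance.state == 'running').count()
#
# Commit on success, rollback on error and closing the session are all
# handled by the context manager above.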
def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, pass it to the function; if not, this wrapper
will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_session = 'session'
func_params = func.__code__.co_varnames
session_in_args = arg_session in func_params and \
func_params.index(arg_session) < len(args)
session_in_kwargs = arg_session in kwargs
if session_in_kwargs or session_in_args:
return func(*args, **kwargs)
else:
with create_session() as session:
kwargs[arg_session] = session
return func(*args, **kwargs)
return wrapper
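# Illustrative sketch (added for clarity, not part of the original module):
# a function decorated with provide_session; count_runs and its query are
# hypothetical.
#
#   @provide_session
#   def count_runs(dag_id, session=None):
#       return session.query(DagRun).filter(DagRun.dag_id == dag_id).count()
#
# count_runs('example_dag') creates and closes a session automatically, while
# count_runs('example_dag', session=existing) reuses the caller's session.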
@provide_session
def merge_conn(conn, session=None):
from airflow import models
C = models.Connection
if not session.query(C).filter(C.conn_id == conn.conn_id).first():
session.add(conn)
session.commit()
def initdb(rbac):
session = settings.Session()
from airflow import models
upgradedb()
merge_conn(
models.Connection(
conn_id='airflow_db', conn_type='mysql',
host='localhost', login='root', password='',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='airflow_ci', conn_type='mysql',
host='localhost', login='root', extra="{\"local_infile\": true}",
schema='airflow_ci'))
merge_conn(
models.Connection(
conn_id='beeline_default', conn_type='beeline', port="10000",
host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
schema='default'))
merge_conn(
models.Connection(
conn_id='bigquery_default', conn_type='google_cloud_platform',
schema='default'))
merge_conn(
models.Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
merge_conn(
models.Connection(
conn_id='google_cloud_default', conn_type='google_cloud_platform',
schema='default',))
merge_conn(
models.Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
merge_conn(
models.Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
merge_conn(
models.Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
port=9083))
merge_conn(
models.Connection(
conn_id='mysql_default', conn_type='mysql',
login='root',
host='localhost'))
merge_conn(
models.Connection(
conn_id='postgres_default', conn_type='postgres',
login='postgres',
schema='airflow',
host='localhost'))
merge_conn(
models.Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='/tmp/sqlite_default.db'))
merge_conn(
models.Connection(
conn_id='http_default', conn_type='http',
host='https://www.google.com/'))
merge_conn(
models.Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
merge_conn(
models.Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
merge_conn(
models.Connection(
conn_id='wasb_default', conn_type='wasb',
extra='{"sas_token": null}'))
merge_conn(
models.Connection(
conn_id='webhdfs_default', conn_type='hdfs',
host='localhost', port=50070))
merge_conn(
models.Connection(
conn_id='ssh_default', conn_type='ssh',
host='localhost'))
merge_conn(
models.Connection(
conn_id='sftp_default', conn_type='sftp',
host='localhost', port=22, login='travis',
extra='''
{"private_key": "~/.ssh/id_rsa", "ignore_hostkey_verification": true}
'''))
merge_conn(
models.Connection(
conn_id='fs_default', conn_type='fs',
extra='{"path": "/"}'))
merge_conn(
models.Connection(
conn_id='aws_default', conn_type='aws',
extra='{"region_name": "us-east-1"}'))
merge_conn(
models.Connection(
conn_id='spark_default', conn_type='spark',
host='yarn', extra='{"queue": "root.default"}'))
merge_conn(
models.Connection(
conn_id='druid_broker_default', conn_type='druid',
host='druid-broker', port=8082, extra='{"endpoint": "druid/v2/sql"}'))
merge_conn(
models.Connection(
conn_id='druid_ingest_default', conn_type='druid',
host='druid-overlord', port=8081, extra='{"endpoint": "druid/indexer/v1/task"}'))
merge_conn(
models.Connection(
conn_id='redis_default', conn_type='redis',
host='localhost', port=6379,
extra='{"db": 0}'))
merge_conn(
models.Connection(
conn_id='sqoop_default', conn_type='sqoop',
host='rmdbs', extra=''))
merge_conn(
models.Connection(
conn_id='emr_default', conn_type='emr',
extra='''
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
]
},
"Ec2KeyName": "mykey",
"KeepJobFlowAliveWhenNoSteps": false,
"TerminationProtected": false,
"Ec2SubnetId": "somesubnet",
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
'''))
merge_conn(
models.Connection(
conn_id='databricks_default', conn_type='databricks',
host='localhost'))
merge_conn(
models.Connection(
conn_id='qubole_default', conn_type='qubole',
host='localhost'))
# Known event types
KET = models.KnownEventType
if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
session.add(KET(know_event_type='Holiday'))
if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
session.add(KET(know_event_type='Outage'))
if not session.query(KET).filter(
KET.know_event_type == 'Natural Disaster').first():
session.add(KET(know_event_type='Natural Disaster'))
if not session.query(KET).filter(
KET.know_event_type == 'Marketing Campaign').first():
session.add(KET(know_event_type='Marketing Campaign'))
session.commit()
dagbag = models.DagBag()
# Save individual DAGs in the ORM
for dag in dagbag.dags.values():
dag.sync_to_db()
# Deactivate the unknown ones
models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
Chart = models.Chart
chart_label = "Airflow task instance by type"
chart = session.query(Chart).filter(Chart.label == chart_label).first()
if not chart:
chart = Chart(
label=chart_label,
conn_id='airflow_db',
chart_type='bar',
x_is_date=False,
sql=(
"SELECT state, COUNT(1) as number "
"FROM task_instance "
"WHERE dag_id LIKE 'example%' "
"GROUP BY state"),
)
session.add(chart)
session.commit()
if rbac:
from flask_appbuilder.security.sqla import models
from flask_appbuilder.models.sqla import Base
Base.metadata.create_all(settings.engine)
def upgradedb():
# alembic adds significant import time, so we import it lazily
from alembic import command
from alembic.config import Config
log.info("Creating tables")
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory)
config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN)
command.upgrade(config, 'heads')
def resetdb(rbac):
'''
Clear out the database
'''
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info("Dropping tables that exist")
models.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
if rbac:
# drop rbac security tables
from flask_appbuilder.security.sqla import models
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb(rbac)
|
3upperm2n/DIGITS
|
refs/heads/master
|
digits/config/current_config.py
|
15
|
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
from jobs_dir import JobsDirOption
from gpu_list import GpuListOption
from log_file import LogFileOption
from log_level import LogLevelOption
from server_name import ServerNameOption
from secret_key import SecretKeyOption
from caffe_option import CaffeOption
option_list = None
def reset():
"""
Reset option_list to a list of unset Options
"""
global option_list
option_list = [
JobsDirOption(),
GpuListOption(),
LogFileOption(),
LogLevelOption(),
ServerNameOption(),
SecretKeyOption(),
CaffeOption(),
]
reset()
def config_value(key):
"""
Return the current configuration value for the given option
Arguments:
key -- the key of the configuration option
"""
for option in option_list:
if key == option.config_file_key():
if not option.valid():
raise RuntimeError('No valid value set for "%s"' % key)
return option.config_dict_value()
raise RuntimeError('No option found for "%s"' % key)
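# Illustrative sketch (added for clarity, not part of the original module):
# config_value() is looked up by an option's config-file key; 'jobs_dir' below
# is assumed to be the key reported by JobsDirOption.config_file_key().
#
#   jobs_dir = config_value('jobs_dir')
#
# A RuntimeError is raised if the key is unknown or its value was never set.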
|
lokirius/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/encodings/koi8_u.py
|
272
|
""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-u',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
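# Note (added for clarity): the standard encodings package imports this module
# by codec name and calls getregentry() to obtain the CodecInfo it registers.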
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u2580' # 0x8B -> UPPER HALF BLOCK
'\u2584' # 0x8C -> LOWER HALF BLOCK
'\u2588' # 0x8D -> FULL BLOCK
'\u258c' # 0x8E -> LEFT HALF BLOCK
'\u2590' # 0x8F -> RIGHT HALF BLOCK
'\u2591' # 0x90 -> LIGHT SHADE
'\u2592' # 0x91 -> MEDIUM SHADE
'\u2593' # 0x92 -> DARK SHADE
'\u2320' # 0x93 -> TOP HALF INTEGRAL
'\u25a0' # 0x94 -> BLACK SQUARE
'\u2219' # 0x95 -> BULLET OPERATOR
'\u221a' # 0x96 -> SQUARE ROOT
'\u2248' # 0x97 -> ALMOST EQUAL TO
'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
'\xa0' # 0x9A -> NO-BREAK SPACE
'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
'\xb0' # 0x9C -> DEGREE SIGN
'\xb2' # 0x9D -> SUPERSCRIPT TWO
'\xb7' # 0x9E -> MIDDLE DOT
'\xf7' # 0x9F -> DIVISION SIGN
'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
TinyOS-Camp/DDEA-DEV
|
refs/heads/master
|
DDEA-DEMO/main.py
|
1
|
#!/usr/bin/python
# To force floating-point division
"""
Created on Mon Mar 24 19:24:11 2014
@author: NGO Quang Minh Khiem
@e-mail: khiem.ngo@adsc.com.sg
"""
from __future__ import division
from multiprocessing import Process, JoinableQueue, Event
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
import time, BaseHTTPServer, sys, simplejson, os
import datetime as dt
from datetime import datetime
import SimpleHTTPServer, SocketServer, logging, cgi
from shared_constants import *
from quasar_url_reader import read_sensor_data
from ddea_proc import ddea_process
import mytool as mt
import pickle, logging, logging.handlers, SocketServer, struct
user_cmd_q = JoinableQueue()
ddea_msg_q = JoinableQueue()
cmd_lock = Event()
def ascii_encode_dict(data):
ascii_encode = lambda x: x.encode('ascii')
return dict(map(ascii_encode, pair) for pair in data.items())
class ExecProc(Process):
def __init__(self, cmd_q, status_q):
Process.__init__(self)
self.cmd_q = cmd_q
self.status_q = status_q
def run(self):
from log_util import log
try:
while True:
cmd = None
try:
cmd = self.cmd_q.get(block=True, timeout=0.1)
except Exception as e:
continue
finally:
if cmd:
self.cmd_q.task_done()
try:
with open(META_DIR + "wip.json", 'w') as f:
f.write(simplejson.dumps({"wip": 1}))
cmdset = simplejson.loads(cmd)
sensor_hash = cmdset['selected-nodes']
s_date = datetime.strptime(cmdset['start-date'], '%Y-%m-%d')
e_date = datetime.strptime(cmdset['end-date'], '%Y-%m-%d')
if not len(sensor_hash):
log.critical("No sensor is selected!")
else:
log.info('****************************** Beginning of DDEA ******************************')
bldg_key = 'SODA'
#exemplar by user
#pname_key = '_POWER_'
pname_key = 'POWER'
s_epoch = int(time.mktime(s_date.timetuple()))
e_epoch = int(time.mktime(e_date.timetuple()))
time_inv = dt.timedelta(seconds=cmdset['time-interval'])
log.info("Cleaning up old output...")
mt.remove_all_files(FIG_DIR)
mt.remove_all_files(JSON_DIR)
mt.remove_all_files(PROC_OUT_DIR)
log.info("start epoch : " + str(s_epoch) + " end epoch : " + str(e_epoch))
log.info(str(time_inv) + ' time slot interval is set for this data set !!!')
log.info("BLDG_KEY : " + bldg_key + " PNAME_KEY : " + pname_key)
log.info('*' * 80)
log.info("Retrieve sensor data from quasar TSDB")
sensor_names_hash = mt.sensor_name_uid_dict(bldg_key, sensor_hash)
sensor_data = read_sensor_data(sensor_names_hash, s_epoch, e_epoch)
if sensor_data and len(sensor_data):
ddea_process(sensor_names_hash, sensor_data, s_epoch, e_epoch, time_inv, bldg_key, pname_key)
else:
log.critical("No sensor data available for time period and sensor selected!")
log.info('******************************** End of DDEA **********************************')
os.remove(META_DIR + "wip.json")
cmd_lock.clear()
log.info("execution-lock cleared")
log.info('~' * 80)
except Exception as e:
os.remove(META_DIR + "wip.json")
cmd_lock.clear()
print e
log.error(str(e))
except Exception as e:
os.remove(META_DIR + "wip.json")
cmd_lock.clear()
print e
log.error(str(e))
finally:
sys.exit(0)
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
def __init__(self, request, client_address, server, wsserver):
self.wsserver = wsserver
SocketServer.StreamRequestHandler.__init__(self, request, client_address, server)
def handle(self):
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.wsserver.broadcastmsg(record.msg)
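# Note (added for clarity): each record on the wire is a 4-byte big-endian
# length prefix ('>L') followed by a pickled log-record dict -- the format
# emitted by logging.handlers.SocketHandler on the sending side.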
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
allow_reuse_address = 1
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
self.logname = None
self.requestHandle = None
self.wsserver = SimpleWebSocketServer('', 8081, WebSocket)
def finish_request(self, request, client_address):
LogRecordStreamHandler(request, client_address, self, self.wsserver)
def cleanup(self):
self.wsserver.close()
def serve_until_stopped(self):
import select
abort = 0
while not abort:
self.wsserver.servconnman()
rd, wr, ex = select.select([self.socket.fileno()], [], [], self.timeout)
if rd:
self.handle_request()
abort = self.abort
class WSProc(Process):
def __init__(self, status_q):
Process.__init__(self)
self.status_q = status_q
def run(self):
tcpserver = LogRecordSocketReceiver()
try:
tcpserver.serve_until_stopped()
except Exception as e:
pass
finally:
tcpserver.cleanup()
sys.exit(0)
class DDEARequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
if self.path == "/" or ("index.html" in self.path):
print "- allow origin modification -"
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET POST')
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def do_GET(self):
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
"""
def do_GET(self):
self.path = 'resources/' + self.path
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
length = int(self.headers.getheader('content-length'))
data = self.rfile.read(length)
#print self.path, data
if cmd_lock.is_set():
# return unauthorized
self.send_response(401)
else:
#Oh, so ungodly!
user_cmd_q.put_nowait(data)
# service cmd. lock
cmd_lock.set()
# return authorized
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header("Content-length", 0)
self.end_headers()
self.wfile.write("")
self.finish()
self.connection.close()
if __name__ == '__main__':
processes = list()
processes.append(ExecProc(user_cmd_q, ddea_msg_q))
processes.append(WSProc(ddea_msg_q))
print time.asctime(), "Staring DDEA..."
try:
for p in processes:
p.start()
BaseHTTPServer\
.HTTPServer(('0.0.0.0', 8080), DDEARequestHandler)\
.serve_forever()
except Exception as e:
for p in processes:
p.terminate()
finally:
print '\n', time.asctime(), "Stopping CPS..."
exit(0)
|
ConeyLiu/spark
|
refs/heads/master
|
python/pyspark/ml/tests/test_training_summary.py
|
1
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
if sys.version > '3':
basestring = str
from pyspark.ml.classification import BinaryLogisticRegressionSummary, LinearSVC, \
LinearSVCSummary, LogisticRegression, LogisticRegressionSummary
from pyspark.ml.clustering import BisectingKMeans, GaussianMixture, KMeans
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression
from pyspark.sql import DataFrame
from pyspark.testing.mlutils import SparkSessionTestCase
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertAlmostEqual(s.r2adj, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
self.assertEqual(s.degreesOfFreedom, 1)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class LinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.numInstances, 2)
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, basestring))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class GeneralizedLinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_binary_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, BinaryLogisticRegressionSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_multiclass_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], [])),
(2.0, 2.0, Vectors.dense(2.0)),
(2.0, 2.0, Vectors.dense(1.9))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 0.75, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.583, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, LogisticRegressionSummary))
self.assertFalse(isinstance(sameSummary, BinaryLogisticRegressionSummary))
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_linear_svc_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0, 1.0, 1.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0, 3.0))],
["label", "weight", "features"])
svc = LinearSVC(maxIter=5, weightCol="weight")
model = svc.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary()
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.scoreCol, "rawPrediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.5, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.5, 2)
self.assertAlmostEqual(s.weightedRecall, 0.5, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.25, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.3333333333333333, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.3333333333333333, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, LinearSVCSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 3)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 20)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 1)
if __name__ == "__main__":
from pyspark.ml.tests.test_training_summary import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
vikeen/band-maker
|
refs/heads/master
|
songs/tests/test_songs.py
|
1
|
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from ..models import Song, SongStats
class SongTestCase(TestCase):
def setUp(self):
self.user_creator = User.objects.create_user(
username='creator',
email='creator@email.com', password='password')
self.user_contributor = User.objects.create_user(
username='contributor', email='contributor@gmail.com', password='password')
def login(self, user):
self.client.login(username=user.username, password='password')
class IndexSongTestCase(SongTestCase):
def setUp(self):
super().setUp()
self.song_index_url = reverse("songs:index")
def test_song_index_denies_anonymous(self):
response = self.client.get(self.song_index_url)
self.assertRedirects(response, '%s/?next=%s' % (reverse('accounts:login'), self.song_index_url))
def test_song_list_loads(self):
super().login(self.user_creator)
response = self.client.get(self.song_index_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'songs/song_list.html')
class CreateSongTestCase(SongTestCase):
def setUp(self):
super().setUp()
self.create_song_url = reverse("songs:wizard_create")
def test_song_create_denies_anonymous(self):
response = self.client.get(self.create_song_url)
self.assertRedirects(response, '%s/?next=%s' % (reverse('accounts:login'), self.create_song_url))
def test_song_create_loads(self):
super().login(self.user_creator)
response = self.client.get(self.create_song_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'songs/song_wizard_detail.html')
def test_song_create_submits(self):
super().login(self.user_creator)
response = self.client.post(self.create_song_url, {
'title': 'song title',
'description': 'song description',
'license': 'cc-by-4.0'
})
song = Song.objects.filter(created_by=self.user_creator).first()
self.assertRedirects(response, reverse('songs:edit', kwargs={
'pk': song.pk
}))
self.assertEqual(song.title, 'song title')
self.assertEqual(song.description, 'song description')
self.assertEqual(song.created_by, self.user_creator)
class DeleteSongTestCase(SongTestCase):
def setUp(self):
super().setUp()
self.song = Song.objects.create(title='title', description='description', created_by=self.user_creator)
SongStats.objects.create(song=self.song)
self.song_delete_url = reverse("songs:delete", kwargs={'pk': self.song.pk})
def test_song_delete_denies_anonymous(self):
response = self.client.get(self.song_delete_url)
self.assertRedirects(response, '%s/?next=%s' % (reverse('accounts:login'), self.song_delete_url))
def test_song_delete_denies_non_creators(self):
super().login(self.user_contributor)
response = self.client.get(self.song_delete_url)
self.assertRedirects(response, reverse('songs:detail', kwargs={
'pk': self.song.pk
}))
def test_song_delete_submits(self):
super().login(self.user_creator)
response = self.client.post(self.song_delete_url)
self.assertRedirects(response, reverse('users:detail', kwargs={
'username': self.user_creator.username
}))
        with self.assertRaises(ObjectDoesNotExist):
            Song.objects.get(pk=self.song.pk)
class UpdateSongTestCase(SongTestCase):
def setUp(self):
super().setUp()
self.song = Song.objects.create(title='song title', description='song description',
created_by=self.user_creator)
SongStats.objects.create(song=self.song)
self.song_update_url = reverse("songs:edit", kwargs={'pk': self.song.pk})
def test_song_update_denies_anonymous(self):
response = self.client.get(self.song_update_url)
self.assertRedirects(response, '%s/?next=%s' % (reverse('accounts:login'), self.song_update_url))
def test_song_update_denies_non_creators(self):
super().login(self.user_contributor)
response = self.client.get(self.song_update_url)
self.assertRedirects(response, reverse('songs:detail', kwargs={
'pk': self.song.pk
}))
def test_song_update_submits(self):
super().login(self.user_creator)
response = self.client.post(self.song_update_url, {
'title': 'new song title',
'description': 'new song description',
'license': 'cc-by-4.0',
# 'published': True
})
updated_song = Song.objects.get(pk=self.song.pk)
self.assertRedirects(response, self.song_update_url)
self.assertEqual(updated_song.title, 'new song title')
self.assertEqual(updated_song.description, 'new song description')
# self.assertEqual(updated_song.published, True)
self.assertEqual(updated_song.created_by, self.user_creator)
|
40223137/150601
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/sre_compile.py
|
630
|
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import sys
import _sre
import sre_parse
from sre_constants import *
from _sre import MAXREPEAT
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFF
def _identityfunction(x):
return x
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
for op, av in pattern:
#print('sre_compile.py:_compile:42', op, av)
#print('sre_compile.py:_compile:42', code)
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
emit(_sre.getlower(av, flags))
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = _identityfunction
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error("internal: unsupported template operator")
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error("look-behind requires fixed-width pattern")
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
# compile charset subprogram
emit = code.append
if fixup is None:
fixup = _identityfunction
for op, av in _optimize_charset(charset, fixup):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(fixup(av))
elif op is RANGE:
emit(fixup(av[0]))
emit(fixup(av[1]))
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error("internal: unsupported set operator")
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
# internal: optimize character set
out = []
outappend = out.append
charmap = [0]*256
try:
for op, av in charset:
if op is NEGATE:
outappend((op, av))
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could append to charmap tail
return charset # cannot compress
except IndexError:
# character set contains unicode characters
return _optimize_unicode(charset, fixup)
# compress character map
i = p = n = 0
runs = []
runsappend = runs.append
for c in charmap:
if c:
if n == 0:
p = i
n = n + 1
elif n:
runsappend((p, n))
n = 0
i = i + 1
if n:
runsappend((p, n))
if len(runs) <= 2:
# use literal/range
for p, n in runs:
if n == 1:
outappend((LITERAL, p))
else:
outappend((RANGE, (p, p+n-1)))
if len(out) < len(charset):
return out
else:
# use bitmap
data = _mk_bitmap(charmap)
outappend((CHARSET, data))
return out
return charset
def _mk_bitmap(bits):
data = []
dataappend = data.append
    start = (1, 0)
m, v = start
for c in bits:
if c:
v = v + m
m = m + m
if m > MAXCODE:
dataappend(v)
m, v = start
return data
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of UTF-16 has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
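# Illustrative sketch of the lookup described above (not part of the engine,
# shown schematically): for a BMP character c the matcher effectively does
#   chunk = mapping[ord(c) >> 8]    # high byte picks the chunk number
#   bit   = ord(c) & 0xFF           # low byte is the bit index in that chunk
# and then tests that bit in the chunk's 256-bit bitmap, just as for CHARSET.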
def _optimize_unicode(charset, fixup):
try:
import array
except ImportError:
return charset
charmap = [0]*65536
negate = 0
try:
for op, av in charset:
if op is NEGATE:
negate = 1
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could expand category
return charset # cannot compress
except IndexError:
# non-BMP characters; XXX now they should work
return charset
if negate:
if sys.maxunicode != 65535:
# XXX: negation does not work with big charsets
# XXX2: now they should work, but removing this will make the
# charmap 17 times bigger
return charset
for i in range(65536):
charmap[i] = not charmap[i]
comps = {}
mapping = [0]*256
block = 0
data = []
for i in range(256):
chunk = tuple(charmap[i*256:(i+1)*256])
new = comps.setdefault(chunk, block)
mapping[i] = new
if new == block:
block = block + 1
data = data + _mk_bitmap(chunk)
header = [block]
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
# Convert block indices to byte array of 256 bytes
mapping = array.array('b', mapping).tobytes()
# Convert byte array to word array
mapping = array.array(code, mapping)
assert mapping.itemsize == _sre.CODESIZE
assert len(mapping) * mapping.itemsize == 256
header = header + mapping.tolist()
data[0:0] = header
return [(BIGCHARSET, data)]
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
if lo == 0 and hi == MAXREPEAT:
raise error("nothing to repeat")
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
#print('sre_compile.py:_compile_info:370', lo, hi)
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
#print('sre_compile.py:_code:381',op,av)
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
#print('sre_compile.py:_code:430', code)
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
#print('sre_compile.py:_code:457', code)
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
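        # (This is the classic KMP-style failure table: table[i+1] holds the
        # length of the longest proper prefix of prefix[:i+1] that is also a
        # suffix, letting the matcher shift a literal-prefix search without
        # re-examining characters.)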
table = [-1] + ([0]*len(prefix))
for i in range(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
def isstring(obj):
return isinstance(obj, (str, bytes))
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
#print("sre_compile.py:compile:504:p", p)
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
#print('sre_compile.py:498:p', p)
code = _code(p, flags)
#print('sre_compile.py:501:code', code)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
|
gnuhub/intellij-community
|
refs/heads/master
|
python/testData/refactoring/pushdown/multiple.before.py
|
83
|
class Foo:
def foo(self):
print("a")
class Zope(Foo):
def _mine(self):
print "zope"
class Boo(Foo):
def boo(self):
print "rrrrr"
|
DreamerKing/LightweightHtmlWidgets
|
refs/heads/master
|
publish-rc/v1.0/files/Ipy.Lib/distutils/extension.py
|
250
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
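# Illustrative usage (module and file names are made up):
#   Extension('foo', ['src/foomodule.c'],
#             include_dirs=['include'],
#             define_macros=[('NDEBUG', None)],
#             libraries=['m'])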
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
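            # An illustrative Setup line (names are made up):
            #   foo foomodule.c -DFOO_DEBUG -I/usr/local/include -lbar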
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
|
saurbkumar/programming_question_python
|
refs/heads/master
|
my_question/binary-tree-all-path-to-given-sum.py
|
1
|
#First case
#http://www.geeksforgeeks.org/root-to-leaf-path-sum-equal-to-a-given-number/
'''Print every root-to-node path whose keys sum to a given number - handled by the second if condition below'''
'''Print every root-to-node path ending at a given key - handled by the first if condition below'''
class Node:
def __init__(self, key):
self.key = key
self.left = None
self.right = None
def findPath( root, path, k):
if root is None:
return False
path.append(root.key)
findPath(root.left, path, k)
findPath(root.right, path, k)
    # Case 1: print the path from the root when the current node's key equals k
    if path[-1] == k:  # path[-1] is the element just appended, i.e. the current node
        # the node just appended is the one we are looking for,
        # so path currently holds a complete root-to-node path
        print(path)
    # Case 2: print the path from the root when the keys along it sum to k
    if sum(path) == k:
        print(path)
path.pop()
return False
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(3)
path = []
findPath(root, path, 7)
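# For the sample tree above this prints [1, 2, 4] and [1, 3, 3]:
# both root-to-node paths sum to 7, and no node's key equals 7.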
|
boogiekodi/plugin.program.ump
|
refs/heads/master
|
lib/third/imsize.py
|
2
|
#-------------------------------------------------------------------------------
# Name: get_image_size
# Purpose: extract image dimensions given a file path using just
# core modules
#
# Author: Paulo Scardine (based on code from Emmanuel VAISSE)
# Ported to stream data by Huseyin BIYIK
#
# Created: 26/09/2013
# Copyright: (c) Paulo Scardine 2013
# Licence: MIT
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import struct
class UnknownImageFormat(Exception):
pass
class NotEnoughData(Exception):
pass
def read(offset,ln,data):
if offset>len(data):
raise NotEnoughData("Not Enough stream legth ( %d ) for header"%len(data))
return offset+ln,data[offset:offset+ln]
def get_image_size(data):
"""
Return (width, height) for a given img file content - no external
dependencies except the os and struct modules from core
"""
size=len(data)
height = -1
width = -1
type= -1
if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):
# GIFs
w, h = struct.unpack("<HH", data[6:10])
width = int(w)
height = int(h)
type= "gif"
elif ((size >= 24) and data.startswith('\211PNG\r\n\032\n')
and (data[12:16] == 'IHDR')):
# PNGs
w, h = struct.unpack(">LL", data[16:24])
width = int(w)
height = int(h)
type= "png"
elif (size >= 16) and data.startswith('\211PNG\r\n\032\n'):
# older PNGs?
w, h = struct.unpack(">LL", data[8:16])
width = int(w)
height = int(h)
type = "png"
elif (size >= 2) and data.startswith('\377\330'):
# JPEG
type= "jpeg"
msg = " raised while trying to decode as JPEG."
offs=0
offs,c = read(offs,2,data)
offs,byte = read(offs,1,data)
try:
while byte != b"":
while byte != b'\xff':
offs,byte = read(offs,1,data)
while byte == b'\xff':
offs,byte = read(offs,1,data)
hasChunk = ord(byte) not in range( 0xD0, 0xDA) + [0x00]
if hasChunk:
offs,cs=read(offs,2,data)
ChunkSize = struct.unpack( ">H", cs)[0] - 2
Next_ChunkOffset = offs + ChunkSize
if (byte >= b'\xC0' and byte <= b'\xC3'):
# Found SOF1..3 data chunk - Read it and quit
offs,c = read(offs,1,data)
offs,h=read(offs,2,data)
h = struct.unpack( ">H", h)[0]
offs,w=read(offs,2,data)
w = struct.unpack( ">H", w)[0]
break
elif (byte == b'\xD9') or offs >=len(data):
# Found End of Image
EOI = offs
break
# else:
# Seek to next data chunk
# print "Pos: %.4x %x" % (offs, ChunkSize)
                if hasChunk:
                    offs = Next_ChunkOffset
                offs, byte = read(offs, 1, data)
width = int(w)
height = int(h)
except struct.error:
raise UnknownImageFormat("StructError" + msg)
except ValueError:
raise UnknownImageFormat("ValueError" + msg)
# except Exception as e:
# raise UnknownImageFormat(e.__class__.__name__ + msg)
else:
raise UnknownImageFormat(
"Sorry, don't know how to get information from this file."
)
return type,width, height
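# Illustrative usage (file name is hypothetical): pass in the raw bytes of an
# image; at least the header portion of the stream must be present.
#   data = open('example.png', 'rb').read()
#   imgtype, width, height = get_image_size(data)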
|
dmordom/nipype
|
refs/heads/master
|
nipype/interfaces/camino/tests/test_auto_SFLUTGen.py
|
5
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.camino.calib import SFLUTGen
def test_SFLUTGen_inputs():
input_map = dict(args=dict(argstr='%s',
),
binincsize=dict(argstr='-binincsize %d',
units='NA',
),
directmap=dict(argstr='-directmap',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-inputfile %s',
mandatory=True,
),
info_file=dict(argstr='-infofile %s',
mandatory=True,
),
minvectsperbin=dict(argstr='-minvectsperbin %d',
units='NA',
),
order=dict(argstr='-order %d',
units='NA',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
outputstem=dict(argstr='-outputstem %s',
usedefault=True,
),
pdf=dict(argstr='-pdf %s',
usedefault=True,
),
terminal_output=dict(mandatory=True,
nohash=True,
),
)
inputs = SFLUTGen.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SFLUTGen_outputs():
output_map = dict(lut_one_fibre=dict(),
lut_two_fibres=dict(),
)
outputs = SFLUTGen.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
nharraud/invenio-oaiharvester
|
refs/heads/master
|
invenio_oaiharvester/views.py
|
3
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAIHarverster Blueprint."""
from __future__ import absolute_import, print_function, unicode_literals
from flask import Blueprint
blueprint = Blueprint('oaiharvester', __name__,
template_folder='templates', static_folder='static')
|
gameduell/duell
|
refs/heads/master
|
bin/win/python2.7.9/Lib/lib2to3/fixes/fix_isinstance.py
|
326
|
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
'isinstance'
trailer< '(' arglist< any ',' atom< '('
args=testlist_gexp< any+ >
')' > > ')' >
>
"""
run_order = 6
def transform(self, node, results):
names_inserted = set()
testlist = results["args"]
args = testlist.children
new_args = []
iterator = enumerate(args)
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
iterator.next()
continue
else:
new_args.append(arg)
if arg.type == token.NAME:
names_inserted.add(arg.value)
if new_args and new_args[-1].type == token.COMMA:
del new_args[-1]
if len(new_args) == 1:
atom = testlist.parent
new_args[0].prefix = atom.prefix
atom.replace(new_args[0])
else:
args[:] = new_args
node.changed()
|
shaon/eutester
|
refs/heads/master
|
testcases/cloud_admin/load/locust/locustfile.py
|
6
|
import time
from eucaops import Eucaops
from locust import Locust, events, web
import user_profiles
@web.app.route("/added_page")
def my_added_page():
return "Another page"
class EucaopsClient(Eucaops):
def __init__(self, *args, **kwargs):
"""
This class extends Eucaops in order to provide a feedback
loop to LocustIO. It generates a Eucaops client and fires events
to the LocustIO when the time_operation wrapper is called with a method
as its arguments.
:param args: positional args passed to Eucaops constructor
:param kwargs: keyword args passed to Eucaops constructor
"""
super(EucaopsClient, self).__init__(*args, **kwargs)
self.output_file = open("test-output", "a")
self.output_file.write('='*10 + " Starting Test " + '='*10 + "\n")
def time_operation(self, method, *args, **kwargs):
start_time = time.time()
output_format = "{0:20} {1:20} {2:20}\n"
method_name = method.__name__
try:
result = method(*args, **kwargs)
except Exception as e:
total_time = int((time.time() - start_time) * 1000)
events.request_failure.fire(request_type="eutester",
name=method_name,
response_time=total_time, exception=e)
self.output_file.write(output_format.format(method_name, total_time,
"f"))
else:
total_time = int((time.time() - start_time) * 1000)
try:
length = len(result)
except:
length = 0
events.request_success.fire(request_type="eutester",
name=method_name,
response_time=total_time,
response_length=length)
self.output_file.write(output_format.format(method_name, total_time,
"p"))
return result
class EucaopsLocust(Locust):
def __init__(self):
super(EucaopsLocust, self).__init__()
self.client = EucaopsClient(credpath="creds")
class EucaopsUser(EucaopsLocust):
min_wait = 1
max_wait = 1
task_set = user_profiles.EC2Read
def on_start(self):
pass
def on_stop(self):
self.client.cleanup_resources()
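# Illustrative sketch: any Eucaops call can be routed through time_operation
# so Locust records its latency and pass/fail status, e.g. inside a task:
#   self.client.time_operation(self.client.cleanup_resources)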
|
jimbobhickville/libcloud
|
refs/heads/trunk
|
libcloud/test/common/test_openstack.py
|
42
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from mock import Mock
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.utils.py3 import PY25
class OpenStackBaseConnectionTest(unittest.TestCase):
def setUp(self):
self.timeout = 10
OpenStackBaseConnection.conn_classes = (None, Mock())
self.connection = OpenStackBaseConnection('foo', 'bar',
timeout=self.timeout,
ex_force_auth_url='https://127.0.0.1')
self.connection.driver = Mock()
self.connection.driver.name = 'OpenStackDriver'
def test_base_connection_timeout(self):
self.connection.connect()
self.assertEqual(self.connection.timeout, self.timeout)
if PY25:
self.connection.conn_classes[1].assert_called_with(host='127.0.0.1',
port=443)
else:
self.connection.conn_classes[1].assert_called_with(host='127.0.0.1',
port=443,
timeout=10)
if __name__ == '__main__':
sys.exit(unittest.main())
|
simon-pepin/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_base.py
|
120
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
    clf.fit(X, Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
|
soldag/home-assistant
|
refs/heads/dev
|
homeassistant/components/yessssms/const.py
|
18
|
"""Const for YesssSMS."""
CONF_PROVIDER = "provider"
|
hyperized/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/_os_server_actions.py
|
61
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['removed'],
'supported_by': 'community'}
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module(removed_in='2.8')
|
terkkila/scikit-learn
|
refs/heads/master
|
benchmarks/bench_20newsgroups.py
|
377
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
mark-ignacio/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/test/runner_unittest.py
|
124
|
# Copyright (C) 2012 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import StringIO
import unittest2 as unittest
from webkitpy.tool.mocktool import MockOptions
from webkitpy.test.printer import Printer
from webkitpy.test.runner import Runner
class FakeModuleSuite(object):
def __init__(self, name, result, msg):
self.name = name
self.result = result
self.msg = msg
def __str__(self):
return self.name
def run(self, result):
result.testsRun += 1
if self.result == 'F':
result.failures.append((self.name, self.msg))
elif self.result == 'E':
result.errors.append((self.name, self.msg))
class FakeTopSuite(object):
def __init__(self, tests):
self._tests = tests
class FakeLoader(object):
def __init__(self, *test_triples):
self.triples = test_triples
self._tests = []
self._results = {}
for test_name, result, msg in self.triples:
self._tests.append(test_name)
m = re.match("(\w+) \(([\w.]+)\)", test_name)
self._results['%s.%s' % (m.group(2), m.group(1))] = tuple([test_name, result, msg])
def top_suite(self):
return FakeTopSuite(self._tests)
def loadTestsFromName(self, name, _):
return FakeModuleSuite(*self._results[name])
class RunnerTest(unittest.TestCase):
def setUp(self):
# Here we have to jump through a hoop to make sure test-webkitpy doesn't log
# any messages from these tests :(.
self.root_logger = logging.getLogger()
self.log_levels = []
self.log_handlers = self.root_logger.handlers[:]
for handler in self.log_handlers:
self.log_levels.append(handler.level)
handler.level = logging.CRITICAL
def tearDown(self):
for handler in self.log_handlers:
handler.level = self.log_levels.pop(0)
def test_run(self, verbose=0, timing=False, child_processes=1, quiet=False):
options = MockOptions(verbose=verbose, timing=timing, child_processes=child_processes, quiet=quiet, pass_through=False)
stream = StringIO.StringIO()
loader = FakeLoader(('test1 (Foo)', '.', ''),
('test2 (Foo)', 'F', 'test2\nfailed'),
('test3 (Foo)', 'E', 'test3\nerred'))
runner = Runner(Printer(stream, options), loader)
runner.run(['Foo.test1', 'Foo.test2', 'Foo.test3'], 1)
self.assertEqual(runner.tests_run, 3)
self.assertEqual(len(runner.failures), 1)
self.assertEqual(len(runner.errors), 1)
|
awkspace/ansible
|
refs/heads/devel
|
test/integration/targets/ansible-runner/files/adhoc_example1.py
|
87
|
import json
import os
import sys
import ansible_runner
# the first positional arg should be where the artifacts live
output_dir = sys.argv[1]
# this calls a single module directly, aka "adhoc" mode
r = ansible_runner.run(
private_data_dir=output_dir,
host_pattern='localhost',
module='shell',
module_args='whoami'
)
data = {
'rc': r.rc,
'status': r.status,
'events': [x['event'] for x in r.events],
'stats': r.stats
}
# insert this header for the flask controller
print('#STARTJSON')
json.dump(data, sys.stdout)
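# Consumer-side sketch (hedged; the flask controller itself is outside this
# file): a reader can split the captured stdout on the marker above, e.g.
#   _, json_text = raw_output.split('#STARTJSON', 1)
#   payload = json.loads(json_text)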
|
drexly/tonginBlobStore
|
refs/heads/master
|
lib/django/shortcuts.py
|
135
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined, using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined,
using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 1.10, request should become a positional argument.
content = loader.render_to_string(
template_name, context, request=request, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango110Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
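# Illustrative calls (a sketch; 'some-view-name' and obj are hypothetical,
# not defined in this module):
#   redirect('/some/url/')                  # URL used as-is
#   redirect('some-view-name', foo='bar')   # reverse-resolved by view name
#   redirect(obj)                           # uses obj.get_absolute_url()
#   redirect('/done/', permanent=True)      # 301 instead of 302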
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
    Uses get() to return an object, or raises an Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
    Uses filter() to return a list of objects, or raise an Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
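# Illustrative calls (a sketch; MyModel is hypothetical):
#   obj = get_object_or_404(MyModel, pk=1)
#   published = get_list_or_404(MyModel.objects.filter(live=True))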
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
|
EliasTouil/simpleBlog
|
refs/heads/master
|
simpleBlog/Lib/encodings/iso2022_jp_3.py
|
816
|
#
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_3')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
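# Round-trip sketch (assumes the codec is registered through the standard
# encodings package, as in a stock CPython build):
#   encoded = u'\u3042'.encode('iso2022_jp_3')   # HIRAGANA LETTER A
#   assert encoded.decode('iso2022_jp_3') == u'\u3042'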
|
joshmgrant/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/common/repr_tests.py
|
31
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.support.wait import WebDriverWait
def testShouldImplementReprForWebDriver(driver):
driver_repr = repr(driver)
assert type(driver).__name__ in driver_repr
assert driver.session_id in driver_repr
def testShouldImplementReprForWebElement(driver, pages):
pages.load('simpleTest.html')
elem = driver.find_element_by_id("validImgTag")
elem_repr = repr(elem)
assert type(elem).__name__ in elem_repr
assert driver.session_id in elem_repr
assert elem._id in elem_repr
def testShouldImplementReprForWait(driver):
wait = WebDriverWait(driver, 30)
wait_repr = repr(wait)
assert type(wait).__name__ in wait_repr
assert driver.session_id in wait_repr
|
kharts/kastodi
|
refs/heads/master
|
resources/lib/google/protobuf/unittest_import_pb2.py
|
8
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import unittest_import_public_pb2 as google_dot_protobuf_dot_unittest__import__public__pb2
from google.protobuf.unittest_import_public_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=_b('\n%google/protobuf/unittest_import.proto\x12\x18protobuf_unittest_import\x1a,google/protobuf/unittest_import_public.proto\"\x1a\n\rImportMessage\x12\t\n\x01\x64\x18\x01 \x01(\x05*<\n\nImportEnum\x12\x0e\n\nIMPORT_FOO\x10\x07\x12\x0e\n\nIMPORT_BAR\x10\x08\x12\x0e\n\nIMPORT_BAZ\x10\t*1\n\x10ImportEnumForMap\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x46OO\x10\x01\x12\x07\n\x03\x42\x41R\x10\x02\x42\x1f\n\x18\x63om.google.protobuf.testH\x01\xf8\x01\x01P\x00')
,
dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IMPORTENUM = _descriptor.EnumDescriptor(
name='ImportEnum',
full_name='protobuf_unittest_import.ImportEnum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IMPORT_FOO', index=0, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAR', index=1, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAZ', index=2, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=141,
serialized_end=201,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUM)
ImportEnum = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUM)
_IMPORTENUMFORMAP = _descriptor.EnumDescriptor(
name='ImportEnumForMap',
full_name='protobuf_unittest_import.ImportEnumForMap',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOO', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=203,
serialized_end=252,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUMFORMAP)
ImportEnumForMap = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUMFORMAP)
IMPORT_FOO = 7
IMPORT_BAR = 8
IMPORT_BAZ = 9
UNKNOWN = 0
FOO = 1
BAR = 2
_IMPORTMESSAGE = _descriptor.Descriptor(
name='ImportMessage',
full_name='protobuf_unittest_import.ImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='d', full_name='protobuf_unittest_import.ImportMessage.d', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=139,
)
DESCRIPTOR.message_types_by_name['ImportMessage'] = _IMPORTMESSAGE
DESCRIPTOR.enum_types_by_name['ImportEnum'] = _IMPORTENUM
DESCRIPTOR.enum_types_by_name['ImportEnumForMap'] = _IMPORTENUMFORMAP
ImportMessage = _reflection.GeneratedProtocolMessageType('ImportMessage', (_message.Message,), dict(
DESCRIPTOR = _IMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.ImportMessage)
))
_sym_db.RegisterMessage(ImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.protobuf.testH\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
goldeneye-source/ges-python
|
refs/heads/master
|
lib/multiprocessing/popen_fork.py
|
83
|
import os
import sys
import signal
import errno
from . import util
__all__ = ['Popen']
#
# Start child process using fork
#
class Popen(object):
method = 'fork'
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._launch(process_obj)
def duplicate_for_child(self, fd):
return fd
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError as e:
if e.errno == errno.EINTR:
continue
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if self.returncode is None:
if timeout is not None:
from multiprocessing.connection import wait
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except ProcessLookupError:
pass
except OSError:
if self.wait(timeout=0.1) is None:
raise
def _launch(self, process_obj):
code = 1
parent_r, child_w = os.pipe()
self.pid = os.fork()
if self.pid == 0:
try:
os.close(parent_r)
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
finally:
os._exit(code)
else:
os.close(child_w)
util.Finalize(self, os.close, (parent_r,))
self.sentinel = parent_r
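# Usage is indirect (a sketch, not part of this module): with the 'fork'
# start method, multiprocessing.Process delegates to this class, roughly
#   popen = Popen(process_obj)   # forks; parent keeps popen.sentinel for wait()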
|
kivy/plyer
|
refs/heads/master
|
examples/text2speech/main.py
|
1
|
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from plyer import tts
kivy.require('1.8.0')
class Text2SpeechDemo(BoxLayout):
def do_read(self):
try:
tts.speak(self.ids.notification_text.text)
except NotImplementedError:
popup = ErrorPopup()
popup.open()
class Text2SpeechDemoApp(App):
def build(self):
return Text2SpeechDemo()
def on_pause(self):
return True
class ErrorPopup(Popup):
pass
if __name__ == '__main__':
Text2SpeechDemoApp().run()
|
InstigatorX/InstigatorX-V2-Kernel
|
refs/heads/master
|
scripts/build-all.py
|
1250
|
#! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
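# Example invocations (a sketch; 'msm8960' stands in for whatever defconfig
# names scan_configs() finds under arch/arm/configs):
#   ./build-all.py --list
#   ./build-all.py -j 8 msm8960
#   ./build-all.py --updateconfigs='CONFIG_USE_THING=y' all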
|
fairdk/fair-ubuntu-centre
|
refs/heads/master
|
installscripts/postinstall/filesystem/opt/fair-apps/shutdown/deploy.py
|
1
|
import datetime
import os
import subprocess, shlex
import re
import sys
import time
REMOTE_SSH_EXEC = "ssh -o CheckHostIP=no -o StrictHostKeyChecking=no -o PasswordAuthentication=no root@%s \"%s\""
REMOTE_SCP = "scp -o CheckHostIP=no -o StrictHostKeyChecking=no %s root@%s:%s"
REMOTE_IPS = ["192.168.10.%d" % x for x in xrange(20,255)]
COMMANDS = ["""sed -i 's/^.*nfs.*$//g' /etc/fstab""",]
RESPONSE_ERROR = re.compile(r"(No\sroute\sto\shost|Connection\srefused|Permission\sdenied|Network\sis\sunreachable)")
# Some old example...
def deploy_sequential(cmds):
ssh_exec = subprocess.Popen(shlex.split("rm /root/.ssh/known_hosts"),
stdout=subprocess.PIPE)
success = 0
for ip in REMOTE_IPS:
for cmd in cmds:
ssh_exec = subprocess.Popen(shlex.split(REMOTE_SSH_EXEC % (ip, cmd)),
stdout=subprocess.PIPE)
            response = ssh_exec.stdout.read()
            if RESPONSE_ERROR.search(response):
pass
else:
success = success + 1
return ssh_exec, success
def deploy(cmds):
ssh_exec = subprocess.Popen(shlex.split("rm /root/.ssh/known_hosts"),
stdout=subprocess.PIPE)
for ip in REMOTE_IPS:
for cmd in cmds:
ssh_exec = subprocess.Popen(shlex.split(REMOTE_SSH_EXEC % (ip, cmd)),
stdout=subprocess.PIPE)
def sendfile(src, dst):
ssh_exec = subprocess.Popen(shlex.split("rm ~/.ssh/known_hosts"),
stdout=subprocess.PIPE)
for ip in REMOTE_IPS:
ssh_exec = subprocess.Popen(shlex.split(REMOTE_SCP % (src, ip, dst)),
stdout=subprocess.PIPE)
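# Example (a sketch; the destination path is illustrative only):
#   deploy(COMMANDS)                       # run the fstab cleanup everywhere
#   sendfile('/etc/hosts', '/etc/hosts')   # push one file to every client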
|
arbrandes/edx-platform
|
refs/heads/master
|
lms/djangoapps/mobile_api/tests/test_middleware.py
|
5
|
"""
Tests for Version Based App Upgrade Middleware
"""
from datetime import datetime
from unittest import mock
import ddt
from django.core.cache import caches
from django.http import HttpRequest, HttpResponse
from pytz import UTC
from lms.djangoapps.mobile_api.middleware import AppVersionUpgrade
from lms.djangoapps.mobile_api.models import AppVersionConfig
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
class TestAppVersionUpgradeMiddleware(CacheIsolationTestCase):
"""
Tests for version based app upgrade middleware
"""
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.middleware = AppVersionUpgrade()
self.set_app_version_config()
def set_app_version_config(self):
""" Creates configuration data for platform versions """
AppVersionConfig(platform="iOS", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="iOS",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="iOS",
version="4.4.4",
expire_at=datetime(9000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="iOS", version="6.6.6", expire_at=None, enabled=True).save()
AppVersionConfig(platform="Android", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="Android",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="Android",
version="4.4.4",
expire_at=datetime(5000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="Android", version="8.8.8", expire_at=None, enabled=True).save()
    def process_middleware(self, user_agent, cache_get_many_calls_for_request=1):
        """ Helper that calls the middleware's process_request and process_response """
fake_request = HttpRequest()
fake_request.META['HTTP_USER_AGENT'] = user_agent
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
request_response = self.middleware.process_request(fake_request)
assert cache_get_many_calls_for_request == mocked_code.call_count
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
processed_response = self.middleware.process_response(fake_request, request_response or HttpResponse())
assert 0 == mocked_code.call_count
return request_response, processed_response
@ddt.data(
("Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0"),
("Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) "
"Mobile/13C75 edX/org.edx.mobile/2.2.1"),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 "
"Safari/537.36"),
)
def test_non_mobile_app_requests(self, user_agent):
with self.assertNumQueries(0):
request_response, processed_response = self.process_middleware(user_agent, 0)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
@ddt.data(
"edX/org.edx.mobile (6.6.6; OS Version 9.2 (Build 13C75))",
"edX/org.edx.mobile (7.7.7; OS Version 9.2 (Build 13C75))",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/8.8.8",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/9.9.9",
)
def test_no_update(self, user_agent):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (5.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (5.1.1.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1.RC", "8.8.8"),
)
@ddt.unpack
def test_new_version_available(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (1.0.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (1.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.0.5.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.2.2; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.0.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.0.5.RC", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.2.2", "8.8.8"),
)
@ddt.unpack
def test_version_update_required(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is not None
assert 426 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (4.4.4; OS Version 9.2 (Build 13C75))", "6.6.6", '9000-01-01T00:00:00+00:00'),
(
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/4.4.4",
"8.8.8",
'5000-01-01T00:00:00+00:00',
),
)
@ddt.unpack
def test_version_update_available_with_deadline(self, user_agent, latest_version, upgrade_date):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert upgrade_date == processed_response[AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
|
s-hertel/ansible
|
refs/heads/devel
|
test/integration/targets/missing_required_lib/library/missing_required_lib.py
|
28
|
#!/usr/bin/python
# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
try:
import ansible_missing_lib
HAS_LIB = True
except ImportError as e:
HAS_LIB = False
def main():
module = AnsibleModule({
'url': {'type': 'bool'},
'reason': {'type': 'bool'},
})
kwargs = {}
if module.params['url']:
kwargs['url'] = 'https://github.com/ansible/ansible'
if module.params['reason']:
kwargs['reason'] = 'for fun'
if not HAS_LIB:
module.fail_json(
msg=missing_required_lib(
'ansible_missing_lib',
**kwargs
),
)
if __name__ == '__main__':
main()
|
glue-viz/bermuda
|
refs/heads/master
|
bermuda/demos/shape_options.py
|
1
|
# API sketch for bermuda's interactive shapes; the keyword signatures below
# are design options being explored, not a finished interface.
import matplotlib.pyplot as plt
from bermuda import ellipse, polygon, rectangle
# placeholder geometry so the snippets below name concrete values
x, y, w, h, theta = 0.5, 0.5, 1.0, 2.0, 30.0
plt.plot([1, 2, 3], [2, 3, 4])
ax = plt.gca()
# default choices for everything
e = ellipse(ax)
# custom position, generic interface for all shapes
e = ellipse(ax, bbox=(x, y, w, h, theta))
e = ellipse(ax, cen=(x, y), width=w, height=h, theta=theta)
# force square/circle?
e = ellipse(ax, aspect_equal=True)
# freeze properties?
e = ellipse(ax, width=1, height=2, aspect_frozen=True)
e = ellipse(ax, rotation_frozen=True)
e = ellipse(ax, center_frozen=True)
e = ellipse(ax, size_frozen=True)
# all of these kwargs should be settable properties as well
e.bbox = (x, y, w, h, theta)
e.aspect_equal = True
e.aspect_frozen = True
|
kneeks/is210-week-03-synthesizing
|
refs/heads/master
|
inquisition.py
|
25
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Nobody expects this module."""
SPANISH = '''Nobody expects the Spanish Inquisition!
Our chief weapon is surprise...surprise and fear...fear and surprise....
Our two weapons are fear and surprise...and ruthless efficiency....
Our three weapons are fear, surprise, and ruthless efficiency... .
and an almost fanatical devotion to the Pope....
Our four...no... amongst our weapons....
amongst our weaponry are such elements as fear, surprise....
I'll come in again.'''
|
themiken/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/win/vs-macros/as.py
|
332
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-a', dest='platform')
parser.add_option('-o', dest='output')
parser.add_option('-p', dest='path')
(options, args) = parser.parse_args()
f = open(options.output, 'w')
print >>f, 'options', options
print >>f, 'args', args
f.close()
|
yanigisawa/voting-tornado
|
refs/heads/master
|
server/seeddata.py
|
1
|
from pymongo import MongoClient
import urllib
from datetime import datetime
import json
import config
from models import Event, EventEncoder, Team, TeamMember
import requests
server = config.dbServer
user = urllib.parse.quote_plus(config.dbUser)
password = urllib.parse.quote_plus(config.dbPassword)
uri = 'mongodb://{0}:{1}@{2}'.format(user, password, server)
def getDate(dateString):
return datetime.strptime(dateString, '%m/%d/%Y')
_events = [
{'title': 'Nerdtacular', 'startDate' : getDate('6/29/2017'),
'endDate' : getDate('7/1/2017'), 'categories' : [
{'id': 0, 'name': 'Likeable Characters', 'weight': 0.8},
{'id': 1, 'name': 'Likeable Universe', 'weight': 0.6},
{'id': 2, 'name': 'Most Likely', 'weight': 0.4}
]},
{ 'title': 'PyOhio', 'startDate' : getDate('7/29/2017'),
'endDate' : getDate('7/30/2017'), 'categories' : [
{'id': 0, 'name': 'Likeable Characters', 'weight': 0.8},
{'id': 1, 'name': 'Likeable Universe', 'weight': 0.6},
{'id': 2, 'name': 'Most Likely', 'weight': 0.4}
]},
{ 'title': 'Female Founders Conference', 'startDate' : getDate('6/29/2017'),
'endDate' : getDate('6/29/2017'), 'categories' : [
{'id': 0, 'name': 'Likeable Characters', 'weight': 0.8},
{'id': 1, 'name': 'Likeable Universe', 'weight': 0.6},
{'id': 2, 'name': 'Most Likely', 'weight': 0.4}
]},
{'title': 'Black Hat', 'startDate' : getDate('7/22/2017'),
'endDate' : getDate('7/27/2017'), 'categories' : [
{'id': 0, 'name': 'Likeable Characters', 'weight': 0.8},
{'id': 1, 'name': 'Likeable Universe', 'weight': 0.6},
{'id': 2, 'name': 'Most Likely', 'weight': 0.4}
]},
{ 'title': 'Cron Con', 'startDate' : getDate('1/19/2038'),
'endDate' : getDate('12/13/1901'), 'categories' : [
{'id': 0, 'name': 'Likeable Characters', 'weight': 0.8},
{'id': 1, 'name': 'Likeable Universe', 'weight': 0.6},
{'id': 2, 'name': 'Most Likely', 'weight': 0.4}
]}
]
_votes = [
{'teamId': '123', 'userId': 'a', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 1},
{'id': 1, 'weight': 0.6, 'rank': 1},
{'id': 2, 'weight': 0.4, 'rank': 1}
]},
{'teamId': '123', 'userId': 'b', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 2},
{'id': 1, 'weight': 0.6, 'rank': 2},
{'id': 2, 'weight': 0.4, 'rank': 2}
]},
{'teamId': '123', 'userId': 'c', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 3},
{'id': 1, 'weight': 0.6, 'rank': 3},
{'id': 2, 'weight': 0.4, 'rank': 3}
]},
{'teamId': '456', 'userId': 'a', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 4},
{'id': 1, 'weight': 0.6, 'rank': 4},
{'id': 2, 'weight': 0.4, 'rank': 4}
]},
{'teamId': '456', 'userId': 'b', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 5},
{'id': 1, 'weight': 0.6, 'rank': 5},
{'id': 2, 'weight': 0.4, 'rank': 5}
]},
{'teamId': '456', 'userId': 'c', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 1},
{'id': 1, 'weight': 0.6, 'rank': 1},
{'id': 2, 'weight': 0.4, 'rank': 1}
]},
{'teamId': '789', 'userId': 'a', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 2},
{'id': 1, 'weight': 0.6, 'rank': 2},
{'id': 2, 'weight': 0.4, 'rank': 2}
]},
{'teamId': '789', 'userId': 'b', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 3},
{'id': 1, 'weight': 0.6, 'rank': 3},
{'id': 2, 'weight': 0.4, 'rank': 3}
]},
{'teamId': '789', 'userId': 'c', 'categories': [
{'id': 0, 'weight': 0.8, 'rank': 4},
{'id': 1, 'weight': 0.6, 'rank': 4},
{'id': 2, 'weight': 0.4, 'rank': 4}
]}
]
def getJsonEvents():
jsonArr = []
for e in _events:
tmp = {}
tmp['title'] = e['title']
tmp['startDate'] = e['startDate'].strftime('%m/%d/%Y')
tmp['endDate'] = e['endDate'].strftime('%m/%d/%Y')
tmp['categories'] = e['categories']
jsonArr.append(tmp)
return json.dumps(jsonArr)
def insert_events(events):
client = MongoClient(uri)
db = client['voting-tornado-db']
db_events = db['events']
mongo_events = [Event(e).mongo_encode() for e in events]
inserted_events = db_events.insert_many(mongo_events)
    return inserted_events
def get_auth0_groups():
headers = { 'content-type': 'application/json' }
data = {
'client_id': config.auth0_clientId, 'client_secret': config.auth0_secret,
'audience': 'urn:auth0-authz-api', 'grant_type': 'client_credentials' }
payload = json.dumps(data)
resp = requests.post(config.auth0_url + '/oauth/token', headers=headers, data=payload)
resp.raise_for_status()
token_json = resp.json()
headers = { 'authorization': token_json['token_type'] + ' ' + token_json['access_token']}
resp = requests.get(config.auth0_authorization_url + '/groups', headers=headers)
resp.raise_for_status()
return resp.json()
def get_auth0_users():
headers = { 'content-type': 'application/json' }
data = {
'client_id': config.auth0_clientId, 'client_secret': config.auth0_secret,
'audience': config.auth0_url + '/api/v2/', 'grant_type': 'client_credentials' }
management_auth_req = requests.post(config.auth0_url + '/oauth/token', headers=headers, data=json.dumps(data))
management_auth_req.raise_for_status()
token_json = management_auth_req.json()
headers = { 'authorization': token_json['token_type'] + ' ' + token_json['access_token']}
users_req = requests.get(config.auth0_url + '/api/v2/users', headers=headers)
return users_req.json()
def seedData():
# Query groups from auth0
groups = get_auth0_groups()['groups']
teams = []
# Query users from auth0
users = get_auth0_users()
for g in groups:
members = []
for auth0Id in g['members']:
            member = next((u for u in users if u['user_id'] == auth0Id), None)
            if member is None:
                continue
            if 'team_lead' in member.get('app_metadata', {}):
                g['team_lead'] = member['user_id']
            members.append(member)
g['members'] = members
teams.append(g)
# Create all teams in all events
events = []
for e in _events:
        ev = dict(e)  # copy the template event rather than mutating _events
        ev['teams'] = teams
events.append(ev)
insert_events(events)
if __name__ == "__main__":
seedData()
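# One-off check (a sketch): render the seed events as JSON without touching
# Mongo or auth0, since getJsonEvents() only reads the in-module _events list:
#   python -c "import seeddata; print(seeddata.getJsonEvents())"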
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/encodings/cp1250.py
|
272
|
""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1250',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u02c7' # 0xA1 -> CARON
'\u02d8' # 0xA2 -> BREVE
'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u02db' # 0xB2 -> OGONEK
'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
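# Round-trip sanity check (a sketch): every character of 'Łódź' maps into
# CP1250, so encoding and decoding is lossless here:
#   assert 'Łódź'.encode('cp1250').decode('cp1250') == 'Łódź'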
|
ishay2b/tensorflow
|
refs/heads/segnet
|
tensorflow/contrib/linalg/python/__init__.py
|
959
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
czgu/metaHack
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/fr/formats.py
|
82
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
syci/OCB
|
refs/heads/9.0
|
addons/product_uos/models/product_uos.py
|
11
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
class ProductTemplate(models.Model):
_inherit = "product.template"
uos_id = fields.Many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another'
' unit of measure than inventory. Keep empty to use the default unit of measure.')
uos_coeff = fields.Float('Unit of Measure -> UOS Coeff', digits_compute=dp.get_precision('Product Unit of Measure'),
help='Coefficient to convert default Unit of Measure to Unit of Sale'
' uos = uom * coeff')
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.one
def _set_uos(self):
if self.product_id.uos_coeff:
self.product_uom_qty = self.product_uos_qty / self.product_id.uos_coeff
self.product_uom = self.product_id.uom_id
@api.one
def _compute_uos(self):
self.product_uos_qty = self.product_uom_qty * self.product_id.uos_coeff
product_uos_qty = fields.Float(string='Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
compute='_compute_uos', inverse='_set_uos', readonly=False)
product_uos = fields.Many2one('product.uom', string='Unit of Measure', required=True,
related='product_id.uos_id', readonly=True)
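# Worked example (a sketch of the compute/inverse pair above): with
# uos_coeff = 12 (uos = uom * coeff), entering product_uom_qty = 2 computes
# product_uos_qty = 24, while writing product_uos_qty = 6 stores
# product_uom_qty = 6 / 12 = 0.5.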
|
hassanabidpk/django
|
refs/heads/master
|
tests/sessions_tests/custom_db_backend.py
|
159
|
"""
This custom Session model adds an extra column to store an account ID. In
real-world applications, it gives you the option of querying the database for
all active sessions for a particular account.
"""
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.contrib.sessions.base_session import AbstractBaseSession
from django.db import models
class CustomSession(AbstractBaseSession):
"""
A session model with a column for an account ID.
"""
account_id = models.IntegerField(null=True, db_index=True)
class Meta:
app_label = 'sessions'
@classmethod
def get_session_store_class(cls):
return SessionStore
class SessionStore(DBStore):
"""
    A database session store that handles updating the account ID column
inside the custom session model.
"""
@classmethod
def get_model_class(cls):
return CustomSession
def create_model_instance(self, data):
obj = super(SessionStore, self).create_model_instance(data)
try:
account_id = int(data.get('_auth_user_id'))
except (ValueError, TypeError):
account_id = None
obj.account_id = account_id
return obj
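# Example query enabled by the extra column (a sketch):
#   from django.utils import timezone
#   CustomSession.objects.filter(account_id=42, expire_date__gt=timezone.now())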
|