repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ryanrhymes/panns | panns/utils.py | 2 | 5104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Liang Wang <liang.wang@cs.helsinki.fi>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# Liang Wang @ CS Dept, Helsinki Univ, Finland
# 2014.05.01
#
import logging
import numpy
import tempfile
from scipy import linalg
from scipy.spatial import distance
logger = logging.getLogger('panns.utils')
class Node(object):
    """A single node of a random-projection binary tree.

    Attributes (declared via __slots__):
        proj: RNG seed used to regenerate this node's projection vector.
        ofst: split offset (mean projection value of the node's children).
        lchd: left child Node.
        rchd: right child Node.
        nlst: list of data point indices (populated on leaf nodes only).
    """
    # Bug fix: the original `class Node():` was an old-style class on
    # Python 2, where __slots__ is silently ignored and every instance
    # still carries a full __dict__. Subclassing object makes the memory
    # optimisation actually take effect.
    __slots__ = ['proj', 'ofst', 'lchd', 'rchd', 'nlst']
class NaiveTree(object):
    """Minimal container for one random-projection binary tree.

    Only the root is stored here; the rest of the structure lives in
    the linked Node objects themselves.
    """
    def __init__(self):
        # A freshly built tree is just a single, empty root node.
        self.root = Node()
class Metric():
    """
    The basic metric class used in Panns index building. Superclass
    of MetricEuclidean and MetricCosine.
    """

    @staticmethod
    def split(u, idxs, mtx):
        """
        Project the selected data points onto a random vector and
        return their mean projection value.

        Parameters:
          u:    random projection vector.
          idxs: indices of the data points to project.
          mtx:  the data set (row-based).
        """
        # Summing the rows first and projecting once is equivalent to
        # averaging the individual projections, since dot is linear.
        total = numpy.zeros(len(u), u.dtype)
        for idx in idxs:
            total += mtx[idx]
        return numpy.dot(u, total) / len(idxs)

    @staticmethod
    def side(u, v, offset):
        """
        Project v onto u, then report which side of the split it falls
        on given the offset.

        Parameters:
          u: random projection vector.
          v: data point to project.
        """
        margin = numpy.dot(u, v) - offset
        ### Todo: need to be fixed for small value
        if abs(margin) < 1e-08:
            # Points essentially on the hyperplane get a random side so
            # degenerate splits can still make progress.
            return numpy.random.uniform(0, 1, 1)[0] > 0.5
        return margin > 0
class MetricEuclidean(Metric):
    """
    Metric class for Euclidean indexes.
    """
    @staticmethod
    def distance(u, v):
        # Delegate to scipy's implementation of the Euclidean distance.
        return distance.euclidean(u, v)
class MetricCosine(Metric):
    """
    Metric class for cosine indexes: returns 1 - u.v, which equals the
    cosine distance when both vectors have unit length.
    """
    @staticmethod
    def distance(u, v):
        return 1.0 - numpy.dot(u, v)
def gaussian_vector(size, normalize=False, dtype='float32', seed=None):
    """
    Return a (normalized) Gaussian random vector.

    Parameters:
      size:      length of the vector.
      normalize: the vector length is normalized to 1 if True.
      dtype:     dtype of the returned vector.
      seed:      optional seed; the same seed always yields the same vector.
    """
    # Use a private RandomState instead of reseeding the global numpy RNG:
    # the original numpy.random.seed(seed) call clobbered global RNG state
    # for every caller. RandomState(seed).normal(...) yields exactly the
    # same values as seed(seed) followed by numpy.random.normal(...), so
    # seeded output is unchanged.
    rng = numpy.random.RandomState(seed)
    v = rng.normal(0, 1, size)
    if normalize:
        v = v / linalg.norm(v)
    # Honour the dtype argument, which the original implementation ignored
    # (it always returned float64 regardless of dtype).
    return v.astype(dtype)
def precision(relevant, retrieved):
    """
    Return the precision of the search result:
    |relevant & retrieved| / |retrieved|.

    Parameters:
      relevant:  the relevant data points.
      retrieved: the retrieved data points.

    Returns 0.0 when nothing was retrieved; the original implementation
    raised ZeroDivisionError in that case.
    """
    if len(retrieved) == 0:
        return 0.0
    return 1.0 * len(set(relevant) & set(retrieved)) / len(retrieved)
def recall(relevant, retrieved):
    """
    Return the recall of the search result:
    |relevant & retrieved| / |relevant|.

    Parameters:
      relevant:  the relevant data points.
      retrieved: the retrieved data points.

    Returns 0.0 when there are no relevant points; the original
    implementation raised ZeroDivisionError in that case.
    """
    if len(relevant) == 0:
        return 0.0
    return 1.0 * len(set(relevant) & set(retrieved)) / len(relevant)
def build_parallel(mtx, shape_mtx, K, dtype, t):
    """
    Build one index tree; module-level entry point for parallel index
    construction (the default python serialization cannot pickle
    instance methods, hence a free function).

    Parameters:
      mtx:       filename of the memmapped, row-based data set.
      shape_mtx: (rows, cols) shape of the data set.
      K:         max number of data points on a leaf.
      dtype:     dtype of the memmapped data.
      t:         index of the binary tree being built.
    """
    logger.info('pass %i ...' % t)
    # Re-open the data set read-only inside this worker.
    data = numpy.memmap(mtx, dtype=dtype, mode='r', shape=shape_mtx)
    # Perturb the RNG per tree so parallel workers build distinct trees.
    numpy.random.seed(t ** 2 + numpy.random.randint(2 ** 30))
    tree = NaiveTree()
    point_ids = range(len(data))
    make_tree_parallel(tree.root, point_ids, data, shape_mtx[1], dtype, K)
    return tree
def make_tree_parallel(parent, children, mtx, dim, dtype, K, lvl=0):
    """
    Recursively build a random-projection binary tree (parallel variant).

    Parameters:
      parent:   parent node (a Node instance) to populate.
      children: list of data point indices under this node.
      mtx:      row-based data set.
      dim:      dimensionality of the data points.
      dtype:    dtype used for the projection vectors.
      K:        max number of data points on a leaf.
      lvl:      current recursion depth; the leaf threshold grows with
                depth so pathological splits cannot recurse forever.
    """
    # Stop when few enough points remain; the threshold relaxes with depth.
    if len(children) <= max(K, lvl):
        parent.nlst = children
        return
    l_child, r_child = None, None
    # Retry a few times in case a projection fails to separate the points.
    for attempt in xrange(16):
        # Store only the RNG seed; the projection vector is regenerated on
        # demand, which keeps the serialized tree small.
        parent.proj = numpy.random.randint(2**32 - 1)
        u = gaussian_vector(dim, True, dtype, parent.proj)
        parent.ofst = Metric.split(u, children, mtx)
        l_child, r_child = [], []
        for i in children:
            if Metric.side(mtx[i], u, parent.ofst):
                r_child.append(i)
            else:
                l_child.append(i)
        if len(l_child) > 0 and len(r_child) > 0:
            break
    parent.lchd = Node()
    parent.rchd = Node()
    # Bug fix: the original recursed without passing lvl+1, so the
    # depth-dependent leaf threshold above never took effect and a
    # degenerate split could recurse extremely deep.
    make_tree_parallel(parent.lchd, l_child, mtx, dim, dtype, K, lvl + 1)
    make_tree_parallel(parent.rchd, r_child, mtx, dim, dtype, K, lvl + 1)
    return
def make_mmap(mtx, shape, dtype, fname=None):
    """
    Copy a row-based data set into a disk-backed numpy memmap and return
    the file name. A temporary file is created when fname is not given.
    """
    num_rows, num_cols = shape
    if fname is None:
        fname = tempfile.mkstemp()[1]
    logger.info('mmaping the data to %s ...' % fname)
    writer = numpy.memmap(fname, dtype=dtype, mode='w+', shape=(num_rows, num_cols))
    for row in xrange(num_rows):
        writer[row] = mtx[row]
    # Deleting the memmap flushes pending writes and closes the handle.
    del writer
    return fname
def load_mmap(fname, shape, dtype):
    """Re-open a memmapped data set created by make_mmap, read-only."""
    return numpy.memmap(fname, dtype=dtype, mode='r', shape=shape)
| gpl-2.0 |
bbc/kamaelia | Sketches/MH/RTP/ConnectedSocketAdapter.py | 3 | 12440 | # -*- coding: utf-8 -*-
# ConnectedSocketAdapter Component Class
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==========================
Talking to network sockets
==========================
A Connected Socket Adapter (CSA) component talks to a network server socket.
Data is sent to and received from the socket via this component's inboxes and
outboxes. A CSA is effectively a wrapper for a socket.
Most components should not need to create CSAs themselves. Instead, use
components such as TCPClient to make an outgoing connection, or TCPServer or
SimpleServer to be a server that responds to incoming connections.
Example Usage
-------------
See source code for TCPClient to see how Connected Socket Adapters can be used.
See also
--------
- TCPClient -- for making a connection to a server
- TCPServer --
- SimpleServer -- a prefab chassis for building a server
How does it work?
-----------------
A CSA is usually created either by a component such as TCPClient that wants to
establish a connection to a server; or by a primary listener socket - a
component acting as a server - listening for incoming connections from clients.
The socket should be set up and passed to the constructor to make the CSA.
At initialisation, specify the 'sendTo' destination (address,port) if data needs
to be sent to the socket using socket.sendto (specifying a destination) rather
than the socket.send method.
Incoming data, read by the CSA, is sent out of its "outbox" outbox as strings
containing the received binary data. Send data by sending it, as strings, to
the "inbox" outbox.
The CSA expects to be wired to a component that will notify it when new data
has arrived at its socket (by sending an Axon.Ipc.status message to its
"ReadReady" inbox. This is to allow the CSA to sleep rather than busy-wait or
blocking when waiting for new data to arrive. Typically this is the Selector
component.
This component will terminate (and close its socket) if it receives a
producerFinished message on its "control" inbox.
When this component terminates, it sends a socketShutdown(socket) message out of
its "CreatorFeedback" outbox and a shutdownCSA((selfCSA,self.socket)) message
out of its "signal" outbox.
The message sent to "CreatorFeedback" is to notify the original creator that
the socket is now closed and that this component should be unwired.
The message sent to the "signal" outbox serves to notify any other component
involved - such as the one feeding notifications to the "ReadReady" inbox (eg.
the Selector component).
"""
import socket, time
import errno
import Axon
from Axon.Component import component
from Axon.Ipc import wouldblock, status, producerFinished, shutdownMicroprocess
from Kamaelia.IPC import socketShutdown,newCSA,shutdownCSA
from Kamaelia.IPC import removeReader, removeWriter
from Kamaelia.IPC import newReader, newWriter
from Kamaelia.KamaeliaExceptions import *
import traceback
import pprint
# Module-level flags naming which socket failure classes should be reported
# noisily / should crash. NOTE(review): neither dict appears to be consulted
# anywhere in this file (the component uses its noisyErrors and
# crashOnBadDataToSend attributes instead) - possibly vestigial. The
# misspelt key "socketRecievingFailure" is a runtime value, so left as-is.
whinge = { "socketSendingFailure": True, "socketRecievingFailure": True }
crashAndBurn = { "uncheckedSocketShutdown" : True,
                 "receivingDataFailed" : True,
                 "sendingDataFailed" : True }
class ConnectedSocketAdapter(component):
    """\
    ConnectedSocketAdapter(listensocket,
                           selectorService
                           [,crashOnBadDataToSend]
                           [,noisyErrors]
                           [,sendTo]) -> new ConnectedSocketAdaptor component.
    Component for communicating with a socket. Send to its "inbox" inbox to
    send data, and receive data from its "outbox" outbox.
    "ReadReady" inbox must be wired to something that will notify it when new
    data has arrived at the socket.
    Keyword arguments::
    - listensocket -- the open socket to send/receive data to/from
    - selectorService -- (component,inboxname) for a Selector component
    - crashOnBadDataToSend -- True for TypeError to be raised if data to send is the wrong type, otherwise False (default=False)
    - noisyErrors -- True for errors to be printed to stdout, otherwise False (default=True)
    - sendTo -- None, or (host,port) to which socket will always be asked to send data.
    """
    Inboxes = { "inbox" : "Data for this CSA to send through the socket (Axon.Ipc.status message)",
                "control" : "Shutdown on producerFinished message (incoming & outgoing data is flushed first)",
                "ReadReady" : "Notify this CSA that there is incoming data ready on the socket",
                "SendReady" : "Notify this CSA that the socket is ready to send",
              }
    Outboxes = { "outbox" : "Data received from the socket",
                 "CreatorFeedback" : "Expected to be connected to some form of signal input on the CSA's creator. Signals socketShutdown (this socket has closed)",
                 "signal" : "Signals shutdownCSA (this CSA is shutting down)",
                 "_selectorSignal" : "For communication to the selector",
               }
    def __init__(self, listensocket, selectorService, crashOnBadDataToSend=False, noisyErrors = True, sendTo = None):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(ConnectedSocketAdapter, self).__init__()
        self.socket = listensocket
        # Outgoing data is queued here until the socket is writable.
        self.sendQueue = []
        self.crashOnBadDataToSend = crashOnBadDataToSend
        self.noisyErrors = noisyErrors
        self.sendTo = sendTo
        self.selectorService = selectorService
        # Request 128KB kernel send/receive buffers for this socket.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 131072)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 131072)
        # NOTE(review): leftover debug prints of the (possibly adjusted)
        # buffer sizes; consider removing or routing through a logger.
        print self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
        print self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
        # Records why the connection terminated (False until it does).
        self.howDied=False
    def handleControl(self):
        """Check for producerFinished message and shutdown in response"""
        if self.dataReady("control"):
            data = self.recv("control")
            if isinstance(data, producerFinished):
                # print "Raising shutdown: ConnectedSocketAdapter recieved producerFinished Message", self,data
                self.connectionRECVLive = False
                self.connectionSENDLive = False
                self.howDied = "producer finished"
            elif isinstance(data, shutdownMicroprocess):
                # print "Raising shutdown: ConnectedSocketAdapter recieved shutdownMicroprocess Message", self,data
                self.connectionRECVLive = False
                self.connectionSENDLive = False
                self.howDied = "shutdown microprocess"
            else:
                # Unrecognised control messages are deliberately ignored.
                pass # unrecognised message
    def handleSendRequest(self):
        """Check for data to send to the socket, add to an internal send queue buffer."""
        if self.dataReady("inbox"):
            data = self.recv("inbox")
            self.sendQueue.append(data)
    def passOnShutdown(self):
        # Tell our creator the socket is done (so it can unwire us), and
        # tell everyone else (e.g. the Selector) that this CSA is closing.
        self.send(socketShutdown(self,[self.socket,self.howDied]), "CreatorFeedback")
        self.send(shutdownCSA(self, (self,self.socket)), "signal")
    def _safesend(self, sock, data):
        """Internal only function, used for sending data, and handling EAGAIN style
        retry scenarios gracefully"""
        bytes_sent = 0
        try:
            if self.sendTo:
                # Datagram-style usage: always send to the fixed destination.
                bytes_sent = sock.sendto(data, self.sendTo)
            else:
                bytes_sent = sock.send(data)
            return bytes_sent
        except socket.error, socket.msg:
            # NOTE(review): this binds the caught exception to the attribute
            # socket.msg on the socket *module* - almost certainly intended
            # to be a plain local name.
            (errorno, errmsg) = socket.msg.args
            if not (errorno == errno.EAGAIN or errorno == errno.EWOULDBLOCK):
                # Anything other than "try again later" is fatal for sending.
                self.connectionSENDLive = False
                self.howDied = socket.msg
        except TypeError, ex:
            if self.noisyErrors:
                print "CSA: Exception sending on socket: ", ex, "(no automatic conversion to string occurs)."
            if self.crashOnBadDataToSend:
                raise ex
        # Send failed or blocked: stop sending until the selector notifies
        # us (via "SendReady") that the socket is writable again.
        self.sending = False
        if self.connectionSENDLive:
            self.send(newWriter(self, ((self, "SendReady"), sock)), "_selectorSignal")
        return bytes_sent
    def flushSendQueue(self):
        # Attempt to send the head of the queue; on a partial send keep the
        # unsent tail at the head of the queue for next time.
        if len(self.sendQueue) > 0:
            data = self.sendQueue[0]
            bytes_sent = self._safesend(self.socket, data)
            if bytes_sent:
                if bytes_sent == len(data):
                    del self.sendQueue[0]
                else:
                    self.sendQueue[0] = data[bytes_sent:]
    def _saferecv(self, sock, size=32768):
        """Internal only function, used for recieving data, and handling EAGAIN style
        retry scenarios gracefully"""
        try:
            data = sock.recv(size)
            if data:
                # NOTE(review): failcount is reset here but never initialized
                # in __init__ nor read anywhere else in this file.
                self.failcount = 0
                return data
            else: # This implies the connection has closed for some reason
                self.connectionRECVLive = False
        except socket.error, socket.msg:
            # NOTE(review): as in _safesend, the exception is bound onto the
            # socket module as socket.msg.
            (errorno, errmsg) = socket.msg.args
            if not (errorno == errno.EAGAIN or errorno == errno.EWOULDBLOCK):
                # "Recieving an error other than EAGAIN or EWOULDBLOCK when reading is a genuine error we don't handle"
                self.connectionRECVLive = False
                self.howDied = socket.msg
        # Nothing available: stop reading until the selector notifies us.
        self.receiving = False
        if self.connectionRECVLive:
            self.send(newReader(self, ((self, "ReadReady"), sock)), "_selectorSignal")
        return None # Explicit rather than implicit.
    def handleReceive(self):
        # Drain the socket until a read would block or the connection dies.
        successful = True
        while successful and self.connectionRECVLive: ### Fixme - probably want maximum iterations here
            socketdata = self._saferecv(self.socket, 32768) ### Receiving may die horribly
            if (socketdata):
                self.send(socketdata, "outbox")
                successful = True
            else:
                successful = False
        # print "There!",successful
        # if not self.connectionRECVLive:
        #     print len(self.outboxes["outbox"]), "FOO", socketdata
        #     print "self.howDied", self.howDied
    def checkSocketStatus(self):
        # Consume readiness notifications from the selector and raise the
        # corresponding "we may read/write now" flags.
        if self.dataReady("ReadReady"):
            self.receiving = True
            self.recv("ReadReady")
        if self.dataReady("SendReady"):
            self.sending = True
            self.recv("SendReady")
    def canDoSomething(self):
        # True when useful work is available right now; used to decide
        # whether to pause instead of busy-looping.
        if self.sending and len(self.sendQueue) > 0:
            return True
        if self.receiving:
            return True
        if self.anyReady():
            return True
        return False
    def main(self):
        """Main loop: shuttle data between our in/outboxes and the socket
        until either direction of the connection is declared dead."""
        self.link((self, "_selectorSignal"), self.selectorService)
        # self.selectorService ...
        self.sending = True
        self.receiving = True
        self.connectionRECVLive = True
        # NOTE(review): duplicated assignment - the line above already set
        # connectionRECVLive.
        self.connectionRECVLive = True
        self.connectionSENDLive = True
        while self.connectionRECVLive and self.connectionSENDLive: # Note, this means half close == close
            yield 1
            self.checkSocketStatus() # To be written
            self.handleSendRequest() # Check for data, in our "inbox", to send to the socket, add to an internal send queue buffer.
            self.handleControl() # Check for producerFinished message in "control" and shutdown in response
            if self.sending:
                self.flushSendQueue()
            if self.receiving:
                self.handleReceive()
            if not self.canDoSomething():
                # Nothing to do - sleep until a message or notification arrives.
                self.pause()
        self.passOnShutdown()
    # NOTE: the creator of this CSA is responsible for removing it from the selector
__kamaelia_components__ = ( ConnectedSocketAdapter, )
| apache-2.0 |
rohitwaghchaure/digitales_erpnext | erpnext/buying/doctype/quality_inspection/quality_inspection.py | 37 | 1831 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class QualityInspection(Document):
	def get_item_specification_details(self):
		"""Populate the qa_specification_details table from the QC
		parameters configured on this document's item_code."""
		self.set('qa_specification_details', [])
		# Security fix: the original interpolated item_code directly into
		# the SQL string ("parent = '%s'" % ...), which is an injection
		# risk; bind it as a query parameter instead.
		specification = frappe.db.sql("""select specification, value
			from `tabItem Quality Inspection Parameter`
			where parent = %s order by idx""", (self.item_code,))
		for d in specification:
			child = self.append('qa_specification_details', {})
			child.specification = d[0]
			child.value = d[1]
			# Every row defaults to Accepted; the inspector downgrades
			# individual rows as needed.
			child.status = 'Accepted'

	def on_submit(self):
		"""Stamp the linked Purchase Receipt item rows with this QA number."""
		if self.purchase_receipt_no:
			frappe.db.sql("""update `tabPurchase Receipt Item` t1, `tabPurchase Receipt` t2
				set t1.qa_no = %s, t2.modified = %s
				where t1.parent = %s and t1.item_code = %s and t1.parent = t2.name""",
				(self.name, self.modified, self.purchase_receipt_no,
					self.item_code))

	def on_cancel(self):
		"""Clear this QA number from the linked Purchase Receipt item rows."""
		if self.purchase_receipt_no:
			frappe.db.sql("""update `tabPurchase Receipt Item` t1, `tabPurchase Receipt` t2
				set t1.qa_no = '', t2.modified = %s
				where t1.parent = %s and t1.item_code = %s and t1.parent = t2.name""",
				(self.modified, self.purchase_receipt_no, self.item_code))
def item_query(doctype, txt, searchfield, start, page_len, filters):
	"""Search-field query: return item codes from the child doctype named
	in filters["from"], restricted to filters["parent"], matching txt.

	Returns None (implicitly) when no source doctype is supplied.
	"""
	if filters.get("from"):
		from frappe.widgets.reportview import get_match_cond
		# Security fix: the original formatted txt, parent, start and
		# page_len straight into the SQL string, allowing SQL injection
		# via the search box. The table name and match condition cannot
		# be bound as parameters, but everything user-controlled now is;
		# the paging values are cast to int so LIMIT stays numeric.
		return frappe.db.sql("""select item_code from `tab%(from)s`
			where parent = %%s and docstatus < 2 and item_code like %%s %(mcond)s
			order by item_code limit %%s, %%s""" % {
				"from": filters["from"],
				"mcond": get_match_cond(filters["from"])
			},
			(filters.get("parent"), "%%%s%%" % txt, int(start), int(page_len)))
codemac/servo | tests/wpt/css-tests/tools/wptserve/wptserve/server.py | 41 | 17484 | import BaseHTTPServer
import errno
import os
import re
import socket
from SocketServer import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import types
import urlparse
import routes as default_routes
from logger import get_logger
from request import Server, Request
from response import Response
from router import Router
from utils import HTTPException
"""HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        self.rules = {}
        # Register in reverse so that, for duplicate input paths, the
        # earliest entry in `rules` is the one that ends up registered.
        for rule in reversed(rules):
            self.register(*rule)
        self.logger = get_logger()

    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.
        :param input_path: Path to match for the initial request.
        :param output_path: Path to replace the input path with in
                            the request.
        """
        # A bare string is shorthand for a single-method list.
        if type(methods) in types.StringTypes:
            methods = [methods]
        self.rules[input_path] = (methods, output_path)

    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        parts = urlparse.urlsplit(request_handler.path)
        rule = self.rules.get(parts.path)
        if rule is None:
            return
        methods, destination = rule
        if "*" not in methods and request_handler.command not in methods:
            return
        self.logger.debug("Rewriting request path %s to %s" %
                          (request_handler.path, destination))
        # Swap only the path component, keeping scheme/query/fragment.
        rewritten = list(parts)
        rewritten[2] = destination
        request_handler.path = urlparse.urlunsplit(rewritten)
class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """Threaded HTTP(S) server: each request is handled on its own
    (daemon) thread, so shutdown never waits for in-flight requests."""
    allow_reuse_address = True
    # Errors treated as "client hung up" and silently ignored.
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000
    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True
    def __init__(self, server_address, RequestHandlerClass, router, rewriter, bind_hostname,
                 config=None, use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, latency=None, **kwargs):
        """Server for HTTP(s) Requests
        :param server_address: tuple of (server_name, port)
        :param RequestHandlerClass: BaseHTTPRequestHandler-like class to use for
                                    handling requests.
        :param router: Router instance to use for matching requests to handler
                       functions
        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed
        :param config: Dictionary holding environment configuration settings for
                       handlers to read, or None to use the default values.
        :param use_ssl: Boolean indicating whether the server should use SSL
        :param key_file: Path to key file to use if SSL is enabled.
        :param certificate: Path to certificate to use if SSL is enabled.
        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.
        :param bind_hostname True to bind the server to both the hostname and
                             port specified in the server_address parameter.
                             False to bind the server only to the port in the
                             server_address parameter, but not to the hostname.
        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter
        self.scheme = "https" if use_ssl else "http"
        self.logger = get_logger()
        self.latency = latency
        if bind_hostname:
            hostname_port = server_address
        else:
            # Bind to all interfaces on the given port only.
            hostname_port = ("",server_address[1])
        #super doesn't work here because BaseHTTPServer.HTTPServer is old-style
        BaseHTTPServer.HTTPServer.__init__(self, hostname_port, RequestHandlerClass, **kwargs)
        if config is not None:
            Server.config = config
        else:
            self.logger.debug("Using default configuration")
            # Minimal config: a single domain and port derived from the
            # requested bind address.
            Server.config = {"host": server_address[0],
                             "domains": {"": server_address[0]},
                             "ports": {"http": [self.server_address[1]]}}
        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect
        # Wrap the listening socket immediately unless encryption is
        # deferred until a CONNECT request (self-proxy mode).
        if use_ssl and not encrypt_after_connect:
            self.socket = ssl.wrap_socket(self.socket,
                                          keyfile=self.key_file,
                                          certfile=self.certificate,
                                          server_side=True)
    def handle_error(self, request, client_address):
        """Suppress 'remote hung up' errors; log anything else."""
        error = sys.exc_value
        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors)
            or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass # remote hang up before the result is sent
        else:
            self.logger.error(traceback.format_exc())
class WebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""RequestHandler for WebTestHttpd"""
protocol_version = "HTTP/1.1"
def handle_one_request(self):
response = None
self.logger = get_logger()
try:
self.close_connection = False
request_line_is_valid = self.get_request_line()
if self.close_connection:
return
request_is_valid = self.parse_request()
if not request_is_valid:
#parse_request() actually sends its own error responses
return
self.server.rewriter.rewrite(self)
request = Request(self)
response = Response(self, request)
if request.method == "CONNECT":
self.handle_connect(response)
return
if not request_line_is_valid:
response.set_error(414)
response.write()
return
self.logger.debug("%s %s" % (request.method, request.request_path))
handler = self.server.router.get_handler(request)
if self.server.latency is not None:
if callable(self.server.latency):
latency = self.server.latency()
else:
latency = self.server.latency
self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
time.sleep(latency / 1000.)
if handler is None:
response.set_error(404)
else:
try:
handler(request, response)
except HTTPException as e:
response.set_error(e.code, e.message)
except Exception as e:
if e.message:
err = [e.message]
else:
err = []
err.append(traceback.format_exc())
response.set_error(500, "\n".join(err))
self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
request.method,
request.request_path,
request.headers.get('Referer'),
request.raw_input.length))
if not response.writer.content_written:
response.write()
# If we want to remove this in the future, a solution is needed for
# scripts that produce a non-string iterable of content, since these
# can't set a Content-Length header. A notable example of this kind of
# problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
if response.close_connection:
self.close_connection = True
if not self.close_connection:
# Ensure that the whole request has been read from the socket
request.raw_input.read()
except socket.timeout, e:
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
except Exception as e:
err = traceback.format_exc()
if response:
response.set_error(500, err)
response.write()
logger.error(err)
def get_request_line(self):
try:
self.raw_requestline = self.rfile.readline(65537)
except socket.error:
self.close_connection = True
return False
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
return False
if not self.raw_requestline:
self.close_connection = True
return True
def handle_connect(self, response):
self.logger.debug("Got CONNECT")
response.status = 200
response.write()
if self.server.encrypt_after_connect:
self.logger.debug("Enabling SSL for connection")
self.request = ssl.wrap_socket(self.connection,
keyfile=self.server.key_file,
certfile=self.server.certificate,
server_side=True)
self.setup()
return
class WebTestHttpd(object):
    """
    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_hostname: Boolean indicating whether to bind server to hostname.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms
    HTTP server designed for testing scenarios.
    Takes a router class which provides one method get_handler which takes a Request
    and returns a handler function.
    .. attribute:: host
      The host name or ip address of the server
    .. attribute:: port
      The port on which the server is running
    .. attribute:: router
      The Router object used to associate requests with resources for this server
    .. attribute:: rewriter
      The Rewriter object used for URL rewriting
    .. attribute:: use_ssl
      Boolean indicating whether the server is using ssl
    .. attribute:: started
      Boolean indicating whether the server is running
    """
    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
                 router_cls=Router, doc_root=os.curdir, routes=None,
                 rewriter_cls=RequestRewriter, bind_hostname=True, rewrites=None,
                 latency=None, config=None):
        if routes is None:
            routes = default_routes.routes
        self.host = host
        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])
        self.use_ssl = use_ssl
        self.logger = get_logger()
        if server_cls is None:
            server_cls = WebTestServer
        if use_ssl:
            # A certificate is mandatory for SSL; a separate key file is
            # optional (it may be bundled in the certificate file), but
            # must exist when supplied.
            if key_file is not None:
                assert os.path.exists(key_file)
            assert certificate is not None and os.path.exists(certificate)
        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_hostname=bind_hostname,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency)
            self.started = False
            # Read the port back from the socket so port=0 (ephemeral
            # port chosen by the OS) is reported correctly.
            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            self.logger.error('Init failed! You may need to modify your hosts file. Refer to README.md.');
            raise
    def start(self, block=False):
        """Start the server.
        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        self.logger.info("Starting http server on %s:%s" % (self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.setDaemon(True) # don't hang on exit
            self.server_thread.start()
    def stop(self):
        """
        Stops the server.
        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                # Started in blocking mode: there is no server_thread.
                pass
            self.started = False
        self.httpd = None
    def get_url(self, path="/", query=None, fragment=None):
        """Return an absolute URL for `path` on this server, or None if
        the server has not been started."""
        if not self.started:
            return None
        return urlparse.urlunsplit(("http" if not self.use_ssl else "https",
                                    "%s:%s" % (self.host, self.port),
                                    path, query, fragment))
| mpl-2.0 |
sushantgoel/Flask | Work/Trivia - Module 5/env/Lib/site-packages/flask/blueprints.py | 773 | 16320 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
    """Temporary holder object for registering a blueprint with the
    application. An instance of this class is created by the
    :meth:`~flask.Blueprint.make_setup_state` method and later passed
    to all register callback functions.
    """

    def __init__(self, blueprint, app, options, first_registration):
        #: a reference to the current application
        self.app = app
        #: a reference to the blueprint that created this setup state.
        self.blueprint = blueprint
        #: a dictionary with all options that were passed to the
        #: :meth:`~flask.Flask.register_blueprint` method.
        self.options = options
        #: as blueprints can be registered multiple times with the
        #: application and not everything wants to be registered
        #: multiple times on it, this attribute can be used to figure
        #: out if the blueprint was registered in the past already.
        self.first_registration = first_registration

        #: The subdomain that the blueprint should be active for, `None`
        #: otherwise. The per-registration option wins over the
        #: blueprint-level default.
        self.subdomain = options.get('subdomain')
        if self.subdomain is None:
            self.subdomain = blueprint.subdomain

        #: The prefix that should be used for all URLs defined on the
        #: blueprint.
        self.url_prefix = options.get('url_prefix')
        if self.url_prefix is None:
            self.url_prefix = blueprint.url_prefix

        #: A dictionary with URL defaults that is added to each and every
        #: URL that was defined with the blueprint.
        self.url_defaults = dict(blueprint.url_values_defaults)
        self.url_defaults.update(options.get('url_defaults', ()))

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """A helper method to register a rule (and optionally a view function)
        to the application. The endpoint is automatically prefixed with the
        blueprint's name.
        """
        if self.url_prefix:
            rule = self.url_prefix + rule
        options.setdefault('subdomain', self.subdomain)
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func)
        merged_defaults = self.url_defaults
        if 'defaults' in options:
            # Per-rule defaults are layered on top of the blueprint-wide ones.
            merged_defaults = dict(merged_defaults, **options.pop('defaults'))
        self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
                              view_func, defaults=merged_defaults, **options)
class Blueprint(_PackageBoundObject):
    """Represents a blueprint.  A blueprint is an object that records
    functions that will be called with the
    :class:`~flask.blueprint.BlueprintSetupState` later to register functions
    or other things on the main application.  See :ref:`blueprints` for more
    information.

    .. versionadded:: 0.7
    """

    #: If True, a warning is emitted when the blueprint is modified after
    #: it has already been registered (late changes do not take effect).
    warn_on_modifications = False
    _got_registered_once = False

    def __init__(self, name, import_name, static_folder=None,
                 static_url_path=None, template_folder=None,
                 url_prefix=None, subdomain=None, url_defaults=None):
        _PackageBoundObject.__init__(self, import_name, template_folder)
        self.name = name
        self.url_prefix = url_prefix
        self.subdomain = subdomain
        self.static_folder = static_folder
        self.static_url_path = static_url_path
        #: Deferred registration callbacks, replayed (in order) against a
        #: :class:`BlueprintSetupState` each time the blueprint is
        #: registered on an application.
        self.deferred_functions = []
        self.view_functions = {}
        if url_defaults is None:
            url_defaults = {}
        self.url_values_defaults = url_defaults

    def record(self, func):
        """Registers a function that is called when the blueprint is
        registered on the application.  This function is called with the
        state as argument as returned by the :meth:`make_setup_state`
        method.
        """
        if self._got_registered_once and self.warn_on_modifications:
            from warnings import warn
            warn(Warning('The blueprint was already registered once '
                         'but is getting modified now.  These changes '
                         'will not show up.'))
        self.deferred_functions.append(func)

    def record_once(self, func):
        """Works like :meth:`record` but wraps the function in another
        function that will ensure the function is only called once.  If the
        blueprint is registered a second time on the application, the
        function passed is not called.
        """
        def wrapper(state):
            if state.first_registration:
                func(state)
        return self.record(update_wrapper(wrapper, func))

    def make_setup_state(self, app, options, first_registration=False):
        """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
        object that is later passed to the register callback functions.
        Subclasses can override this to return a subclass of the setup state.
        """
        return BlueprintSetupState(self, app, options, first_registration)

    def register(self, app, options, first_registration=False):
        """Called by :meth:`Flask.register_blueprint` to register a blueprint
        on the application.  This can be overridden to customize the register
        behavior.  Keyword arguments from
        :func:`~flask.Flask.register_blueprint` are directly forwarded to this
        method in the `options` dictionary.
        """
        self._got_registered_once = True
        state = self.make_setup_state(app, options, first_registration)
        if self.has_static_folder:
            state.add_url_rule(self.static_url_path + '/<path:filename>',
                               view_func=self.send_static_file,
                               endpoint='static')
        for deferred in self.deferred_functions:
            deferred(state)

    def route(self, rule, **options):
        """Like :meth:`Flask.route` but for a blueprint.  The endpoint for the
        :func:`url_for` function is prefixed with the name of the blueprint.
        """
        def decorator(f):
            endpoint = options.pop("endpoint", f.__name__)
            self.add_url_rule(rule, endpoint, f, **options)
            return f
        return decorator

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Like :meth:`Flask.add_url_rule` but for a blueprint.  The endpoint for
        the :func:`url_for` function is prefixed with the name of the blueprint.
        """
        if endpoint:
            # Dots are reserved as the blueprint/endpoint separator.
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
        self.record(lambda s:
            s.add_url_rule(rule, endpoint, view_func, **options))

    def endpoint(self, endpoint):
        """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
        prefix the endpoint with the blueprint name, this has to be done
        explicitly by the user of this method.  If the endpoint is prefixed
        with a `.` it will be registered to the current blueprint, otherwise
        it's an application independent endpoint.
        """
        def decorator(f):
            def register_endpoint(state):
                state.app.view_functions[endpoint] = f
            self.record_once(register_endpoint)
            return f
        return decorator

    def app_template_filter(self, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.template_filter` but for a blueprint.

        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_filter(f, name=name)
            return f
        return decorator

    def add_app_template_filter(self, f, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.add_template_filter` but for a blueprint.  Works exactly
        like the :meth:`app_template_filter` decorator.

        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.filters[name or f.__name__] = f
        self.record_once(register_template)

    def app_template_test(self, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.template_test` but for a blueprint.

        .. versionadded:: 0.10

        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_test(f, name=name)
            return f
        return decorator

    def add_app_template_test(self, f, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.add_template_test` but for a blueprint.  Works exactly
        like the :meth:`app_template_test` decorator.

        .. versionadded:: 0.10

        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.tests[name or f.__name__] = f
        self.record_once(register_template)

    def app_template_global(self, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.template_global` but for a blueprint.

        .. versionadded:: 0.10

        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_global(f, name=name)
            return f
        return decorator

    def add_app_template_global(self, f, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.add_template_global` but for a blueprint.  Works exactly
        like the :meth:`app_template_global` decorator.

        .. versionadded:: 0.10

        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.globals[name or f.__name__] = f
        self.record_once(register_template)

    def before_request(self, f):
        """Like :meth:`Flask.before_request` but for a blueprint.  This function
        is only executed before each request that is handled by a function of
        that blueprint.
        """
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def before_app_request(self, f):
        """Like :meth:`Flask.before_request`.  Such a function is executed
        before each request, even if outside of a blueprint.
        """
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(None, []).append(f))
        return f

    def before_app_first_request(self, f):
        """Like :meth:`Flask.before_first_request`.  Such a function is
        executed before the first request to the application.
        """
        self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
        return f

    def after_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  This function
        is only executed after each request that is handled by a function of
        that blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def after_app_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  Such a function
        is executed after each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(None, []).append(f))
        return f

    def teardown_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  This
        function is only executed when tearing down requests handled by a
        function of that blueprint.  Teardown request functions are executed
        when the request context is popped, even when no actual request was
        performed.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def teardown_app_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  Such a
        function is executed when tearing down each request, even if outside of
        the blueprint.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(None, []).append(f))
        return f

    def context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  This
        function is only executed for requests handled by a blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(self.name, []).append(f))
        return f

    def app_context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  Such a
        function is executed each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(None, []).append(f))
        return f

    def app_errorhandler(self, code):
        """Like :meth:`Flask.errorhandler` but for a blueprint.  This
        handler is used for all requests, even if outside of the blueprint.
        """
        def decorator(f):
            self.record_once(lambda s: s.app.errorhandler(code)(f))
            return f
        return decorator

    def url_value_preprocessor(self, f):
        """Registers a function as URL value preprocessor for this
        blueprint.  It's called before the view functions are called and
        can modify the url values provided.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(self.name, []).append(f))
        return f

    def url_defaults(self, f):
        """Callback function for URL defaults for this blueprint.  It's called
        with the endpoint and values and should update the values passed
        in place.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(self.name, []).append(f))
        return f

    def app_url_value_preprocessor(self, f):
        """Same as :meth:`url_value_preprocessor` but application wide.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(None, []).append(f))
        return f

    def app_url_defaults(self, f):
        """Same as :meth:`url_defaults` but application wide.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(None, []).append(f))
        return f

    def errorhandler(self, code_or_exception):
        """Registers an error handler that becomes active for this blueprint
        only.  Please be aware that routing does not happen local to a
        blueprint so an error handler for 404 usually is not handled by
        a blueprint unless it is caused inside a view function.  Another
        special case is the 500 internal server error which is always looked
        up from the application.

        Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
        of the :class:`~flask.Flask` object.
        """
        def decorator(f):
            self.record_once(lambda s: s.app._register_error_handler(
                self.name, code_or_exception, f))
            return f
        return decorator
| apache-2.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/numpy/polynomial/hermite.py | 49 | 56931 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Names exported by ``from numpy.polynomial.hermite import *``.
__all__ = [
    'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
    'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
    'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
    'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
    'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
    'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
# Trimming trailing coefficients is basis independent, so the generic
# polyutils helper is reused directly.
hermtrim = pu.trimcoef
def poly2herm(pol):
    """
    poly2herm(pol)

    Convert a polynomial to a Hermite series.

    Convert an array representing the coefficients of a polynomial (relative
    to the "standard" basis) ordered from lowest degree to highest, to an
    array of the coefficients of the equivalent Hermite series, ordered
    from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herm2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import poly2herm
    >>> poly2herm(np.arange(4))
    array([ 1.  ,  2.75 ,  0.5  ,  0.375])

    """
    [pol] = pu.as_series([pol])
    # Horner's scheme performed in the Hermite basis: fold in the
    # coefficients from highest degree down, multiplying the running
    # result by x (via hermmulx) at each step.
    result = 0
    for coef in pol[::-1]:
        result = hermadd(hermmulx(result), coef)
    return result
def herm2poly(c):
    """
    Convert a Hermite series to a polynomial.
    Convert an array representing the coefficients of a Hermite series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.
    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.
    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.
    See Also
    --------
    poly2herm
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy.polynomial.hermite import herm2poly
    >>> herm2poly([ 1.   ,  2.75 ,  0.5  ,  0.375])
    array([ 0.,  1.,  2.,  3.])
    """
    # Imported locally to avoid a circular import between the hermite and
    # polynomial modules at module load time.
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        return c
    if n == 2:
        # H_1(x) = 2*x, so a degree-1 series converts by doubling c[1].
        c[1] *= 2
        return c
    else:
        # Clenshaw-style downward recurrence built on the three-term
        # relation H_{k+1}(x) = 2*x*H_k(x) - 2*k*H_{k-1}(x).  c0 and c1
        # hold polynomial-valued partial results.
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], c1*(2*(i - 1)))
            c1 = polyadd(tmp, polymulx(c1)*2)
        # Final combination: c0 + 2*x*c1 in the standard basis.
        return polyadd(c0, polymulx(c1)*2)
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
# (hermx is the exception: true division makes 1/2 a float.)
#
# Hermite
hermdomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermline, hermval
    >>> hermval(0,hermline(3, 2))
    3.0
    >>> hermval(1,hermline(3, 2))
    5.0

    """
    # A zero slope degenerates to a constant series.
    if scl == 0:
        return np.array([off])
    # x itself equals (1/2)*H_1(x), so the slope coefficient is halved.
    return np.array([off, scl/2])
def hermfromroots(roots):
    """
    Generate a Hermite series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Hermite form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... +  c_n * H_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Hermite form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, chebfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfromroots, hermval
    >>> coef = hermfromroots((-1, 0, 1))
    >>> hermval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = hermfromroots((-1j, 1j))
    >>> hermval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])

    """
    # An empty root list gives the constant polynomial 1.
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # One linear factor (x - r) per root, each expressed in Hermite form.
    factors = [hermline(-r, 1) for r in roots]
    # Multiply the factors pairwise (balanced tree) so intermediate
    # series stay as short as possible.
    count = len(factors)
    while count > 1:
        half, odd = divmod(count, 2)
        paired = [hermmul(factors[j], factors[j + half]) for j in range(half)]
        if odd:
            paired[0] = hermmul(paired[0], factors[-1])
        factors = paired
        count = half
    return factors[0]
def hermadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermsub, hermmul, hermdiv, hermpow

    Notes
    -----
    Unlike multiplication, division, etc., the sum of two Hermite series
    is a Hermite series (without having to "reproject" the result onto
    the basis set) so addition, just like that of "standard" polynomials,
    is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermadd
    >>> hermadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # Make c1 the longer of the two, then add the shorter into its head;
    # the sum is component-wise in any fixed basis.
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def hermsub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermadd, hermmul, hermdiv, hermpow

    Notes
    -----
    Unlike multiplication, division, etc., the difference of two Hermite
    series is a Hermite series (without having to "reproject" the result
    onto the basis set) so subtraction, just like that of "standard"
    polynomials, is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermsub
    >>> hermsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # Component-wise difference; work in whichever array is at least as
    # long so the high-order tail is preserved.
    if len(c1) >= len(c2):
        diff = c1
        diff[:c2.size] -= c2
    else:
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def hermmulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

    xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmulx
    >>> hermmulx([1, 2, 3])
    array([ 2. ,  6.5,  1. ,  1.5])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series maps to itself and needs no new array.
    if len(c) == 1 and c[0] == 0:
        return c
    n = len(c)
    out = np.empty(n + 1, dtype=c.dtype)
    # x*H_i = H_{i+1}/2 + i*H_{i-1}: every coefficient contributes half
    # of itself one slot up and i times itself one slot down.
    out[0] = c[0]*0
    out[1] = c[0]/2
    out[2:] = c[1:]/2
    out[:n - 1] += c[1:]*np.arange(1, n)
    return out
def hermmul(c1, c2):
    """
    Multiply one Hermite series by another.
    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.
    See Also
    --------
    hermadd, hermsub, hermdiv, hermpow
    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Hermite polynomial basis set.  Thus, to express
    the product as a Hermite series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.
    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmul
    >>> hermmul([1, 2, 3], [0, 1, 2])
    array([ 52.,  29.,  52.,   7.,   6.])
    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # Let c be the shorter series: it drives a Clenshaw-style recurrence
    # with series-valued "coefficients", keeping the work proportional to
    # the shorter length.
    if len(c1) > len(c2):
        c = c2
        xs = c1
    else:
        c = c1
        xs = c2
    if len(c) == 1:
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else:
        # Downward recurrence based on H_{k+1} = 2*x*H_k - 2*k*H_{k-1};
        # c0 and c1 are Hermite series (arrays), not scalars, and nd
        # tracks the degree associated with c1.
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
            c1 = hermadd(tmp, hermmulx(c1)*2)
    # Final combination c0 + 2*x*c1 yields the reprojected product.
    return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
    """
    Divide one Hermite series by another.
    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.
    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.
    See Also
    --------
    hermadd, hermsub, hermmul, hermpow
    Notes
    -----
    In general, the (polynomial) division of one Hermite series by another
    results in quotient and remainder terms that are not in the Hermite
    polynomial basis set.  Thus, to express these results as a Hermite
    series, it is necessary to "reproject" the results onto the Hermite
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.
    Examples
    --------
    >>> from numpy.polynomial.hermite import hermdiv
    >>> hermdiv([ 52.,  29.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> hermdiv([ 54.,  31.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 2.,  2.]))
    >>> hermdiv([ 53.,  30.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  1.]))
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # as_series trims trailing zeros, so a zero leading coefficient here
    # means c2 is the zero series.
    if c2[-1] == 0:
        raise ZeroDivisionError()
    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2:
        # Divisor degree exceeds dividend degree: quotient 0, remainder c1.
        return c1[:1]*0, c1
    elif lc2 == 1:
        # Division by a constant is a plain scale; remainder is zero.
        return c1/c2[-1], c1[:1]*0
    else:
        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
        rem = c1
        # Long division from the highest quotient degree down.  Each step
        # multiplies c2 by the basis element H_i (reprojected via hermmul)
        # and cancels the current leading coefficient of the remainder.
        for i in range(lc1 - lc2, - 1, -1):
            p = hermmul([0]*i + [1], c2)
            q = rem[-1]/p[-1]
            rem = rem[:-1] - q*p[:-1]
            quo[i] = q
        return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16):
    """Raise a Hermite series to a power.

    Returns the Hermite series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series  ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Hermite series of power.

    See Also
    --------
    hermadd, hermsub, hermmul, hermdiv

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermpow
    >>> hermpow([1, 2, 3], 2)
    array([ 81.,  52.,  82.,  12.,   9.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    elif maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    elif power == 0:
        return np.array([1], dtype=c.dtype)
    elif power == 1:
        return c
    else:
        # Binary (square-and-multiply) exponentiation: O(log power)
        # Hermite multiplications instead of the naive power - 1.
        prd = None
        base = c
        n = power
        while n > 0:
            if n & 1:
                # Fold the current square into the accumulated product.
                prd = base if prd is None else hermmul(prd, base)
            n >>= 1
            if n:
                base = hermmul(base, base)
        return prd
def hermder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Hermite series.
    Returns the Hermite series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
    while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
    2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.
    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If `c` is multidimensional the
        different axis correspond to different variables with the degree in
        each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).
        .. versionadded:: 1.7.0
    Returns
    -------
    der : ndarray
        Hermite series of the derivative.
    See Also
    --------
    hermint
    Notes
    -----
    In general, the result of differentiating a Hermite series does not
    resemble the same operation on a power series. Thus the result of this
    function may be "unintuitive," albeit correct; see Examples section
    below.
    Examples
    --------
    >>> from numpy.polynomial.hermite import hermder
    >>> hermder([ 1. ,  0.5,  0.5,  0.5])
    array([ 1.,  2.,  3.])
    >>> hermder([-0.5,  1./2.,  1./8.,  1./12.,  1./16.], m=2)
    array([ 1.,  2.,  3.])
    """
    c = np.array(c, ndmin=1, copy=1)
    # Promote boolean/integer coefficient arrays to double so the in-place
    # arithmetic below does not truncate.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    # Bring the differentiation axis to the front so the recurrence below
    # can index coefficients by degree directly.
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # Differentiating at least as many times as the degree leaves zero.
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # d/dx H_j(x) = 2*j*H_{j-1}(x), applied per degree.
            for j in range(n, 0, -1):
                der[j - 1] = (2*j)*c[j]
            c = der
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite series.
    Returns the Hermite series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
    represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
    2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If c is multidimensional the
        different axis correspond to different variables with the degree in
        each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).
        .. versionadded:: 1.7.0
    Returns
    -------
    S : ndarray
        Hermite series coefficients of the integral.
    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.
    See Also
    --------
    hermder
    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    .. math::`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.
    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.
    Examples
    --------
    >>> from numpy.polynomial.hermite import hermint
    >>> hermint([1,2,3]) # integrate once, value 0 at 0.
    array([ 1. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
    array([-0.5       ,  0.5       ,  0.125     ,  0.08333333,  0.0625    ])
    >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
    array([ 2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
    array([-2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
    array([ 1.66666667, -0.5       ,  0.125     ,  0.08333333,  0.0625    ])
    """
    c = np.array(c, ndmin=1, copy=1)
    # Promote boolean/integer coefficient arrays to double so the in-place
    # arithmetic below does not truncate.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    # A scalar k is allowed as shorthand for a one-element list.
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    # Bring the integration axis to the front so coefficients can be
    # indexed by degree; pad missing integration constants with zeros.
    c = np.rollaxis(c, iaxis)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integrating the zero series only adds the constant.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # Antiderivative of H_j is H_{j+1}/(2*(j+1)); the constant
            # slot is fixed afterwards so the integral is k[i] at lbnd.
            tmp[0] = c[0]*0
            tmp[1] = c[0]/2
            for j in range(1, n):
                tmp[j + 1] = c[j]/(2*(j + 1))
            tmp[0] += k[i] - hermval(lbnd, tmp)
            c = tmp
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermval(x, c, tensor=True):
    """
    Evaluate an Hermite series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`, so that every column
        of coefficients in `c` is evaluated for every element of `x`. If
        False, `x` is broadcast over the columns of `c`. The default is
        True.

    Returns
    -------
    values : ndarray, algebra_like
        If `c` is 1-D, `p(x)` has the same shape as `x`. If `c` is
        multidimensional and `tensor` is true the shape is
        ``c.shape[1:] + x.shape``, otherwise ``c.shape[1:]``.

    See Also
    --------
    hermval2d, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.
    Trailing zeros in the coefficients are used in the evaluation, so they
    should be avoided if efficiency is a concern.

    Examples
    --------
    >>> coef = [1,2,3]
    >>> hermval(1, coef)
    11.0
    >>> hermval([[1,2],[3,4]], coef)
    array([[  11.,   51.],
           [ 115.,  203.]])
    """
    # Always copy; the historical ``copy=0`` flag is rejected by NumPy 2
    # (copy=False now raises ValueError when a copy is unavoidable, e.g.
    # for list input).
    c = np.array(c, ndmin=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote bool/integer coefficients so division-free recurrence
        # arithmetic happens in floating point
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # append one axis per dimension of x so every coefficient column
        # broadcasts against every point
        c = c.reshape(c.shape + (1,)*x.ndim)

    x2 = x*2
    if len(c) == 1:
        c0 = c[0]
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]
        c1 = c[1]
    else:
        nd = len(c)
        c0 = c[-2]
        c1 = c[-1]
        # Clenshaw recursion adapted to the Hermite recurrence
        # H_{n+1}(x) = 2*x*H_n(x) - 2*n*H_{n-1}(x)
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = c[-i] - c1*(2*(nd - 1))
            c1 = tmp + c1*x2
    return c0 + c1*x2
def hermval2d(x, y, c):
    """
    Evaluate a 2-D Hermite series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a
        list or tuple, it is first converted to an ndarray, otherwise it
        is left unchanged and treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed
        with pairs of corresponding values from `x` and `y`.

    Raises
    ------
    ValueError
        If `x` and `y` have incompatible shapes.

    See Also
    --------
    hermval, hermgrid2d, hermval3d, hermgrid3d
    """
    try:
        # Stacking forces x and y to a common shape; the deprecated
        # ``copy=0`` flag was dropped (NumPy 2 rejects copy=False when a
        # copy is needed).
        x, y = np.array((x, y))
    # narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt
    except Exception:
        raise ValueError('x, y are incompatible')

    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    return c
def hermgrid2d(x, y, c):
    """
    Evaluate a 2-D Hermite series on the Cartesian product of x and y.

    Returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)

    where the points `(a, b)` consist of all pairs formed by taking `a`
    from `x` and `b` from `y`. The resulting points form a grid with `x`
    in the first dimension and `y` in the second.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Evaluation points. Lists and tuples are converted to ndarrays;
        anything else that is not an ndarray is treated as a scalar.
    c : array_like
        Coefficient array; ``c[i,j]`` holds the coefficient of
        ``H_i * H_j``. Extra trailing dimensions enumerate multiple
        coefficient sets.

    Returns
    -------
    values : ndarray, compatible object
        Values of the series on the grid; the shape is
        ``c.shape[2:] + x.shape + y.shape``.

    See Also
    --------
    hermval, hermval2d, hermval3d, hermgrid3d
    """
    # Evaluate along x first (tensor mode appends x's axes), then along y.
    return hermval(y, hermval(x, c))
def hermval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape.
        Lists and tuples are converted to ndarrays; anything else that is
        not an ndarray is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than 3 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed
        with triples of corresponding values from `x`, `y`, and `z`.

    Raises
    ------
    ValueError
        If `x`, `y`, and `z` have incompatible shapes.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermgrid3d
    """
    try:
        # Stacking forces a common shape; the deprecated ``copy=0`` flag
        # was dropped (NumPy 2 rejects copy=False when a copy is needed).
        x, y, z = np.array((x, y, z))
    # narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt
    except Exception:
        raise ValueError('x, y, z are incompatible')

    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    c = hermval(z, c, tensor=False)
    return c
def hermgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.

    Returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points
    form a grid with `x` in the first dimension, `y` in the second, and
    `z` in the third.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        Evaluation points. Lists and tuples are converted to ndarrays;
        anything else that is not an ndarray is treated as a scalar.
    c : array_like
        Coefficient array; ``c[i,j,k]`` holds the coefficient of
        ``H_i * H_j * H_k``. Extra trailing dimensions enumerate multiple
        coefficient sets.

    Returns
    -------
    values : ndarray, compatible object
        Values of the three dimensional series on the grid; the shape is
        ``c.shape[3:] + x.shape + y.shape + z.shape``.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermval3d
    """
    # One tensor-mode evaluation per axis; each call appends that axis'
    # shape to the result.
    return hermval(z, hermval(y, hermval(x, c)))
def hermvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample
    points `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = H_i(x),

    where ``0 <= i <= deg``. The leading indices of `V` index the elements
    of `x` and the last index is the degree of the Hermite polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is
    the array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
    ``hermval(x, c)`` are the same up to roundoff.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix of shape ``x.shape + (deg + 1,)``.
        The dtype will be the same as the converted `x`.

    Raises
    ------
    ValueError
        If `deg` is not a non-negative integer.

    Examples
    --------
    >>> x = np.array([-1, 0, 1])
    >>> hermvander(x, 3)
    array([[ 1., -2.,  2.,  4.],
           [ 1.,  0., -2., -0.],
           [ 1.,  2.,  2., -4.]])
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # ``+ 0.0`` promotes to at least float64; the deprecated ``copy=0``
    # flag was dropped (NumPy 2 rejects copy=False when a copy is needed).
    x = np.array(x, ndmin=1) + 0.0
    dims = (ideg + 1,) + x.shape
    v = np.empty(dims, dtype=x.dtype)
    v[0] = x*0 + 1
    if ideg > 0:
        x2 = x*2
        v[1] = x2
        # forward Hermite recurrence: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        for i in range(2, ideg + 1):
            v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
    # move the degree axis to the end; moveaxis replaces the deprecated
    # np.rollaxis(v, 0, v.ndim)
    return np.moveaxis(v, 0, -1)
def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points `(x, y)` and the last index encodes
    the degrees of the Hermite polynomials.

    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then ``np.dot(V, c.flat)``
    and ``hermval2d(x, y, c)`` are the same up to roundoff for a 2-D
    coefficient array `c` of shape ``(xdeg + 1, ydeg + 1)``.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        Matrix of shape ``x.shape + (order,)`` where
        ``order = (deg[0]+1)*(deg[1]+1)``. The dtype will be the same as
        the converted `x` and `y`.

    Raises
    ------
    ValueError
        If `deg` is not a pair of non-negative integers.

    See Also
    --------
    hermvander, hermvander3d, hermval2d, hermval3d
    """
    # Validate without shadowing the builtin ``id`` and without comparing
    # a bool list against [1, 1].
    ideg = [int(d) for d in deg]
    if len(ideg) != 2 or ideg != list(deg) or min(ideg) < 0:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg

    # ``+ 0.0`` promotes to floating point; the deprecated ``copy=0`` flag
    # was dropped (NumPy 2 rejects copy=False when a copy is needed).
    x, y = np.array((x, y)) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    # outer product over the two degree axes, then flatten them into one
    v = vx[..., None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points `(x, y, z)` and the last index
    encodes the degrees of the Hermite polynomials.

    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then
    ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` are the same up
    to roundoff for a 3-D coefficient array `c` of shape
    ``(xdeg + 1, ydeg + 1, zdeg + 1)``.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        Matrix of shape ``x.shape + (order,)`` where
        ``order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)``. The dtype will be
        the same as the converted `x`, `y`, and `z`.

    Raises
    ------
    ValueError
        If `deg` is not a triple of non-negative integers.

    See Also
    --------
    hermvander, hermvander2d, hermval2d, hermval3d
    """
    # Validate without shadowing the builtin ``id`` and without comparing
    # a bool list against [1, 1, 1].
    ideg = [int(d) for d in deg]
    if len(ideg) != 3 or ideg != list(deg) or min(ideg) < 0:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg

    # ``+ 0.0`` promotes to floating point; the deprecated ``copy=0`` flag
    # was dropped (NumPy 2 rejects copy=False when a copy is needed).
    x, y, z = np.array((x, y, z)) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    vz = hermvander(z, degz)
    # outer product over the three degree axes, then flatten them into one
    v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a Hermite series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets sharing the
        same x-coordinates can be fitted at once by passing a 2-D array
        with one dataset per column.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is ``len(x)*eps``, where eps is the relative
        precision of the float type of `x`.
    full : bool, optional
        When False (the default) just the coefficients are returned; when
        True, diagnostic information from the singular value decomposition
        is also returned.
    w : array_like, shape (M,), optional
        Weights. If not None, the contribution of each point
        ``(x[i], y[i])`` to the fit is weighted by ``w[i]``.

    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column k.
    [residuals, rank, singular_values, rcond] : list
        Only returned when ``full`` is True; see `numpy.linalg.lstsq` for
        details.

    Warns
    -----
    RankWarning
        Raised when the design matrix is rank deficient and ``full`` is
        False.

    See Also
    --------
    hermval : Evaluates a Hermite series.
    hermvander : Vandermonde matrix of Hermite series.
    numpy.linalg.lstsq : Computes a least-squares fit from the matrix.

    Notes
    -----
    The solution minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2

    by solving the (typically) overdetermined system ``V(x) * c = w * y``
    with the singular value decomposition of the weighted pseudo
    Vandermonde matrix `V`. A `RankWarning` signals that some singular
    values were neglected and the coefficients may be poorly determined.
    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # -- argument validation ------------------------------------------------
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # design matrix and right-hand side, kept in transposed form so that
    # weights and column norms broadcast along the sample axis
    lhs = hermvander(x, deg).T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights out of place (in-place ops can cause problems
        # with NA)
        lhs = lhs * w
        rhs = rhs * w

    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # norms of the design matrix columns, used to improve conditioning
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # solve the scaled least squares problem, then undo the scaling
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # warn on rank reduction
    if rank != order and not full:
        warnings.warn("The fit may be poorly conditioned", pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    return c
def hermcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an Hermite basis polynomial. This gives better
    eigenvalue estimates than the unscaled case, and for basis polynomials
    the eigenvalues are guaranteed real when `numpy.linalg.eigvalsh` is
    used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Raises
    ------
    ValueError
        If the trimmed series has degree < 1.
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # degree-1 series: the single root is available in closed form
        return np.array([[-.5*c[0]/c[1]]])

    deg = len(c) - 1
    mat = np.zeros((deg, deg), dtype=c.dtype)
    # cumulative scaling factors 1/sqrt(2*k), applied from the high end
    scl = np.multiply.accumulate(
        np.hstack((1., 1./np.sqrt(2.*np.arange(deg - 1, 0, -1)))))[::-1]
    # symmetric off-diagonals sqrt(k/2), written through flat views
    off = np.sqrt(.5*np.arange(1, deg))
    mat.reshape(-1)[1::deg + 1] = off      # superdiagonal
    mat.reshape(-1)[deg::deg + 1] = off    # subdiagonal
    # fold the series coefficients into the last column
    mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
    return mat
def hermroots(c):
    """
    Compute the roots of a Hermite series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * H_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Sorted array of the roots of the series. If all the roots are
        real, `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, chebroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin, or with multiplicity greater than
    one, may carry larger numerical errors; isolated roots near the origin
    can be improved by a few iterations of Newton's method. Because the
    Hermite basis polynomials are not powers of `x`, the results may seem
    unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermroots, hermfromroots
    >>> coef = hermfromroots([-1, 0, 1])
    >>> coef
    array([ 0.   ,  0.25 ,  0.   ,  0.125])
    >>> hermroots(coef)
    array([ -1.00000000e+00,  -1.38777878e-17,   1.00000000e+00])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # constant series: no roots at all
    if len(c) <= 1:
        return np.array([], dtype=c.dtype)
    # linear series: closed form
    if len(c) == 2:
        return np.array([-.5*c[0]/c[1]])

    roots = la.eigvals(hermcompanion(c))
    roots.sort()
    return roots
def _normed_hermite_n(x, n):
"""
Evaluate a normalized Hermite polynomial.
Compute the value of the normalized Hermite polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized Hermite function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard Hermite functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(2./nd)
nd = nd - 1.0
return c0 + c1*x*np.sqrt(2)
def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval
    :math:`[-\\inf, \\inf]` with the weight function
    :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    w : ndarray
        1-D ndarray containing the weights.

    Raises
    ------
    ValueError
        If `deg` is not a positive integer.

    Notes
    -----
    The results have only been tested up to degree 100; higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # message fixed: the check actually requires a *positive* integer
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    # Use the validated ``ideg`` here: ``[0]*deg`` raised TypeError for
    # float-valued input such as deg=3.0 even though it passed validation.
    c = np.array([0]*ideg + [1], dtype=np.float64)
    m = hermcompanion(c)
    x = la.eigvalsh(m)
    x.sort()

    # improve roots by one application of Newton
    dy = _normed_hermite_n(x, ideg)
    df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value when integrating 1
    w *= np.sqrt(np.pi) / w.sum()

    return x, w
def hermweight(x):
    """
    Weight function of the Hermite polynomials.

    The Hermite polynomials are orthogonal, but not normalized, with
    respect to the weight :math:`\\exp(-x^2)` on the interval
    :math:`[-\\inf, \\inf]`.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.
    """
    # exp(-x**2), written as a product to avoid the power operator
    return np.exp(-x * x)
#
# Hermite series class
#
class Hermite(ABCPolyBase):
    """An Hermite series class.

    The Hermite class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        Hermite coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0
    """
    # Virtual Functions
    # ABCPolyBase dispatches all arithmetic, evaluation, calculus,
    # fitting and root-finding operations to these module-level functions.
    _add = staticmethod(hermadd)
    _sub = staticmethod(hermsub)
    _mul = staticmethod(hermmul)
    _div = staticmethod(hermdiv)
    _pow = staticmethod(hermpow)
    _val = staticmethod(hermval)
    _int = staticmethod(hermint)
    _der = staticmethod(hermder)
    _fit = staticmethod(hermfit)
    _line = staticmethod(hermline)
    _roots = staticmethod(hermroots)
    _fromroots = staticmethod(hermfromroots)
    # Virtual properties
    # domain and window both default to the natural Hermite domain [-1, 1]
    nickname = 'herm'
    domain = np.array(hermdomain)
    window = np.array(hermdomain)
| mit |
superphy/backend | app/modules/qc/qc.py | 1 | 4021 | from __future__ import division
import os
import tempfile
import subprocess
import argparse
import pandas as pd
from middleware.graphers.turtle_grapher import generate_turtle_skeleton
def create_blast_db(query_file):
    '''
    Build a nucleotide BLAST database from the user-submitted genome.

    :param query_file: genome file that was given by the user.
    :return: path prefix of the database files, located in a fresh
        temporary directory
    :raises Exception: if makeblastdb exits with a non-zero status
    '''
    blast_db_path = os.path.join(tempfile.mkdtemp(), 'ecoli_blastdb')
    cmd = ["makeblastdb",
           "-in", query_file,
           "-dbtype", "nucl",
           "-title", "ecoli_blastdb",
           "-out", blast_db_path]
    if subprocess.call(cmd) != 0:
        raise Exception("Could not create blast db")
    return blast_db_path
def run_blast(blast_db):
    '''
    Blast the user-derived database against the bundled reference of
    10 E. coli-specific gene sequences
    (putative_e_coli_specific.fasta, from superphy/version-1).
    Output format is set to '10' (csv).

    :param blast_db: database path prefix returned by create_blast_db()
    :return: path of the csv output file
    :raises Exception: if blastn exits with a non-zero status
    '''
    # reference fasta ships next to this module
    ecoli_ref = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/' + 'putative_e_coli_specific.fasta')
    blast_output_file = blast_db + '.output'
    cmd = ["blastn",
           "-query", ecoli_ref,
           "-db", blast_db,
           "-out", blast_output_file,
           "-outfmt", '10 " qseqid qlen sseqid length pident sstart send sframe "',
           "-word_size", "11"]
    if subprocess.call(cmd) != 0:
        raise Exception("Could not run blast")
    return blast_output_file
def parse_blast_records(blast_output_file):
    '''
    Parse blastn csv output and return the reference genes that were hit.

    Recall, headers are: https://edwards.sdsu.edu/research/blast-output-8/
    For QC, we only consider near-perfect matches against our reference
    (>= 90% identity over >= 90% of the gene length).

    :param blast_output_file: path to the csv file produced by run_blast()
    :return: array of unique qseqid values passing both filters
    '''
    # Python-2-only ``print x`` statements converted to print() calls so
    # the module also runs under Python 3.
    print(blast_output_file)
    blast_records = pd.read_csv(blast_output_file, header=None)
    blast_records.columns = ['qseqid','qlen','sseqid','length','pident','sstart','send','sframe']
    # filter for results with percent identity >= 90%
    # .copy() makes an explicit copy of the filtered frame so adding the
    # 'pl' column below doesn't trigger pandas' SettingWithCopyWarning
    blast_records_pi_passed = blast_records[blast_records['pident']>=90].copy()
    print(blast_records_pi_passed)
    # calculate percent length of the alignment relative to the query gene
    blast_records_pi_passed['pl'] = blast_records_pi_passed['length']/blast_records_pi_passed['qlen'] * 100
    # filter for results with percent length >= 90%
    blast_records_pi_pl_passed = blast_records_pi_passed[blast_records_pi_passed['pl'] >= 90]
    print(blast_records_pi_pl_passed)
    # take only unique hits of the reference sequence that pass pi/pl
    # checks (we don't count repeats)
    unique_hits = blast_records_pi_pl_passed['qseqid'].unique()
    print(unique_hits)
    return unique_hits
def check_header_parsing(query_file):
    '''
    Checks that the genome file can be parsed into a turtle graph
    skeleton before continuing.

    :param query_file: path to the genome file submitted by the user
    :return: True if parsing succeeds, False otherwise
    '''
    try:
        # we only care whether parsing succeeds; the graph is discarded
        generate_turtle_skeleton(query_file)
        return True
    # narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt
    except Exception:
        return False
def check_ecoli(query_file):
    '''
    Decide whether the genome looks like E. coli.

    Blasts the genome against the 10 E. coli-specific reference genes and
    requires at least 3 distinct hits.

    :param query_file: path to the genome file submitted by the user
    :return: True if the genome hits >= 3 marker genes, False otherwise
    '''
    db = create_blast_db(query_file)
    hits = parse_blast_records(run_blast(db))
    return len(hits) >= 3
def qc(query_file):
    '''
    Compares the query_file against a reference db of ecoli-specific gene
    sequences. We consider a "pass" if the file parses cleanly and hits
    >= 3 of the reference sequences.

    :param query_file: path to the genome file submitted by the user
    :return: True for pass, False for a failed qc check (not ecoli)
    '''
    # short-circuit: don't bother blasting a file we can't even parse
    if not check_header_parsing(query_file):
        return False
    return check_ecoli(query_file)
if __name__ == '__main__':
    # simple CLI: ``qc.py -i <genome file>`` prints True/False
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", required=True)
    args = parser.parse_args()
    # Python-2-only ``print qc(args.i)`` converted to a print() call so
    # the script also runs under Python 3
    print(qc(args.i))
| apache-2.0 |
yaii/yai | share/extensions/rtree.py | 3 | 2561 | #!/usr/bin/env python
'''
Copyright (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex, simplestyle, pturtle, random
def rtree(turtle, size, min):
    """Recursively draw a random binary tree with the given turtle.

    Recursion stops once the branch length drops below ``min``. Each of
    the two subtrees branches off at a random 20-40 degree angle with a
    random 50-90% length reduction, then the turtle backtracks.
    """
    if size < min:
        return
    turtle.fd(size)
    # Left subtree then right subtree; each pair is (turn out, turn back).
    # The random.uniform call order matches the original exactly, so the
    # drawing is identical for a given RNG state.
    for turn_out, turn_back in ((turtle.lt, turtle.rt), (turtle.rt, turtle.lt)):
        angle = random.uniform(20, 40)
        turn_out(angle)
        rtree(turtle, size * random.uniform(0.5, 0.9), min)
        turn_back(angle)
    turtle.bk(size)
class RTreeTurtle(inkex.Effect):
    """Inkscape effect that draws a random binary tree as a single SVG path."""

    def __init__(self):
        inkex.Effect.__init__(self)
        # Initial trunk length; parsed as px in effect().
        self.OptionParser.add_option("-s", "--size",
                        action="store", type="float",
                        dest="size", default=100.0,
                        help="initial branch size")
        # Recursion stops once a branch would be shorter than this.
        self.OptionParser.add_option("-m", "--minimum",
                        action="store", type="float",
                        dest="minimum", default=4.0,
                        help="minimum branch size")

    def effect(self):
        # Convert user-supplied sizes from px into document units.
        self.options.size = self.unittouu(str(self.options.size) + 'px')
        self.options.minimum = self.unittouu(str(self.options.minimum) + 'px')
        # Stroke-only path style: black hairline, no fill.
        s = {'stroke-linejoin': 'miter', 'stroke-width': str(self.unittouu('1px')),
             'stroke-opacity': '1.0', 'fill-opacity': '1.0',
             'stroke': '#000000', 'stroke-linecap': 'butt',
             'fill': 'none'}
        t = pturtle.pTurtle()
        t.pu()                      # pen up while moving to the start point
        t.setpos(self.view_center)  # start from the center of the view
        t.pd()                      # pen down
        rtree(t, self.options.size, self.options.minimum)
        attribs = {'d': t.getPath(), 'style': simplestyle.formatStyle(s)}
        inkex.etree.SubElement(self.current_layer, inkex.addNS('path', 'svg'), attribs)
if __name__ == '__main__':
    # inkex.Effect.affect() parses sys.argv, runs effect(), writes the SVG.
    e = RTreeTurtle()
    e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-2.0 |
2014c2g14/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/importlib/__init__.py | 610 | 3472 | """A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
    # Prefer the frozen, C-level bootstrap when the interpreter ships one.
    import _frozen_importlib as _bootstrap
except ImportError:
    # Fall back to the pure-Python bootstrap and wire it up manually.
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    sys.modules['importlib._bootstrap'] = _bootstrap

# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long

# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)

# Public API    #########################################################

from ._bootstrap import __import__
def invalidate_caches():
    """Call the invalidate_caches() method on all meta path finders stored in
    sys.meta_path (where implemented)."""
    # Lazily filter to finders that actually implement the hook.
    caching_finders = (f for f in sys.meta_path
                       if hasattr(f, 'invalidate_caches'))
    for finder in caching_finders:
        finder.invalidate_caches()
def find_loader(name, path=None):
    """Find the loader for the specified module.

    First, sys.modules is checked to see if the module was already imported. If
    so, then sys.modules[name].__loader__ is returned. If that happens to be
    set to None, then ValueError is raised. If the module is not in
    sys.modules, then sys.meta_path is searched for a suitable loader with the
    value of 'path' given to the finders. None is returned if no loader could
    be found.

    Dotted names do not have their parent packages implicitly imported. You will
    most likely need to explicitly import all parent packages in the proper
    order for a submodule to get the correct loader.
    """
    # Sentinel distinguishes "not in sys.modules" from a None entry
    # (blocked import), preserving the original KeyError-only handling.
    _absent = object()
    module = sys.modules.get(name, _absent)
    if module is not _absent:
        loader = module.__loader__
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader
    return _bootstrap._find_module(name, path)
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        # Count leading dots: they determine the relative-import level.
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
# Brython-specific: register at least one import hook so the importlib
# machinery above has a finder to consult.
import basehook
sys.meta_path.append(basehook.BaseHook())
| gpl-3.0 |
CenterForOpenScience/osf.io | addons/onedrive/tests/test_models.py | 13 | 3765 | # -*- coding: utf-8 -*-
import mock
import pytest
import unittest
from framework.auth import Auth
from addons.base.tests.models import OAuthAddonNodeSettingsTestSuiteMixin
from addons.base.tests.models import OAuthAddonUserSettingTestSuiteMixin
from addons.onedrive.models import NodeSettings, OneDriveProvider
from addons.onedrive.client import OneDriveClient
from addons.onedrive.tests.factories import (
OneDriveAccountFactory,
OneDriveNodeSettingsFactory,
OneDriveUserSettingsFactory,
)
pytestmark = pytest.mark.django_db
class TestOneDriveProvider(unittest.TestCase):
    """Tests for OneDriveProvider's OAuth callback handling."""

    def setUp(self):
        super(TestOneDriveProvider, self).setUp()
        self.provider = OneDriveProvider()

    @mock.patch.object(OneDriveClient, 'user_info_for_token')
    def test_handle_callback(self, mock_client):
        # handle_callback should map the OneDrive user-info payload onto
        # the provider-agnostic account fields used by OSF addons.
        fake_response = {'access_token': 'abc123'}
        fake_info = {'id': '12345', 'name': 'fakename', 'link': 'fakeUrl'}
        mock_client.return_value = fake_info
        res = self.provider.handle_callback(fake_response)
        assert res['provider_id'] == '12345'
        assert res['display_name'] == 'fakename'
        assert res['profile_url'] == 'fakeUrl'
class TestUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
    """User-settings tests; all behavior comes from the shared OAuth mixin."""
    short_name = 'onedrive'            # addon short name the mixin keys on
    full_name = 'Microsoft OneDrive'   # human-readable addon name
    ExternalAccountFactory = OneDriveAccountFactory
class TestNodeSettings(OAuthAddonNodeSettingsTestSuiteMixin, unittest.TestCase):
    """Node-settings tests for the OneDrive addon.

    Most coverage comes from the shared OAuth mixin; the methods below add
    OneDrive-specific cases and override mixin hooks.
    """
    short_name = 'onedrive'
    full_name = 'Microsoft OneDrive'
    ExternalAccountFactory = OneDriveAccountFactory

    NodeSettingsFactory = OneDriveNodeSettingsFactory
    NodeSettingsClass = NodeSettings
    UserSettingsFactory = OneDriveUserSettingsFactory

    def setUp(self):
        # Patch refresh_oauth_key so setUp/tests never hit the real API.
        self.mock_refresh = mock.patch.object(
            OneDriveProvider,
            'refresh_oauth_key'
        )
        # Bug fix: return_value must be set on the MagicMock returned by
        # start(); the original set it on the patcher object itself, which
        # has no effect on the patched method.
        self.mock_refresh.start().return_value = True
        super(TestNodeSettings, self).setUp()

    def tearDown(self):
        self.mock_refresh.stop()
        super(TestNodeSettings, self).tearDown()

    @mock.patch('addons.onedrive.models.OneDriveProvider')
    def test_api_not_cached(self, mock_odp):
        # The first call to .api returns a new object
        api = self.node_settings.api
        mock_odp.assert_called_once_with(self.external_account)
        assert api == mock_odp()

    @mock.patch('addons.onedrive.models.OneDriveProvider')
    def test_api_cached(self, mock_odp):
        # Repeated calls to .api return the same object
        self.node_settings._api = 'testapi'
        api = self.node_settings.api
        assert mock_odp.called is False
        assert api == 'testapi'

    def test_selected_folder_name_root(self):
        self.node_settings.folder_id = 'root'
        assert self.node_settings.selected_folder_name == '/ (Full OneDrive)'

    def test_selected_folder_name_empty(self):
        self.node_settings.folder_id = None
        assert self.node_settings.selected_folder_name == ''

    ## Overrides ##

    def test_set_folder(self):
        folder = {
            'id': 'fake-folder-id',
            'name': 'fake-folder-name',
            'path': 'fake_path'
        }
        self.node_settings.set_folder(folder, auth=Auth(self.user))
        self.node_settings.save()
        # Folder was set
        assert self.node_settings.folder_id == folder['id']
        # Log was saved
        last_log = self.node.logs.latest()
        assert last_log.action == '{0}_folder_selected'.format(self.short_name)

    def test_serialize_settings(self):
        settings = self.node_settings.serialize_waterbutler_settings()
        expected = {'folder': self.node_settings.folder_id}
        assert settings == expected
| apache-2.0 |
andreivasiliu2211/upm | examples/python/wheelencoder.py | 14 | 1854 | #!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_wheelencoder as sensorObj
# Instantiate a DFRobot Wheel Encoder on digital pin D2
sensor = sensorObj.WheelEncoder(2)
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
    # Convert Ctrl-C into a clean SystemExit so no traceback is printed;
    # the atexit handler then runs normally.
    raise SystemExit
# This function lets you run code on exit
def exitHandler():
    # Registered via atexit below; announces shutdown (Python 2 print).
    print "Exiting"
    sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)

# set the counter to 0 and start counting
sensor.clearCounter();
sensor.startCounter();

# Poll and report the encoder once per second until interrupted.
while (1):
    print "Millis:", sensor.getMillis(), "Count:", sensor.counter()
    time.sleep(1)
| mit |
jinluyuan/osf.io | scripts/consistency/impute_wiki_date.py | 64 | 1064 | """Due to an unknown bug, wiki pages were saved without dates between
September 4 and 6. This script identifies wiki pages without dates and
imputes dates using ObjectIds.
Dry run: python -m scripts/consistency/impute_wiki_date
Real: python -m scripts/consistency/impute_wiki_date false
"""
from bson import ObjectId
from website.app import init_app
from website import models
from framework import Q
app = init_app()
def impute_wiki_date(dry_run=True):
    """Find wiki pages saved without a date and impute one from the
    creation timestamp embedded in their MongoDB ObjectId.

    :param dry_run: when True (the default), only print what would change.
    """
    no_date = models.NodeWikiPage.find(
        Q('date', 'eq', None)
    )
    for wiki in no_date:
        # An ObjectId's first 4 bytes encode its creation time, which is a
        # good proxy for the lost save date.
        oid = ObjectId(wiki._primary_key)
        imputed_date = oid.generation_time
        print u'Imputing date {} for wiki ID {}'.format(
            imputed_date.strftime('%c'),
            wiki._primary_key,
        )
        if not dry_run:
            # Writes through the field descriptor directly; NOTE(review):
            # this relies on modular-odm internals (presumably to bypass a
            # write-once guard on 'date') -- confirm before reusing.
            wiki._fields['date'].__set__(wiki, imputed_date, safe=True)
            wiki.save()
if __name__ == '__main__':
    import sys
    # Dry run unless the first CLI argument is 'f' or 'false'
    # (case-insensitive), matching the module docstring.
    dry_run = len(sys.argv) == 1 or sys.argv[1].lower() not in ['f', 'false']
    impute_wiki_date(dry_run=dry_run)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/purview/azure-purview-scanning/azure/purview/scanning/aio/_azure_purview_scanning_client.py | 1 | 4355 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.purview.scanning.core.rest import AsyncHttpResponse, HttpRequest, _AsyncStreamContextManager
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Dict
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PurviewScanningClientConfiguration
class PurviewScanningClient(object):
    """Creates a Microsoft.Scanning management client.

    NOTE: this class is generated by AutoRest (see file header); manual
    edits will be lost when the client is regenerated.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param endpoint: The scanning endpoint of your purview account. Example: https://{accountName}.scan.purview.azure.com.
    :type endpoint: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        endpoint: str,
        **kwargs: Any
    ) -> None:
        # '{Endpoint}' is substituted per-request in send_request() via
        # path_format_arguments, not resolved here.
        base_url = '{Endpoint}'
        self._config = PurviewScanningClientConfiguration(credential, endpoint, **kwargs)
        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
        self._serialize = Serializer()
        self._deserialize = Deserializer()
        self._serialize.client_side_validation = False

    async def send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        We have helper methods to create requests specific to this service in `azure.purview.scanning.rest`.
        Use these helper methods to create the request you pass to this method. See our example below:

        >>> from azure.purview.scanning.rest import build_get_request
        >>> request = build_get_request(key_vault_name)
        <HttpRequest [GET], url: '/azureKeyVaults/{keyVaultName}'>
        >>> response = await client.send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        For advanced cases, you can also create your own :class:`~azure.purview.scanning.core.rest.HttpRequest`
        and pass it in.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.purview.scanning.core.rest.HttpRequest
        :keyword bool stream_response: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.purview.scanning.core.rest.AsyncHttpResponse
        """
        # Deep-copy so the caller's request object is never mutated.
        request_copy = deepcopy(http_request)
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
        if kwargs.pop("stream_response", False):
            # Streaming: hand back a context manager; body is not read here.
            return _AsyncStreamContextManager(
                client=self._client._pipeline,
                request=request_copy,
            )
        pipeline_response = await self._client._pipeline.run(request_copy._internal_request, **kwargs)
        response = AsyncHttpResponse(
            status_code=pipeline_response.http_response.status_code,
            request=request_copy,
            _internal_response=pipeline_response.http_response
        )
        # Eagerly buffer the body so callers get a fully-read response.
        await response.read()
        return response

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "PurviewScanningClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| mit |
justinjfu/doodad | doodad/test_mount.py | 1 | 1128 | import unittest
import os
import os.path as path
import shutil
import tempfile
import contextlib
from doodad import mount
from doodad.utils import TESTING_DIR
from doodad.credentials import ssh, ec2
class TestLocal(unittest.TestCase):
    """Tests for mount.MountLocal's file/directory filtering."""

    def test_filter(self):
        # ignore_patterns follows the shutil.copytree ignore protocol:
        # given a directory and its entries, return the subset to skip.
        local_mount = mount.MountLocal('dummy', filter_ext=['.abc', '.xyz'], filter_dir=('foo',))
        to_ignore = local_mount.ignore_patterns('/data', ['a.abc', 'b.txt', 'foo', 'bar'])
        self.assertEqual({'a.abc', 'foo'}, set(to_ignore))

    def test_shutil_copy(self):
        # End-to-end: copytree with the mount's ignore hook should drop
        # filtered extensions (.pyc) and filtered directories (foo).
        source_dir = path.join(TESTING_DIR, 'mount_test', 'source_dir')
        target_dir = path.join(TESTING_DIR, 'mount_test', 'target_dir')
        local_mount = mount.MountLocal('dummy', filter_ext=['.pyc'], filter_dir=('foo',))
        try:
            shutil.copytree(source_dir, target_dir, ignore=local_mount.ignore_patterns)
            print('SOURCE_DIR:', os.listdir(source_dir))
            print('TARGET_DIR:', os.listdir(target_dir))
            self.assertEqual({'a.txt', 'bar'}, set(os.listdir(target_dir)))
        finally:
            # Always remove the copied tree, even on assertion failure.
            shutil.rmtree(target_dir)
| gpl-3.0 |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_core/test_subplots/test_get_subplot.py | 2 | 6220 | from __future__ import absolute_import
from unittest import TestCase
from plotly.graph_objs import Figure
from plotly import subplots
import plotly.graph_objs as go
from plotly.subplots import SubplotXY, SubplotDomain
class TestGetSubplot(TestCase):
    """Tests for Figure.get_subplot across every subplot type."""

    def test_get_subplot(self):
        # Make Figure with subplot types: xy (plus a secondary y), polar,
        # ternary, 3d scene, geo, and a domain subplot spanning row 4.
        fig = subplots.make_subplots(
            rows=4,
            cols=2,
            specs=[
                [{}, {"secondary_y": True}],
                [{"type": "polar"}, {"type": "ternary"}],
                [{"type": "scene"}, {"type": "geo"}],
                [{"type": "domain", "colspan": 2}, None],
            ],
        )
        fig.add_scatter(y=[2, 1, 3], row=1, col=1)
        fig.add_scatter(y=[2, 1, 3], row=1, col=2)
        fig.add_scatter(y=[1, 3, 2], row=1, col=2, secondary_y=True)
        fig.add_trace(go.Scatterpolar(r=[2, 1, 3], theta=[20, 50, 125]), row=2, col=1)
        fig.add_traces(
            [go.Scatterternary(a=[0.2, 0.1, 0.3], b=[0.4, 0.6, 0.5])],
            rows=[2],
            cols=[2],
        )
        fig.add_scatter3d(
            x=[2, 0, 1], y=[0, 1, 0], z=[0, 1, 2], mode="lines", row=3, col=1
        )
        fig.add_scattergeo(lat=[0, 40], lon=[10, 5], mode="lines", row=3, col=2)
        fig.add_parcats(
            dimensions=[
                {"values": ["A", "A", "B", "A", "B"]},
                {"values": ["a", "a", "a", "b", "b"]},
            ],
            row=4,
            col=1,
        )
        # Strip non-deterministic trace uids before comparing to the literal.
        fig.update_traces(uid=None)
        fig.update(layout_height=1200)

        # Check against the expected fully-expanded figure.
        expected = Figure(
            {
                "data": [
                    {"type": "scatter", "xaxis": "x", "y": [2, 1, 3], "yaxis": "y"},
                    {"type": "scatter", "xaxis": "x2", "y": [2, 1, 3], "yaxis": "y2"},
                    {"type": "scatter", "xaxis": "x2", "y": [1, 3, 2], "yaxis": "y3"},
                    {
                        "r": [2, 1, 3],
                        "subplot": "polar",
                        "theta": [20, 50, 125],
                        "type": "scatterpolar",
                    },
                    {
                        "a": [0.2, 0.1, 0.3],
                        "b": [0.4, 0.6, 0.5],
                        "subplot": "ternary",
                        "type": "scatterternary",
                    },
                    {
                        "mode": "lines",
                        "scene": "scene",
                        "type": "scatter3d",
                        "x": [2, 0, 1],
                        "y": [0, 1, 0],
                        "z": [0, 1, 2],
                    },
                    {
                        "geo": "geo",
                        "lat": [0, 40],
                        "lon": [10, 5],
                        "mode": "lines",
                        "type": "scattergeo",
                    },
                    {
                        "dimensions": [
                            {"values": ["A", "A", "B", "A", "B"]},
                            {"values": ["a", "a", "a", "b", "b"]},
                        ],
                        "domain": {"x": [0.0, 0.9400000000000001], "y": [0.0, 0.19375]},
                        "type": "parcats",
                    },
                ],
                "layout": {
                    "geo": {
                        "domain": {
                            "x": [0.5700000000000001, 0.9400000000000001],
                            "y": [0.26875, 0.4625],
                        }
                    },
                    "height": 1200,
                    "polar": {"domain": {"x": [0.0, 0.37], "y": [0.5375, 0.73125]}},
                    "scene": {"domain": {"x": [0.0, 0.37], "y": [0.26875, 0.4625]}},
                    "ternary": {
                        "domain": {
                            "x": [0.5700000000000001, 0.9400000000000001],
                            "y": [0.5375, 0.73125],
                        }
                    },
                    "xaxis": {"anchor": "y", "domain": [0.0, 0.37]},
                    "xaxis2": {
                        "anchor": "y2",
                        "domain": [0.5700000000000001, 0.9400000000000001],
                    },
                    "yaxis": {"anchor": "x", "domain": [0.80625, 1.0]},
                    "yaxis2": {"anchor": "x2", "domain": [0.80625, 1.0]},
                    "yaxis3": {"anchor": "x2", "overlaying": "y2", "side": "right"},
                },
            }
        )
        expected.update_traces(uid=None)

        # Make sure we have expected starting figure
        self.assertEqual(fig, expected)

        # (1, 1)
        subplot = fig.get_subplot(1, 1)
        self.assertEqual(
            subplot, SubplotXY(xaxis=fig.layout.xaxis, yaxis=fig.layout.yaxis)
        )

        # (1, 2) Primary
        subplot = fig.get_subplot(1, 2)
        self.assertEqual(
            subplot, SubplotXY(xaxis=fig.layout.xaxis2, yaxis=fig.layout.yaxis2)
        )

        # (1, 2) Secondary y-axis (shares xaxis2, overlays yaxis2)
        subplot = fig.get_subplot(1, 2, secondary_y=True)
        self.assertEqual(
            subplot, SubplotXY(xaxis=fig.layout.xaxis2, yaxis=fig.layout.yaxis3)
        )

        # (2, 1)
        subplot = fig.get_subplot(2, 1)
        self.assertEqual(subplot, fig.layout.polar)

        # (2, 2)
        subplot = fig.get_subplot(2, 2)
        self.assertEqual(subplot, fig.layout.ternary)

        # (3, 1)
        subplot = fig.get_subplot(3, 1)
        self.assertEqual(subplot, fig.layout.scene)

        # (3, 2)
        subplot = fig.get_subplot(3, 2)
        self.assertEqual(subplot, fig.layout.geo)

        # (4, 1) -- domain subplots are described by the trace's own domain
        subplot = fig.get_subplot(4, 1)
        domain = fig.data[-1].domain
        self.assertEqual(subplot, SubplotDomain(x=domain.x, y=domain.y))

    def test_get_subplot_out_of_bounds(self):
        # Row/col indices are 1-based; 0 or past the grid must raise.
        fig = subplots.make_subplots(rows=4, cols=2)

        self.assertRaises(ValueError, lambda: fig.get_subplot(0, 1))
        self.assertRaises(ValueError, lambda: fig.get_subplot(5, 1))
        self.assertRaises(ValueError, lambda: fig.get_subplot(1, 0))
        self.assertRaises(ValueError, lambda: fig.get_subplot(1, 3))
| mit |
mohamed--abdel-maksoud/chromium.src | third_party/android_testrunner/errors.py | 171 | 1340 | #!/usr/bin/python2.4
#
#
# Copyright 2008, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines common exception classes for this package."""
class MsgException(Exception):
    """Generic exception with an optional string msg."""
    def __init__(self, msg=""):
        # Pass msg to Exception so str(e) and tracebacks show it; the
        # original only stored it on self.msg, leaving str(e) empty.
        super(MsgException, self).__init__(msg)
        self.msg = msg
class WaitForResponseTimedOutError(Exception):
    """Raised when a command was sent but its response did not arrive in time."""
class DeviceUnresponsiveError(Exception):
    """Raised when the device does not respond to a command at all."""
class InstrumentationError(Exception):
    """Raised when running Android instrumentation fails."""
class AbortError(MsgException):
    """Generic exception that indicates a fatal error has occurred and program
    execution should be aborted."""
    # Inherits the optional msg payload from MsgException.
class ParseError(MsgException):
    """Raised when xml data to parse has unrecognized format."""
    # Inherits the optional msg payload from MsgException.
| bsd-3-clause |
mattrberry/hnpy | hnpy.py | 1 | 4988 | import requests
import time
baseURL = "https://hacker-news.firebaseio.com/v0/"
# itemid is the string of the numerical id
def makeItem(itemid):
    """Fetch item ``itemid`` (a numeric id as a string) from the HN API and
    wrap the returned JSON in the matching Item subclass."""
    itemjson = requests.get(baseURL + "item/" + itemid + ".json").json()
    itemtype = itemjson["type"]
    if "story" in itemtype:
        # Items of type "ask" report themselves as "story"; they are
        # distinguished by the absence of a URL.
        has_url = "url" in itemjson and itemjson["url"] != ""
        item = Story(itemjson) if has_url else Ask(itemjson)
    elif "comment" in itemtype:
        item = Comment(itemjson)
    elif "job" in itemtype:
        item = Job(itemjson)
    elif "pollopt" in itemtype:
        # Must be tested before "poll": "pollopt" contains that substring.
        item = PollOption(itemjson)
    elif "poll" in itemtype:
        item = Poll(itemjson)
    return item
class HackerNews(object):
    """Thin client for the Hacker News Firebase story-listing endpoints."""

    def _fetch_ids(self, listing):
        # Shared GET helper: every listing endpoint returns a JSON id list.
        return requests.get(baseURL + listing + ".json").json()

    def getTop(self):
        """Ids of the current top stories."""
        return self._fetch_ids("topstories")

    def getNew(self):
        """Ids of the newest stories."""
        return self._fetch_ids("newstories")

    def getBest(self):
        """Ids of the best stories."""
        return self._fetch_ids("beststories")

    def getAsk(self):
        """Ids of the current Ask HN stories."""
        return self._fetch_ids("askstories")

    def getShow(self):
        """Ids of the current Show HN stories."""
        return self._fetch_ids("showstories")

    def getJob(self):
        """Ids of the current job stories."""
        return self._fetch_ids("jobstories")

    def load(self, ids, amount, start=0):
        """Load up to ``amount`` items from ``ids`` beginning at ``start``.

        Returns fewer items when the id list runs out first.
        """
        return [makeItem(str(ids[i]))
                for i in range(start, start + amount)
                if i < len(ids)]
class Item(object):
    """Base class for all Hacker News item types.

    Stores the fields common to every item; ``score`` defaults to 1 and
    ``descendants`` (comment count) to 0 when the API omits them.
    """

    def __init__(self, itemjson):
        self.id = str(itemjson["id"])
        self.by = itemjson["by"]
        self.time = itemjson["time"]
        self.score = itemjson.get("score", 1)
        self.descendants = itemjson.get("descendants", 0)

    def age(self):
        """Seconds elapsed since the item was created."""
        return int(time.time()) - self.time

    def ageString(self):
        """Human-readable age: largest of days/hours/minutes, else seconds."""
        seconds = self.age()
        for unit_seconds, label in ((60 * 60 * 24, "days"),
                                    (60 * 60, "hours"),
                                    (60, "minutes")):
            if seconds / unit_seconds >= 1:
                return str(int(seconds / unit_seconds)) + " " + label
        return str(seconds) + " seconds"
class Story(Item):
    """A regular story submission: has a title and an external URL."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        self.title = itemjson["title"]
        self.url = itemjson["url"]
        # Direct child comment ids; "kids" is absent when there are none.
        self.kids = itemjson.get("kids", [])

    def loadKids(self):
        """Fetch and return this story's direct child comments.

        Bug fix: the original loop was ``for kids in self.kids`` while the
        body referenced ``kid``, raising NameError whenever kids existed.
        """
        return [makeItem(str(kid)) for kid in self.kids]
class Comment(Item):
    """A comment: has body text and optionally child comments."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        self.text = itemjson["text"]
        # Direct reply ids; "kids" is absent when there are no replies.
        self.kids = itemjson.get("kids", [])

    def loadKids(self):
        """Fetch and return this comment's direct replies.

        Bug fix: the original loop was ``for kids in self.kids`` while the
        body referenced ``kid``, raising NameError whenever kids existed.
        """
        return [makeItem(str(kid)) for kid in self.kids]
class Ask(Item):
    """An Ask HN post: a story with body text instead of a URL."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        self.title = itemjson["title"]
        # Body text may be absent for title-only posts.
        self.text = itemjson.get("text", "")
        # Direct child comment ids; absent when there are none.
        self.kids = itemjson.get("kids", [])

    def loadKids(self):
        """Fetch and return this post's direct child comments.

        Bug fix: the original loop was ``for kids in self.kids`` while the
        body referenced ``kid``, raising NameError whenever kids existed.
        """
        return [makeItem(str(kid)) for kid in self.kids]
class Job(Item):
    """A job posting: may carry body text, an external URL, or both."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        self.title = itemjson["title"]
        # Either field may be absent in the API payload.
        self.text = itemjson.get("text", "")
        self.url = itemjson.get("url", "")
class Poll(Item):
    """A poll: has title, text, its option ids in ``parts``, and comments."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        self.title = itemjson["title"]
        self.text = itemjson["text"]
        # Direct comment ids; absent when there are none.
        self.kids = itemjson.get("kids", [])
        # Ids of the PollOption items belonging to this poll.
        self.parts = itemjson["parts"]

    def loadKids(self):
        """Fetch and return this poll's direct comments (not its options;
        option ids live in ``parts``).

        Bug fix: the original loop was ``for kids in self.kids`` while the
        body referenced ``kid``, raising NameError whenever kids existed.
        """
        return [makeItem(str(kid)) for kid in self.kids]
class PollOption(Item):
    """A single answer option belonging to a Poll."""
    def __init__(self, itemjson):
        Item.__init__(self, itemjson)
        # The option's display text.
        self.text = itemjson["text"]
| mit |
40223209/test1 | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    # Frozen executables have no usable __file__ path for this module.
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
    _srcfile = __file__
# Normalize case so frame-filename comparisons work on case-insensitive
# filesystems when walking the stack in findCaller().
_srcfile = os.path.normcase(_srcfile)


if hasattr(sys, '_getframe'):
    # Skip 3 frames: past the logging-internal call chain to the caller.
    currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except:
            # Fallback for interpreters without sys._getframe: recover the
            # frame from a freshly raised exception's traceback.
            return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated (set to False to swallow handler errors in production)
#
raiseExceptions = True

#
# If you don't want threading information in the log, set this to False
#
logThreads = True

#
# If you don't want multiprocessing information in the log, set this to False
#
logMultiprocessing = True

#
# If you don't want process information in the log, set this to False
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL    # alias kept for backward compatibility
ERROR = 40
WARNING = 30
WARN = WARNING      # deprecated alias
INFO = 20
DEBUG = 10
NOTSET = 0

# Two-way mapping: numeric level -> name AND name -> numeric level.
# getLevelName(), addLevelName() and _checkLevel() rely on both directions.
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """Return the textual representation of logging level 'level'.

    For the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) and
    any level registered via addLevelName(), the associated name is
    returned. Because _levelNames maps both directions, passing a level
    name returns its number. Unknown values yield "Level %s" % level.
    """
    try:
        return _levelNames[level]
    except KeyError:
        return "Level %s" % level
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try: #unlikely to cause an exception, but you never know...
        # Register both directions so name<->number lookups stay in sync.
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
    # RLock, not Lock: config code may re-enter while already holding it.
    _lock = threading.RLock()
else: #pragma: no cover
    _lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    This should be released with _releaseLock().
    """
    if _lock:
        # _lock is None when the threading module is unavailable.
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    lock = _lock
    if lock:
        lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.

        name     -- name of the logger that produced the record
        level    -- numeric severity (also stored as text in 'levelname')
        pathname -- full path of the source file of the logging call
        lineno   -- source line number of the logging call
        msg      -- format string (or arbitrary object) for the message
        args     -- arguments merged into msg with the % operator
        exc_info -- sys.exc_info()-style triple, or None
        func     -- name of the calling function, if known
        sinfo    -- formatted stack information, if requested
        kwargs   -- accepted and ignored (lets factory signatures evolve)
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may not be a usable string (e.g. None); degrade
            # gracefully rather than failing the logging call.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - int(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and threading:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord   # replaceable via setLogRecordFactory()
def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable with the same signature as LogRecord, used
                    to create every new log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory
def getLogRecordFactory():
    """
    Return the callable currently used to instantiate a log record.
    """
    return _logRecordFactory
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.

    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # Build an empty record, then overwrite its attributes wholesale.
    record = _logRecordFactory(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
    """%-style format handling ('%(message)s'); base class for the others."""

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'

    def __init__(self, fmt):
        # Fall back to the default format when fmt is None or empty.
        self._fmt = fmt if fmt else self.default_format

    def usesTime(self):
        # True when the format string references the record's asctime.
        return self.asctime_search in self._fmt

    def format(self, record):
        return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
    """str.format()-style format handling ('{message}')."""

    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    def format(self, record):
        return self._fmt.format(**vars(record))
class StringTemplateStyle(PercentStyle):
    """string.Template-style format handling ('${message}')."""

    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, fmt):
        self._fmt = fmt if fmt else self.default_format
        # Pre-compile the template once; substitute() is called per record.
        self._tpl = Template(self._fmt)

    def usesTime(self):
        # Accept both the '$asctime' and '${asctime}' spellings.
        return '$asctime' in self._fmt or self.asctime_format in self._fmt

    def format(self, record):
        return self._tpl.substitute(**record.__dict__)
# Map the style character accepted by Formatter(style=...) to the class
# implementing that formatting style.
_STYLES = {
    '%': PercentStyle,
    '{': StrFormatStyle,
    '$': StringTemplateStyle
}
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """
    # Converts record.created to a time tuple; override (e.g. with
    # time.gmtime) to change the timezone used by formatTime().
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None, style='%'):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                _STYLES.keys()))
        # The style object owns the format string; _fmt is kept as a
        # convenience alias for code that inspects the formatter.
        self._style = _STYLES[style](fmt)
        self._fmt = self._style._fmt
        self.datefmt = datefmt
    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # ISO8601-like default, with milliseconds appended separately
            # because strftime has no millisecond directive.
            t = time.strftime(self.default_time_format, ct)
            s = self.default_msec_format % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, None, sio)
        s = sio.getvalue()
        sio.close()
        # Strip the trailing newline that print_exception always emits.
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()
    def formatMessage(self, record):
        return self._style.format(record)
    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info
    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.

        Note that this mutates the record (message, asctime, exc_text) so
        later formatters can reuse the computed values.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s
#
# The default formatter to use when no other is specified
#
# With no fmt argument this formats records as '%(message)s' only.
_defaultFormatter = Formatter()
class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        # Fall back to the module default when no per-line formatter is given.
        self.linefmt = linefmt if linefmt else _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        if not records:
            return ""
        parts = [self.formatHeader(records)]
        parts.extend(self.linefmt.format(record) for record in records)
        parts.append(self.formatFooter(records))
        return "".join(parts)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns a false value if the record is to be dropped, a true value
        otherwise. If deemed appropriate, the record may be modified
        in-place.
        """
        if not self.nlen:
            # Empty filter name: everything passes.
            return True
        if self.name == record.name:
            return True
        if not record.name.startswith(self.name):
            return False
        # Only a true descendant passes: a '.' must follow the prefix
        # (this rejects e.g. "A.BB" for filter "A.B").
        return record.name[self.nlen] == "."
class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler.
        """
        if filter not in self.filters:
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns a zero value if a record
        is to be dropped, else non-zero.

        .. versionchanged: 3.2

           Allow filters to be just callables.
        """
        for f in self.filters:
            # A filter may be an object with a filter() method, or a plain
            # callable taking the record.
            check = f.filter if hasattr(f, 'filter') else f
            if not check(record):
                return False
        return True
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
# Name -> handler mapping, kept weak so this map alone never keeps a
# handler alive.
_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
# Weak references to all live handlers, so they can be removed in reverse of
# the order they were initialized (presumably consumed at shutdown - the
# caller is outside this chunk).
_handlerList = []
def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.

    Invoked as a weakref callback (see _addHandlerRef), so it may run at
    arbitrary times, including interpreter shutdown.
    """
    # This function can be called during module teardown, when globals are
    # set to None. If _acquireLock is None, assume this is the case and do
    # nothing.
    if (_acquireLock is not None and _handlerList is not None and
        _releaseLock is not None):
        _acquireLock()
        try:
            if wr in _handlerList:
                _handlerList.remove(wr)
        finally:
            _releaseLock()
def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    _acquireLock()
    try:
        # The callback prunes this entry once the handler is collected.
        ref = weakref.ref(handler, _removeHandlerRef)
        _handlerList.append(ref)
    finally:
        _releaseLock()
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()
    def get_name(self):
        return self._name
    def set_name(self, name):
        # Keep the module-level _handlers name map in sync: drop the old
        # registration (if any) and register the new name, under the lock.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()
    # 'name' is exposed as a property so renaming updates the global map.
    name = property(get_name, set_name)
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        if threading:
            self.lock = threading.RLock()
        else: #pragma: no cover
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler. level must be an int or a str.
        """
        self.level = _checkLevel(level)
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try:    #unlikely to raise an exception, but you never know...
            if self._name and self._name in _handlers:
                del _handlers[self._name]
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            ei = sys.exc_info()
            try:
                traceback.print_exception(ei[0], ei[1], ei[2],
                                          None, sys.stderr)
                sys.stderr.write('Logged from file %s, line %s\n' % (
                                 record.filename, record.lineno))
            except IOError: #pragma: no cover
                pass    # see issue 5971
            finally:
                # Break the traceback reference cycle promptly.
                del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'   # appended to the stream after every record

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        self.acquire()
        try:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            stream.write(msg)
            stream.write(self.terminator)
            self.flush()
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except Exception:
            # Fix: this was a bare 'except:', which also swallowed
            # BaseException subclasses such as GeneratorExit. Only genuine
            # errors should be routed to handleError(); everything else
            # propagates.
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        filename -- file to open; stored as an absolute path
        mode     -- mode passed to open() (default 'a', append)
        encoding -- text encoding passed to open(), or None for the default
        delay    -- if true, defer opening the file until the first emit()
        """
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        self.delay = delay
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())
    def close(self):
        """
        Closes the stream.
        """
        self.acquire()
        try:
            # Only if a stream was actually opened: flush it, close it, and
            # let the superclass deregister the handler by name.
            if self.stream:
                self.flush()
                if hasattr(self.stream, "close"):
                    self.stream.close()
                StreamHandler.close(self)
                self.stream = None
        finally:
            self.release()
    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        return open(self.baseFilename, self.mode, encoding=self.encoding)
    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        # Deliberately bypass StreamHandler.__init__: it would capture and
        # store sys.stderr, whereas this class resolves the stream lazily
        # through the 'stream' property below.
        Handler.__init__(self, level)
    @property
    def stream(self):
        # Always the *current* sys.stderr, never a stored reference.
        return sys.stderr
# Handler of last resort, writing to whatever sys.stderr currently is, at
# WARNING level. 'lastResort' is the module-level hook users may replace.
# NOTE(review): the code that consults lastResort is outside this chunk.
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        # Used as a set: only the keys matter, values are always None.
        self.loggerMap = {alogger: None}

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        self.loggerMap.setdefault(alogger, None)
#
# Determine which class to use when instantiating loggers.
#
# None until configured via setLoggerClass(); Manager.getLogger falls back
# to this module-level value when the manager has no per-manager override.
_loggerClass = None
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    global _loggerClass
    # Logger itself is always acceptable; anything else must subclass it.
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class currently used when instantiating a logger.
    """
    return _loggerClass
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        # Maps dotted name -> Logger, or PlaceHolder for names only seen as
        # ancestors of real loggers.
        self.loggerDict = {}
        self.loggerClass = None        # per-manager override of _loggerClass
        self.logRecordFactory = None
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # A descendant was created before this logger: replace
                    # the placeholder with a real logger and rewire both
                    # directions of the hierarchy.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass
    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Scan successively shorter dotted prefixes; the nearest existing
        # Logger ancestor becomes the parent, and placeholders are created
        # for prefixes with no entry at all.
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            # No real ancestor found: parent directly to the root logger.
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                # c's current parent sits above alogger in the hierarchy,
                # so splice alogger in between the two.
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
    """
    Initialize the logger with a name and an optional level.
    """
    Filterer.__init__(self)
    self.name = name
    self.level = _checkLevel(level)
    self.propagate = True
    self.disabled = False
    self.handlers = []
    # Filled in by Manager._fixupParents() once the logger is registered.
    self.parent = None
def setLevel(self, level):
    """
    Set the logging level of this logger. level must be an int or a str
    naming a registered level.
    """
    self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
    """
    if not self.isEnabledFor(DEBUG):
        return
    self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'INFO'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
    """
    if not self.isEnabledFor(INFO):
        return
    self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
    """
    if not self.isEnabledFor(WARNING):
        return
    self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
    # Deprecated alias for warning(); kept for backward compatibility.
    warnings.warn("The 'warn' method is deprecated, "
        "use 'warning' instead", DeprecationWarning, 2)
    self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.error("Houston, we have a %s", "major problem", exc_info=1)
    """
    if not self.isEnabledFor(ERROR):
        return
    self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
    """
    Convenience method for logging an ERROR with exception information.
    """
    # Force exc_info so the current exception is captured by _log()
    # (deliberately overrides any exc_info the caller supplied).
    kwargs['exc_info'] = True
    self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
    """
    if not self.isEnabledFor(CRITICAL):
        return
    self._log(CRITICAL, msg, args, **kwargs)

# Backward-compatible alias.
fatal = critical
def log(self, level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
    """
    if not isinstance(level, int):
        # Non-integer levels are either an error or silently ignored,
        # depending on the module-level raiseExceptions flag.
        if raiseExceptions:
            raise TypeError("level must be an integer")
        return
    if self.isEnabledFor(level):
        self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.
    """
    f = currentframe()
    #On some versions of IronPython, currentframe() returns None if
    #IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)", None
    # Walk up the stack, skipping frames that belong to this module
    # (identified by _srcfile); the first other frame is the caller.
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        if filename == _srcfile:
            f = f.f_back
            continue
        sinfo = None
        if stack_info:
            sio = io.StringIO()
            sio.write('Stack (most recent call last):\n')
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            # Strip the trailing newline added by print_stack().
            if sinfo[-1] == '\n':
                sinfo = sinfo[:-1]
            sio.close()
        rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
        break
    return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.
    """
    record = _logRecordFactory(name, level, fn, lno, msg, args, exc_info,
                               func, sinfo)
    if extra is not None:
        for key in extra:
            # Refuse to clobber attributes the record already defines.
            if key in ("message", "asctime") or key in record.__dict__:
                raise KeyError("Attempt to overwrite %r in LogRecord" % key)
            record.__dict__[key] = extra[key]
    return record
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
    """
    Low-level logging routine which creates a LogRecord and then calls
    all the handlers of this logger to handle the record.
    """
    sinfo = None
    if _srcfile:
        #IronPython doesn't track Python frames, so findCaller raises an
        #exception on some versions of IronPython. We trap it here so that
        #IronPython can use logging.
        try:
            fn, lno, func, sinfo = self.findCaller(stack_info)
        except ValueError: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
    else: # pragma: no cover
        fn, lno, func = "(unknown file)", 0, "(unknown function)"
    if exc_info:
        # A truthy non-tuple (e.g. exc_info=1) means "capture the
        # exception currently being handled".
        if not isinstance(exc_info, tuple):
            exc_info = sys.exc_info()
    record = self.makeRecord(self.name, level, fn, lno, msg, args,
                             exc_info, func, extra, sinfo)
    self.handle(record)
def handle(self, record):
    """
    Call the handlers for the specified record.

    Used both for records created locally and for unpickled records
    received from a socket. Logger-level filtering is applied here.
    """
    if self.disabled:
        return
    if self.filter(record):
        self.callHandlers(record)
def addHandler(self, hdlr):
    """
    Attach *hdlr* to this logger (no-op if already attached).
    """
    _acquireLock()
    try:
        # Guard against double-registration of the same handler object.
        if hdlr not in self.handlers:
            self.handlers.append(hdlr)
    finally:
        _releaseLock()
def removeHandler(self, hdlr):
    """
    Detach *hdlr* from this logger (no-op if it is not attached).
    """
    _acquireLock()
    try:
        # EAFP: remove() raises ValueError only when absent, which we ignore.
        try:
            self.handlers.remove(hdlr)
        except ValueError:
            pass
    finally:
        _releaseLock()
def hasHandlers(self):
    """
    Return True if any handler is configured for this logger or an
    ancestor reachable via propagation.

    The walk up the hierarchy stops at the first logger whose
    "propagate" attribute is false - that is the last logger checked.
    """
    node = self
    while node:
        if node.handlers:
            return True
        if not node.propagate:
            return False
        node = node.parent
    return False
def callHandlers(self, record):
    """
    Pass a record to all relevant handlers.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. If no handler was found, output a one-off error
    message to sys.stderr. Stop searching up the hierarchy whenever a
    logger with the "propagate" attribute set to zero is found - that
    will be the last logger whose handlers are called.
    """
    c = self
    found = 0  # total number of handlers seen anywhere in the chain
    while c:
        for hdlr in c.handlers:
            found = found + 1
            # Handler-level threshold check (independent of logger levels).
            if record.levelno >= hdlr.level:
                hdlr.handle(record)
        if not c.propagate:
            c = None    #break out
        else:
            c = c.parent
    if (found == 0):
        # No handler anywhere in the hierarchy: use the module-level
        # lastResort handler if set, otherwise warn once per manager.
        if lastResort:
            if record.levelno >= lastResort.level:
                lastResort.handle(record)
        elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
    """
    Return the effective level for this logger: the first non-zero
    level found walking from this logger up through its parents,
    or NOTSET if none is set anywhere.
    """
    node = self
    while node:
        if node.level:
            return node.level
        node = node.parent
    return NOTSET
def isEnabledFor(self, level):
    """
    Is this logger enabled for level 'level'?

    A manager-wide "disable" threshold takes precedence over the
    logger's own effective level.
    """
    globally_disabled_upto = self.manager.disable
    if globally_disabled_upto >= level:
        return False
    return level >= self.getEffectiveLevel()
def getChild(self, suffix):
    """
    Get a logger which is a descendant to this one, e.g.
    logging.getLogger('abc').getChild('def.ghi') is equivalent to
    logging.getLogger('abc.def.ghi').

    Handy when the parent logger is named via __name__ rather than a
    literal string.
    """
    # The root logger contributes no name prefix.
    if self.root is not self:
        suffix = self.name + '.' + suffix
    return self.manager.getLogger(suffix)
class RootLogger(Logger):
    """
    The top of the logger hierarchy.

    It behaves like any other logger except that it always carries an
    explicit level and only one instance of it exists in the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        super().__init__("root", level)
# Module default for the logger class; presumably consulted when new
# loggers are instantiated (set/getLoggerClass live elsewhere) -- confirm.
_loggerClass = Logger
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """
    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra

    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        # NOTE: this replaces any 'extra' the caller passed explicitly.
        kwargs["extra"] = self.extra
        return msg, kwargs

    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger.
        """
        self.log(DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger.
        """
        self.log(INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger.
        """
        self.log(WARNING, msg, *args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        """
        Deprecated alias for warning().
        """
        warnings.warn("The 'warn' method is deprecated, "
                      "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger.
        """
        self.log(ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        Delegate an exception call to the underlying logger.
        """
        kwargs["exc_info"] = True
        self.log(ERROR, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger.
        """
        self.log(CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger._log(level, msg, args, **kwargs)

    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        if self.logger.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()

    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)

    def getEffectiveLevel(self):
        """
        Get the effective level for the underlying logger.
        """
        return self.logger.getEffectiveLevel()

    def hasHandlers(self):
        """
        See if the underlying logger has any handlers.
        """
        return self.logger.hasHandlers()
# The single, module-wide root logger; every Logger shares it and its manager.
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)

#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------

# Default handler format, e.g. "INFO:myapp:started".
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.

    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    style     If a format string is specified, use this to specify the
              type of format string (possible values '%', '{', '$', for
              %-formatting, :meth:`str.format` and :class:`string.Template`
              - defaults to '%').
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    handlers  If specified, this should be an iterable of already created
              handlers, which will be added to the root handler. Any handler
              in the list which does not have a formatter assigned will be
              assigned the formatter created in this function.

    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.

    .. versionchanged:: 3.2
       Added the ``style`` parameter.

    .. versionchanged:: 3.3
       Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
       incompatible arguments (e.g. ``handlers`` specified together with
       ``filename``/``filemode``, or ``filename``/``filemode`` specified
       together with ``stream``, or ``handlers`` specified together with
       ``stream``.
    """
    # Add thread safety in case someone mistakenly calls
    # basicConfig() from multiple threads
    _acquireLock()
    try:
        # Only ever configure once: a root logger with handlers is left alone.
        if len(root.handlers) == 0:
            handlers = kwargs.get("handlers")
            # Validate mutually exclusive keyword combinations first.
            if handlers is None:
                if "stream" in kwargs and "filename" in kwargs:
                    raise ValueError("'stream' and 'filename' should not be "
                                     "specified together")
            else:
                if "stream" in kwargs or "filename" in kwargs:
                    raise ValueError("'stream' or 'filename' should not be "
                                     "specified together with 'handlers'")
            if handlers is None:
                filename = kwargs.get("filename")
                if filename:
                    mode = kwargs.get("filemode", 'a')
                    h = FileHandler(filename, mode)
                else:
                    stream = kwargs.get("stream")
                    h = StreamHandler(stream)
                handlers = [h]
            fs = kwargs.get("format", BASIC_FORMAT)
            dfs = kwargs.get("datefmt", None)
            style = kwargs.get("style", '%')
            fmt = Formatter(fs, dfs, style)
            # Attach the shared formatter only where none was pre-assigned.
            for h in handlers:
                if h.formatter is None:
                    h.setFormatter(fmt)
                root.addHandler(h)
            level = kwargs.get("level")
            if level is not None:
                root.setLevel(level)
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    With no name (or an empty one), the root logger is returned.
    """
    if not name:
        return root
    return Logger.manager.getLogger(name)
def critical(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL' on the root logger,
    first calling basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

fatal = critical
def error(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR' on the root logger,
    first calling basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR' on the root logger, together
    with information about the exception currently being handled.
    basicConfig() is invoked first if the root logger has no handlers.
    """
    # Force inclusion of the active exception, then defer to error().
    kwargs["exc_info"] = True
    error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING' on the root logger,
    first calling basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
    """
    Deprecated alias for warning().
    """
    warnings.warn("The 'warn' function is deprecated, "
                  "use 'warning' instead", DeprecationWarning, 2)
    warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'INFO' on the root logger,
    first calling basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG' on the root logger,
    first calling basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root
    logger, first calling basicConfig() if it has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls of severity 'level' and below.
    """
    # Stored on the shared manager, so every logger's isEnabledFor() sees it.
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.
    """
    # Iterate a copy, newest handlers first; closing a handler mutates the
    # shared module-level list.
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            # wr is callable and may yield None -- presumably a weak
            # reference to the handler; confirm against _handlerList setup.
            h = wr()
            if h:
                try:
                    h.acquire()
                    h.flush()
                    h.close()
                except (IOError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)  # flush and close all handlers at interpreter exit
# Null handler

class NullHandler(Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """
    def handle(self, record):
        """Stub."""

    def emit(self, record):
        """Stub."""

    def createLock(self):
        # Nothing is ever emitted, so no lock is needed at all.
        self.lock = None
# Warnings integration

# Saved original warnings.showwarning while capture is active; None
# means warnings are not currently redirected (see captureWarnings).
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Implementation of showwarnings which redirects to logging, which will first
    check to see if the file parameter is None. If a file is specified, it will
    delegate to the original warnings implementation of showwarning. Otherwise,
    it will call warnings.formatwarning and will log the resulting string to a
    warnings logger named "py.warnings" with level logging.WARNING.
    """
    if file is None:
        # No target file: format the warning and route it through logging.
        formatted = warnings.formatwarning(message, category, filename,
                                           lineno, line)
        logger = getLogger("py.warnings")
        if not logger.handlers:
            logger.addHandler(NullHandler())
        logger.warning("%s", formatted)
    else:
        # A file was given: defer to the saved original implementation.
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno,
                                  file, line)
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.
    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.

    Both directions are idempotent: redundant calls are no-ops.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Install the redirect, remembering the original hook.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
    elif not capture and _warnings_showwarning is not None:
        # Restore the original hook and forget it.
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
| agpl-3.0 |
FedoraScientific/salome-paravis | test/VisuPrs/CutPlanes/A1.py | 1 | 1512 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/CutPlanes/A1 case
# Create Cut Planes for all data of the given MED file

import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis

# Create presentations
myParavis = paravis.myParavis

# Directory for saving snapshots
picturedir = get_picture_dir("CutPlanes/A1")

# Input MED file to build cut-plane presentations from.
# NOTE(review): 'file' shadows the Python 2 builtin of the same name.
file = datadir + "hexa_28320_ELEM.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.CUTPLANES], picturedir, pictureext)
| lgpl-2.1 |
guschmue/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13  # features per Boston housing example
_IRIS_INPUT_DIM = 4  # features per Iris example
def boston_input_fn(num_epochs=None):
  """Input fn returning (features, labels) tensors for the Boston dataset."""
  boston = base.load_boston()
  # Optionally cap the number of passes over the features.
  features = input_lib.limit_epochs(
      array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
def boston_input_fn_with_queue(num_epochs=None):
  """Like boston_input_fn, but also registers a trivial QueueRunner."""
  features, labels = boston_input_fn(num_epochs=num_epochs)

  # Create a minimal queue runner.
  fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
  queue_runner = queue_runner_impl.QueueRunner(fake_queue,
                                               [constant_op.constant(0)])
  queue_runner_impl.add_queue_runner(queue_runner)
  return features, labels
def iris_input_fn():
  """Input fn returning (features, labels) tensors for the Iris dataset."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
  return features, labels
def iris_input_fn_labels_dict():
  """Iris input fn whose labels are wrapped in a dict under key 'labels'."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = {
      'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
  }
  return features, labels
def boston_eval_fn():
  """Eval input fn: the full Boston dataset concatenated with itself."""
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  # Doubling the data gives 2 * n_examples rows for evaluation.
  return array_ops.concat([features, features], 0), array_ops.concat(
      [labels, labels], 0)
def extract(data, key):
  """Return data[key] when *data* is a dict (the key must exist);
  otherwise return *data* unchanged."""
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Linear-regression model_fn reading learning_rate from `params`."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')

  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Linear-regression model_fn with a fixed 0.1 Adagrad learning rate."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  # If features is still a (single-entry) dict, unwrap its sole tensor.
  if isinstance(features, dict):
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """3-class logistic-regression model_fn without a `mode` argument.

  Returns ({'class': ..., 'prob': ...}, loss, train_op).
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  # One-hot encode the integer labels into 3 classes.
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
# NOTE(review): these fixtures appear unused in this part of the file --
# presumably vocabulary-file test data used elsewhere; confirm before removal.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
  """Tests for the x/y, dict and input_fn input modes of Estimator."""

  def testContinueTrainingDictionaryInput(self):
    # Train, discard the estimator, then reload from the same model_dir.
    boston = base.load_boston()
    output_dir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=50)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    del est
    # Create another estimator object with the same output dir.
    est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)

    # Check we can evaluate and predict.
    scores2 = est2.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    self.assertAllClose(scores2['MSE'], scores['MSE'])
    predictions = np.array(list(est2.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions,
                                              float64_target['labels'])
    self.assertAllClose(other_score, scores['MSE'])

  def testBostonAll(self):
    # End-to-end fit/score/predict on Boston with array inputs.
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    float64_labels = boston.target.astype(np.float64)
    est.fit(x=boston.data, y=float64_labels, steps=100)
    scores = est.score(
        x=boston.data,
        y=float64_labels,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston.data)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(scores['MSE'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])

  def testBostonAllDictionaryInput(self):
    # Same as testBostonAll, but with dict-wrapped features and labels.
    boston = base.load_boston()
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=100)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(other_score, scores['MSE'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)

  def testIrisAll(self):
    # Classification: fit/score/predict, checking class/prob consistency.
    iris = base.load_iris()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=logistic_model_no_mode_fn))
    est.fit(iris.data, iris.target, steps=100)
    scores = est.score(
        x=iris.data,
        y=iris.target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = est.predict(x=iris.data)
    predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
    self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
    self.assertAllClose(predictions['class'], predictions_class)
    self.assertAllClose(
        predictions['class'], np.argmax(
            predictions['prob'], axis=1))
    other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
    self.assertAllClose(scores['accuracy'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])

  def testIrisAllDictionaryInput(self):
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    iris_data = {'input': iris.data}
    iris_target = {'labels': iris.target}
    est.fit(iris_data, iris_target, steps=100)
    scores = est.evaluate(
        x=iris_data,
        y=iris_target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = list(est.predict(x=iris_data))
    predictions_class = list(est.predict(x=iris_data, outputs=['class']))
    self.assertEqual(len(predictions), iris.target.shape[0])
    classes_batch = np.array([p['class'] for p in predictions])
    self.assertAllClose(classes_batch,
                        np.array([p['class'] for p in predictions_class]))
    self.assertAllClose(
        classes_batch,
        np.argmax(
            np.array([p['prob'] for p in predictions]), axis=1))
    other_score = _sklearn.accuracy_score(iris.target, classes_batch)
    self.assertAllClose(other_score, scores['accuracy'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)

  def testIrisInputFn(self):
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])

  def testIrisInputFnLabelsDict(self):
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
    _ = est.evaluate(
        input_fn=iris_input_fn_labels_dict,
        steps=1,
        metrics={
            'accuracy':
                metric_spec.MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='class',
                    label_key='labels')
        })
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])

  def testTrainInputFn(self):
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_eval_fn, steps=1)

  def testPredictInputFn(self):
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    # One epoch of input yields exactly one prediction per example.
    input_fn = functools.partial(boston_input_fn, num_epochs=1)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])

  def testPredictInputFnWithQueue(self):
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    # Two epochs through the queue-backed input doubles the predictions.
    input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0] * 2)

  def testPredictConstInputFn(self):
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)

    def input_fn():
      features = array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
      labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
      return features, labels

    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
taxpon/sverchok | nodes/list_struct/flip.py | 3 | 2953 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, IntProperty, StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (changable_sockets, dataCorrect, updateNode)
def flip(list, level):
    """
    Transpose the rows/columns of *list* at the nesting depth selected
    by *level*; ragged rows contribute only as far as they reach.
    """
    level -= 1
    if not level:
        # Target depth not yet reached: descend into every sub-list.
        return [flip(sub, level) for sub in list]
    # At the target depth: column i collects the i-th item of each row.
    flipped = []
    for i in range(maxlen(list)):
        column = []
        for row in list:
            try:
                column.append(row[i])
            except:  # bare except kept: rows shorter than i are skipped
                continue
        flipped.append(column)
    return flipped


def maxlen(list):
    """Return the length of the longest element of *list*."""
    return max(len(row) for row in list)
class ListFlipNode(bpy.types.Node, SverchCustomTreeNode):
    ''' ListFlipNode: transposes nested list data at a chosen depth. '''
    bl_idname = 'ListFlipNode'
    bl_label = 'List Flip'
    bl_icon = 'OUTLINER_OB_EMPTY'

    # Nesting depth at which the flip is applied (passed to flip() as level-1).
    level = IntProperty(name='level_to_count',
                        default=2, min=0, max=4,
                        update=updateNode)
    typ = StringProperty(name='typ',
                         default='')
    newsock = BoolProperty(name='newsock',
                           default=False)

    def sv_init(self, context):
        # One input and one output socket, both named "data".
        self.inputs.new('StringsSocket', "data", "data")
        self.outputs.new('StringsSocket', 'data', 'data')

    def draw_buttons(self, context, layout):
        layout.prop(self, "level", text="level")

    def update(self):
        # Adaptive socket: adjust the output socket type to match the input.
        inputsocketname = 'data'
        outputsocketname = ['data']
        changable_sockets(self, inputsocketname, outputsocketname)

    def process(self):
        # Only compute when both sides of the node are connected.
        if self.inputs['data'].is_linked and self.outputs['data'].is_linked:
            outEval = self.inputs['data'].sv_get()
            #outCorr = dataCorrect(outEval)  # disabled: dataCorrect caps nesting at 3 levels
            levels = self.level - 1
            out = flip(outEval, levels)
            self.outputs['data'].sv_set(out)
# Blender add-on registration hooks for this node class.
def register():
    bpy.utils.register_class(ListFlipNode)


def unregister():
    bpy.utils.unregister_class(ListFlipNode)


if __name__ == '__main__':
    register()
| gpl-3.0 |
pkoutsias/SickRage | tests/sickrage_tests/show/history_tests.py | 4 | 2948 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://SickRage.GitHub.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test history
"""
from __future__ import print_function
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
from sickbeard.common import Quality
from sickrage.show.History import History
class HistoryTests(unittest.TestCase):
    """
    Test history

    Exercises the private ``History._get_actions`` / ``History._get_limit``
    helpers against str and unicode inputs.
    """

    def test_get_actions(self):
        """
        Test get actions: action names (any capitalisation) map to the
        matching Quality constant; unknown, empty or None input maps to [].
        """
        test_cases = {
            None: [],
            '': [],
            'wrong': [],
            'downloaded': Quality.DOWNLOADED,
            'Downloaded': Quality.DOWNLOADED,
            'snatched': Quality.SNATCHED,
            'Snatched': Quality.SNATCHED,
        }

        unicode_test_cases = {
            u'': [],
            u'wrong': [],
            u'downloaded': Quality.DOWNLOADED,
            u'Downloaded': Quality.DOWNLOADED,
            u'snatched': Quality.SNATCHED,
            u'Snatched': Quality.SNATCHED,
        }

        for tests in test_cases, unicode_test_cases:
            # Fix: dict.iteritems() is Python 2 only; items() works on 2 and 3.
            for (action, result) in tests.items():
                self.assertEqual(History._get_actions(action), result)  # pylint: disable=protected-access

    def test_get_limit(self):
        """
        Test get limit: valid non-negative integers (or their string form)
        pass through truncated to int; everything else becomes 0.
        """
        test_cases = {
            None: 0,
            '': 0,
            '0': 0,
            '5': 5,
            '-5': 0,
            '1.5': 0,
            '-1.5': 0,
            5: 5,
            -5: 0,
            1.5: 1,
            -1.5: 0,
        }

        unicode_test_cases = {
            u'': 0,
            u'0': 0,
            u'5': 5,
            u'-5': 0,
            u'1.5': 0,
            u'-1.5': 0,
        }

        for tests in test_cases, unicode_test_cases:
            # Fix: dict.iteritems() is Python 2 only; items() works on 2 and 3.
            for (action, result) in tests.items():
                self.assertEqual(History._get_limit(action), result)  # pylint: disable=protected-access
if __name__ == '__main__':
    # Allow running this test module directly: build the suite and run it verbosely.
    print('=====> Testing %s' % __file__)
    SUITE = unittest.TestLoader().loadTestsFromTestCase(HistoryTests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 |
miing/mci_migo | acceptance/tests/edit/edit_warn_about_backup_device.py | 1 | 1552 | # Test enabling/disabling the warn_about_backup_device preference
from sst.actions import (
assert_checkbox_value,
click_button,
fails,
go_to,
set_checkbox_value,
)
from u1testutils.sst import config
from acceptance.devices import (
add_device,
delete_device,
)
from acceptance import helpers, urls
# setup: point SST at the environment's base URL and log in with a fresh,
# device-free account.
config.set_base_url_from_env()
helpers.login_or_register_account(device_cleanup=True)

# the test: no warning is shown while the account has no devices at all
go_to(urls.EDIT)
fails(helpers.get_backup_device_warning_div)

add_device('the single device')

# the test: a single device with no backup triggers the warning
helpers.assert_backup_device_warning()

# disable the warn_about_backup_device setting
set_checkbox_value(helpers.get_warn_about_backup_device_checkbox(), False)
click_button(helpers.get_update_preferences_button())
assert_checkbox_value(helpers.get_warn_about_backup_device_checkbox(), False)

# no more warning because the user does not want to be warned
fails(helpers.get_backup_device_warning_div)

# re-enable the warn_about_backup_device setting; warning reappears
set_checkbox_value(helpers.get_warn_about_backup_device_checkbox(), True)
click_button(helpers.get_update_preferences_button())
assert_checkbox_value(helpers.get_warn_about_backup_device_checkbox(), True)
helpers.assert_backup_device_warning()

# add a second device, ensure that the warning is no longer shown
add_device('the backup device')
fails(helpers.get_backup_device_warning_div)

# delete one device and ensure the warning is back
delete_device()
helpers.assert_backup_device_warning()
| agpl-3.0 |
meiavy/python-weixin | lib/python2.7/site-packages/web/python23.py | 75 | 1266 | """Python 2.3 compatabilty"""
import threading
class threadlocal(object):
    """Implementation of threading.local for python2.3.

    Attribute reads/writes/deletes are redirected to a per-thread,
    per-instance dict stored on the current Thread object.  Uses Python 2
    ``raise`` syntax on purpose: this whole module targets Python 2.3.
    """
    def __getattribute__(self, name):
        # ``__dict__`` itself is redirected to the per-thread storage;
        # any other attribute falls back to that storage when normal
        # lookup fails.
        if name == "__dict__":
            return threadlocal._getd(self)
        else:
            try:
                return object.__getattribute__(self, name)
            except AttributeError:
                try:
                    return self.__dict__[name]
                except KeyError:
                    raise AttributeError, name

    def __setattr__(self, name, value):
        # All writes go to the per-thread dict, never the real instance dict.
        self.__dict__[name] = value

    def __delattr__(self, name):
        try:
            del self.__dict__[name]
        except KeyError:
            raise AttributeError, name

    def _getd(self):
        """Return the dict holding this instance's values for the current thread."""
        t = threading.currentThread()
        if not hasattr(t, '_d'):
            # using __dict__ of thread as thread local storage
            t._d = {}

        _id = id(self)
        # there could be multiple instances of threadlocal.
        # use id(self) as key
        if _id not in t._d:
            t._d[_id] = {}
        return t._d[_id]
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statements, consistent with the module).
    d = threadlocal()
    d.x = 1
    print d.__dict__
    print d.x
| apache-2.0 |
Alberdi/ancientest | test/test_creature.py | 1 | 1084 | import unittest
import country, creature
class TestCreature(unittest.TestCase):
    """Tests for creature.Creature residency and travel behaviour."""

    def setUp(self):
        # Fresh creature for every test.
        self.c = creature.Creature()

    def test_where_do_i_live(self):
        # where_do_i_live() is None before any residency, and afterwards
        # reflects the country whose area the creature joined most recently.
        country1 = country.Country()
        country1.add_area([(0,0), (100,0), (100, 100)])
        country2 = country.Country()
        country2.add_area([(0,0), (100, 0), (100, -100)])
        self.assertIsNone(self.c.where_do_i_live())
        country1.area.add_resident(self.c)
        self.assertEqual(self.c.where_do_i_live(), country1)
        country2.area.add_resident(self.c)
        self.assertEqual(self.c.where_do_i_live(), country2)

    # A creature can only travel to an adjacent country
    def test_travel(self):
        # country1/country2 share an edge; country3 is disjoint, so the
        # second travel() call must leave the residence unchanged.
        country1 = country.Country()
        country1.add_area([(0,0), (100,0), (100, 100)])
        country2 = country.Country()
        country2.add_area([(0,0), (100, 0), (100, -100)])
        country3 = country.Country()
        country3.add_area([(200,0), (500, 0), (200, 200)])
        country1.area.add_resident(self.c)
        self.c.travel(country2)
        self.assertEqual(self.c.where_do_i_live(), country2)
        self.c.travel(country3)
        self.assertEqual(self.c.where_do_i_live(), country2)
| mit |
tardyp/buildbot | master/buildbot/test/unit/scripts/test_start.py | 3 | 4828 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import sys
import time
import mock
import twisted
from twisted.internet import defer
from twisted.internet.utils import getProcessOutputAndValue
from twisted.python import versions
from twisted.trial import unittest
from buildbot.scripts import start
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util.decorators import flaky
from buildbot.test.util.decorators import skipUnlessPlatformIs
def mkconfig(**kwargs):
    """Build a config dict for start(), as the command line would produce.

    Keyword arguments override (or extend) the defaults below.
    """
    return dict(
        {
            'quiet': False,
            'basedir': os.path.abspath('basedir'),
            'nodaemon': False,
        },
        **kwargs
    )
fake_master_tac = """\
from twisted.application import service
from twisted.internet import reactor
from twisted.python import log
application = service.Application('highscore')
class App(service.Service):
def startService(self):
super().startService()
log.msg("BuildMaster is running") # heh heh heh
reactor.callLater(0, reactor.stop)
app = App()
app.setServiceParent(application)
# isBuildmasterDir wants to see this -> Application('buildmaster')
"""
class TestStart(misc.StdoutAssertionsMixin, dirs.DirsMixin, unittest.TestCase):
    """Integration tests for ``buildbot.scripts.start.start``, run against a
    fake buildbot.tac in a temporary basedir."""

    def setUp(self):
        # Create the basedir and drop the fake master tac file into it.
        self.setUpDirs('basedir')
        with open(os.path.join('basedir', 'buildbot.tac'), 'wt') as f:
            f.write(fake_master_tac)
        self.setUpStdoutAssertions()

    def tearDown(self):
        self.tearDownDirs()

    # tests

    def test_start_not_basedir(self):
        # A basedir without a buildbot.tac must fail with exit code 1.
        self.assertEqual(start.start(mkconfig(basedir='doesntexist')), 1)
        self.assertInStdout('invalid buildmaster directory')

    def runStart(self, **config):
        """Run start() in a subprocess (so daemonization etc. cannot affect
        the test process) and return (stdout, stderr, rc) via a Deferred."""
        args = [
            '-c',
            'from buildbot.scripts.start import start; import sys; '
            'sys.exit(start(%r))' % (
                mkconfig(**config),),
        ]
        env = os.environ.copy()
        # Propagate the test runner's sys.path to the child interpreter.
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        return getProcessOutputAndValue(sys.executable, args=args, env=env)

    @defer.inlineCallbacks
    def test_start_no_daemon(self):
        (_, err, rc) = yield self.runStart(nodaemon=True)

        # on python 3.5, cryptography loudly complains to upgrade
        if sys.version_info[:2] != (3, 5):
            self.assertEqual((err, rc), (b'', 0))

    @defer.inlineCallbacks
    def test_start_quiet(self):
        res = yield self.runStart(quiet=True)

        # on python 3.5, cryptography loudly complains to upgrade
        if sys.version_info[:2] != (3, 5):
            self.assertEqual(res, (b'', b'', 0))

    @skipUnlessPlatformIs('posix')
    @defer.inlineCallbacks
    def test_start_timeout_nonnumber(self):
        # A non-numeric start_timeout is rejected with rc 1 and a message.
        (out, err, rc) = yield self.runStart(start_timeout='a')

        self.assertEqual((rc, err), (1, b''))
        self.assertSubstring(b'Start timeout must be a number\n', out)

    @skipUnlessPlatformIs('posix')
    @defer.inlineCallbacks
    def test_start_timeout_number_string(self):
        # integer values from command-line options come in as strings
        res = yield self.runStart(start_timeout='10')

        self.assertEqual(res, (mock.ANY, b'', 0))

    @flaky(bugNumber=2760)
    @skipUnlessPlatformIs('posix')
    @defer.inlineCallbacks
    def test_start(self):
        try:
            (out, err, rc) = yield self.runStart()

            self.assertEqual((rc, err), (0, b''))
            self.assertSubstring(
                'buildmaster appears to have (re)started correctly', out)
        finally:
            # wait for the pidfile to go away after the reactor.stop
            # in buildbot.tac takes effect
            pidfile = os.path.join('basedir', 'twistd.pid')
            while os.path.exists(pidfile):
                time.sleep(0.01)
# Skip the subprocess-based tests on ancient Twisted releases.
# Fix: the original assigned to bare ``test_start`` / ``test_start_quiet``,
# which do not exist at module scope (they are TestStart methods), so the
# assignment raised NameError whenever the condition was true.
if twisted.version <= versions.Version('twisted', 9, 0, 0):
    TestStart.test_start.skip = TestStart.test_start_quiet.skip = \
        "Skipping due to suprious PotentialZombieWarning."

# the remainder of this script does obscene things:
#  - forks
#  - shells out to tail
#  - starts and stops the reactor
# so testing it will be *far* more pain than is worthwhile
| gpl-2.0 |
qqbuby/redmine_book | redmine_book/packages/redmine/packages/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories (indexes into Latin1ClassModel values 0-3).
FREQ_CAT_NUM = 4

# Character classes used by the Latin-1 prober.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each of the 256 byte values to one of the classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Likelihood of each (previous class, current class) transition:
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
# Indexed as Latin1ClassModel[prev * CLASS_NUM + curr].
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0,  0,  0,  0,  0,  0,  0,  0,  # UDF
    0,  3,  3,  3,  3,  3,  3,  3,  # OTH
    0,  3,  3,  3,  3,  3,  3,  3,  # ASC
    0,  3,  3,  3,  1,  1,  3,  3,  # ASS
    0,  3,  3,  3,  1,  2,  1,  2,  # ACV
    0,  3,  3,  3,  3,  3,  3,  3,  # ACO
    0,  3,  1,  3,  1,  1,  1,  3,  # ASV
    0,  3,  1,  3,  1,  1,  3,  3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Frequency-based prober for Latin-1 (reported as windows-1252) text.

    Tracks character-class transitions; illegal transitions rule the
    charset out, and the frequency counters drive the confidence score.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from the neutral "other" class with all counters zeroed.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        """Consume a chunk of bytes, updating the transition counters.

        Returns the prober state (eNotMe once an illegal transition is seen).
        """
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            # Likelihood category of the (previous, current) class pair.
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Illegal transition: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass

        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01

        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very likely" pairs raise confidence; "very unlikely" pairs
            # are penalised heavily (x20).
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
| mit |
anshumanchatterji/selenium | py/selenium/webdriver/phantomjs/service.py | 5 | 2041 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
    """
    Object that manages the starting and stopping of PhantomJS / Ghostdriver
    """

    def __init__(self, executable_path, port=0, service_args=None, log_path=None):
        """
        Creates a new instance of the Service

        :Args:
         - executable_path : Path to PhantomJS binary
         - port : Port the service is running on
         - service_args : A List of other command line options to pass to PhantomJS
         - log_path: Path for PhantomJS service to log to
        """
        # Keep a private copy so later mutation of the caller's list has
        # no effect on this service.
        self.service_args = [] if service_args is None else service_args[:]
        log_path = log_path or "ghostdriver.log"
        service.Service.__init__(self, executable_path, port=port,
                                 log_file=open(log_path, 'w'))

    def command_line_args(self):
        """Return the full argument list used to launch PhantomJS."""
        return self.service_args + ["--webdriver=%d" % self.port]

    @property
    def service_url(self):
        """
        Gets the url of the GhostDriver Service
        """
        return "http://localhost:%d/wd/hub" % self.port

    def send_remote_shutdown_command(self):
        # GhostDriver terminates together with the process; nothing to send.
        pass
| apache-2.0 |
gnowxilef/youtube-dl | youtube_dl/extractor/la7.py | 53 | 2209 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
js_to_json,
smuggle_url,
)
class LA7IE(InfoExtractor):
    """Extractor for la7.it / tg.la7.it videos; delegates playback URLs to
    the Kaltura extractor via a smuggled service URL."""
    IE_NAME = 'la7.it'
    _VALID_URL = r'''(?x)(https?://)?(?:
        (?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video)/|
        tg\.la7\.it/repliche-tgla7\?id=
    )(?P<id>.+)'''

    _TESTS = [{
        # 'src' is a plain URL
        'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722',
        'md5': '8b613ffc0c4bf9b9e377169fc19c214c',
        'info_dict': {
            'id': 'inccool8-02-10-2015-163722',
            'ext': 'mp4',
            'title': 'Inc.Cool8',
            'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico',
            'thumbnail': 're:^https?://.*',
            'uploader_id': 'kdla7pillole@iltrovatore.it',
            'timestamp': 1443814869,
            'upload_date': '20151002',
        },
    }, {
        # 'src' is a dictionary
        'url': 'http://tg.la7.it/repliche-tgla7?id=189080',
        'md5': '6b0d8888d286e39870208dfeceaf456b',
        'info_dict': {
            'id': '189080',
            'ext': 'mp4',
            'title': 'TG LA7',
        },
    }, {
        'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The page embeds the player config as a JS object literal passed
        # to videoLa7(...); convert it to JSON before parsing.
        player_data = self._parse_json(
            self._search_regex(r'videoLa7\(({[^;]+})\);', webpage, 'player data'),
            video_id, transform_source=js_to_json)

        # Hand off to the Kaltura extractor, smuggling the LA7 service URL.
        return {
            '_type': 'url_transparent',
            'url': smuggle_url('kaltura:103:%s' % player_data['vid'], {
                'service_url': 'http://kdam.iltrovatore.it',
            }),
            'id': video_id,
            'title': player_data['title'],
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': player_data.get('poster'),
            'ie_key': 'Kaltura',
        }
| unlicense |
peak6/st2 | st2common/tests/unit/test_rbac_resolvers_action.py | 6 | 24920 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.action import Action
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.action import ActionDB
from st2common.models.api.action import ActionAPI
from st2common.rbac.resolvers import ActionPermissionsResolver
from tests.unit.test_rbac_resolvers import BasePermissionsResolverTestCase
__all__ = [
'ActionPermissionsResolverTestCase'
]
class ActionPermissionsResolverTestCase(BasePermissionsResolverTestCase):
def setUp(self):
super(ActionPermissionsResolverTestCase, self).setUp()
# Create some mock users
user_1_db = UserDB(name='1_role_action_pack_grant')
user_1_db = User.add_or_update(user_1_db)
self.users['custom_role_action_pack_grant'] = user_1_db
user_2_db = UserDB(name='1_role_action_grant')
user_2_db = User.add_or_update(user_2_db)
self.users['custom_role_action_grant'] = user_2_db
user_3_db = UserDB(name='custom_role_pack_action_all_grant')
user_3_db = User.add_or_update(user_3_db)
self.users['custom_role_pack_action_all_grant'] = user_3_db
user_4_db = UserDB(name='custom_role_action_all_grant')
user_4_db = User.add_or_update(user_4_db)
self.users['custom_role_action_all_grant'] = user_4_db
user_5_db = UserDB(name='custom_role_action_execute_grant')
user_5_db = User.add_or_update(user_5_db)
self.users['custom_role_action_execute_grant'] = user_5_db
user_6_db = UserDB(name='action_pack_action_create_grant')
user_6_db = User.add_or_update(user_6_db)
self.users['action_pack_action_create_grant'] = user_6_db
user_7_db = UserDB(name='action_pack_action_all_grant')
user_7_db = User.add_or_update(user_7_db)
self.users['action_pack_action_all_grant'] = user_7_db
user_8_db = UserDB(name='action_action_create_grant')
user_8_db = User.add_or_update(user_8_db)
self.users['action_action_create_grant'] = user_8_db
user_9_db = UserDB(name='action_action_all_grant')
user_9_db = User.add_or_update(user_9_db)
self.users['action_action_all_grant'] = user_9_db
user_10_db = UserDB(name='custom_role_action_list_grant')
user_10_db = User.add_or_update(user_10_db)
self.users['custom_role_action_list_grant'] = user_10_db
# Create some mock resources on which permissions can be granted
action_1_db = ActionDB(pack='test_pack_1', name='action1', entry_point='',
runner_type={'name': 'run-local'})
action_1_db = Action.add_or_update(action_1_db)
self.resources['action_1'] = action_1_db
action_2_db = ActionDB(pack='test_pack_1', name='action2', entry_point='',
runner_type={'name': 'run-local'})
action_2_db = Action.add_or_update(action_1_db)
self.resources['action_2'] = action_2_db
action_3_db = ActionDB(pack='test_pack_2', name='action3', entry_point='',
runner_type={'name': 'run-local'})
action_3_db = Action.add_or_update(action_3_db)
self.resources['action_3'] = action_3_db
# Create some mock roles with associated permission grants
# Custom role 2 - one grant on parent pack
# "action_view" on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_3_db = RoleDB(name='custom_role_action_pack_grant',
permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['custom_role_action_pack_grant'] = role_3_db
# Custom role 4 - one grant on action
# "action_view" on action_3
grant_db = PermissionGrantDB(resource_uid=self.resources['action_3'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_action_grant', permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_action_grant'] = role_4_db
# Custom role - "action_all" grant on a parent action pack
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_pack_action_all_grant',
permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_pack_action_all_grant'] = role_4_db
# Custom role - "action_all" grant on action
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_action_all_grant', permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_action_all_grant'] = role_4_db
# Custom role - "action_execute" on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_EXECUTE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_5_db = RoleDB(name='custom_role_action_execute_grant',
permission_grants=permission_grants)
role_5_db = Role.add_or_update(role_5_db)
self.roles['custom_role_action_execute_grant'] = role_5_db
# Custom role - "action_create" grant on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_6_db = RoleDB(name='action_pack_action_create_grant',
permission_grants=permission_grants)
role_6_db = Role.add_or_update(role_6_db)
self.roles['action_pack_action_create_grant'] = role_6_db
# Custom role - "action_all" grant on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_7_db = RoleDB(name='action_pack_action_all_grant',
permission_grants=permission_grants)
role_7_db = Role.add_or_update(role_7_db)
self.roles['action_pack_action_all_grant'] = role_7_db
# Custom role - "action_create" grant on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_8_db = RoleDB(name='action_action_create_grant',
permission_grants=permission_grants)
role_8_db = Role.add_or_update(role_8_db)
self.roles['action_action_create_grant'] = role_8_db
# Custom role - "action_all" grant on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_9_db = RoleDB(name='action_action_all_grant',
permission_grants=permission_grants)
role_9_db = Role.add_or_update(role_9_db)
self.roles['action_action_all_grant'] = role_9_db
# Custom role - "action_list" grant
grant_db = PermissionGrantDB(resource_uid=None,
resource_type=None,
permission_types=[PermissionType.ACTION_LIST])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_10_db = RoleDB(name='custom_role_action_list_grant',
permission_grants=permission_grants)
role_10_db = Role.add_or_update(role_10_db)
self.roles['custom_role_action_list_grant'] = role_10_db
# Create some mock role assignments
user_db = self.users['custom_role_action_pack_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_pack_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_grant']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_action_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_pack_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_pack_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_execute_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_execute_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_pack_action_create_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_pack_action_create_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_pack_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_pack_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_action_create_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_action_create_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_list_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_list_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
    def test_user_has_permission(self):
        """Global (non-resource) permission checks: admin always passes,
        observer passes for view-type permissions, users without a matching
        grant fail, and an explicit action_list grant passes."""
        resolver = ActionPermissionsResolver()

        # Admin user, should always return true
        user_db = self.users['admin']
        self.assertUserHasPermission(resolver=resolver,
                                     user_db=user_db,
                                     permission_type=PermissionType.ACTION_LIST)

        # Observer, should always return true for VIEW permissions
        user_db = self.users['observer']
        self.assertUserHasPermission(resolver=resolver,
                                     user_db=user_db,
                                     permission_type=PermissionType.ACTION_LIST)

        # No roles, should return false for everything
        user_db = self.users['no_roles']
        self.assertUserDoesntHavePermission(resolver=resolver,
                                            user_db=user_db,
                                            permission_type=PermissionType.ACTION_LIST)

        # Custom role with no permission grants, should return false for everything
        user_db = self.users['1_custom_role_no_permissions']
        self.assertUserDoesntHavePermission(resolver=resolver,
                                            user_db=user_db,
                                            permission_type=PermissionType.ACTION_LIST)

        # Custom role with "action_list" grant
        user_db = self.users['custom_role_action_list_grant']
        self.assertUserHasPermission(resolver=resolver,
                                     user_db=user_db,
                                     permission_type=PermissionType.ACTION_LIST)
    def test_user_has_resource_api_permission(self):
        """API-object (pre-creation) permission checks for ACTION_CREATE:
        grants may come directly from the action or be inherited from the
        parent pack, via either a specific or an "all" grant."""
        resolver = ActionPermissionsResolver()

        # Admin user, should always return true
        user_db = self.users['admin']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserHasResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Observer, should return false (create is not a view permission)
        user_db = self.users['observer']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserDoesntHaveResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # No roles, should return false
        user_db = self.users['no_roles']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserDoesntHaveResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Custom role with no permission grants, should return false
        user_db = self.users['1_custom_role_no_permissions']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserDoesntHaveResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Custom role with "action_create" grant on parent pack
        user_db = self.users['action_pack_action_create_grant']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserHasResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Custom role with "action_all" grant on the parent pack
        user_db = self.users['action_pack_action_all_grant']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserHasResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Custom role with "action_create" grant directly on the resource
        user_db = self.users['action_action_create_grant']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserHasResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)

        # Custom role with "action_all" grant directly on the resource
        user_db = self.users['action_action_all_grant']
        resource_db = self.resources['action_1']
        resource_api = ActionAPI.from_model(resource_db)

        self.assertUserHasResourceApiPermission(
            resolver=resolver,
            user_db=user_db,
            resource_api=resource_api,
            permission_type=PermissionType.ACTION_CREATE)
def test_user_has_resource_db_permission(self):
resolver = ActionPermissionsResolver()
all_permission_types = PermissionType.get_valid_permissions_for_resource_type(
ResourceType.ACTION)
# Admin user, should always return true
resource_db = self.resources['action_1']
user_db = self.users['admin']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Observer, should always return true for VIEW permission
user_db = self.users['observer']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_MODIFY)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_DELETE)
# No roles, should return false for everything
user_db = self.users['no_roles']
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role with no permission grants, should return false for everything
user_db = self.users['1_custom_role_no_permissions']
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role with unrelated permission grant to parent pack
user_db = self.users['custom_role_pack_grant']
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_EXECUTE)
# Custom role with with grant on the parent pack
user_db = self.users['custom_role_action_pack_grant']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_EXECUTE)
# Custom role with a direct grant on action
user_db = self.users['custom_role_action_grant']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_3'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_EXECUTE)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_3'],
permission_type=PermissionType.ACTION_EXECUTE)
# Custom role - "action_all" grant on the action parent pack
user_db = self.users['custom_role_pack_action_all_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role - "action_all" grant on the action
user_db = self.users['custom_role_action_all_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role - "action_execute" grant on action_1
user_db = self.users['custom_role_action_execute_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_type=PermissionType.ACTION_EXECUTE)
# "execute" also grants "view"
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_type=PermissionType.ACTION_VIEW)
permission_types = [
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE
]
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=permission_types)
| apache-2.0 |
karyon/django | tests/delete_regress/models.py | 325 | 3172 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Award(models.Model):
    # Award can point at any model instance via the generic
    # (content_type, object_id) pair.
    name = models.CharField(max_length=25)
    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    content_object = GenericForeignKey()


class AwardNote(models.Model):
    # Plain FK child of Award; deleted by cascade when the Award goes away.
    award = models.ForeignKey(Award, models.CASCADE)
    note = models.CharField(max_length=100)


class Person(models.Model):
    # Reverse generic relation back to Award.
    name = models.CharField(max_length=25)
    awards = GenericRelation(Award)
class Book(models.Model):
    pagecount = models.IntegerField()


class Toy(models.Model):
    name = models.CharField(max_length=50)


class Child(models.Model):
    # M2M with an explicit intermediate ("through") model.
    name = models.CharField(max_length=50)
    toys = models.ManyToManyField(Toy, through='PlayedWith')


class PlayedWith(models.Model):
    # Intermediate table for Child <-> Toy; `date` is stored under a custom
    # database column name.
    child = models.ForeignKey(Child, models.CASCADE)
    toy = models.ForeignKey(Toy, models.CASCADE)
    date = models.DateField(db_column='date_col')


class PlayedWithNote(models.Model):
    # FK onto the intermediate model itself.
    played = models.ForeignKey(PlayedWith, models.CASCADE)
    note = models.TextField()
class Contact(models.Model):
    label = models.CharField(max_length=100)


class Email(Contact):
    # Multi-table inheritance child of Contact.
    email_address = models.EmailField(max_length=100)


class Researcher(models.Model):
    contacts = models.ManyToManyField(Contact, related_name="research_contacts")


class Food(models.Model):
    name = models.CharField(max_length=20, unique=True)


class Eaten(models.Model):
    # FK targeting a non-primary-key unique column (to_field="name").
    food = models.ForeignKey(Food, models.CASCADE, to_field="name")
    meal = models.CharField(max_length=20)
# Models for #15776: mix of CASCADE and nullable SET_NULL relations.
class Policy(models.Model):
    policy_number = models.CharField(max_length=10)


class Version(models.Model):
    policy = models.ForeignKey(Policy, models.CASCADE)


class Location(models.Model):
    # Nullable SET_NULL FK: deleting a Version nulls this out instead of
    # deleting the Location.
    version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)


class Item(models.Model):
    version = models.ForeignKey(Version, models.CASCADE)
    location = models.ForeignKey(Location, models.SET_NULL, blank=True, null=True)
# Models for #16128: a chain of proxy models (Image/Photo proxy File) with
# FKs pointing at different levels of the proxy hierarchy.
class File(models.Model):
    pass


class Image(File):
    class Meta:
        proxy = True


class Photo(Image):
    # Proxy of a proxy.
    class Meta:
        proxy = True


class FooImage(models.Model):
    my_image = models.ForeignKey(Image, models.CASCADE)


class FooFile(models.Model):
    my_file = models.ForeignKey(File, models.CASCADE)


class FooPhoto(models.Model):
    my_photo = models.ForeignKey(Photo, models.CASCADE)


class FooFileProxy(FooFile):
    class Meta:
        proxy = True
class OrgUnit(models.Model):
    name = models.CharField(max_length=64, unique=True)


class Login(models.Model):
    description = models.CharField(max_length=32)
    orgunit = models.ForeignKey(OrgUnit, models.CASCADE)


class House(models.Model):
    address = models.CharField(max_length=32)


class OrderedPerson(models.Model):
    # NOTE(review): Meta.ordering presumably exists to exercise deletion with
    # ordered querysets — confirm against the delete_regress tests.
    name = models.CharField(max_length=32)
    lives_in = models.ForeignKey(House, models.CASCADE)

    class Meta:
        ordering = ['name']
| bsd-3-clause |
wenderen/servo | tests/wpt/web-platform-tests/tools/py/py/_builtin.py | 259 | 6521 | import sys
try:
    reversed = reversed
except NameError:
    # Compatibility for Python < 2.4, where reversed() does not exist yet.
    def reversed(sequence):
        """reversed(sequence) -> reverse iterator over values of the sequence
        Return a reverse iterator
        """
        # Honor a custom __reversed__ hook if the object provides one.
        if hasattr(sequence, '__reversed__'):
            return sequence.__reversed__()
        if not hasattr(sequence, '__getitem__'):
            raise TypeError("argument to reversed() must be a sequence")
        return reversed_iterator(sequence)
class reversed_iterator(object):
    """Iterator yielding the items of a sequence from last to first.

    Backport helper for the ``reversed`` fallback; implements the old-style
    iterator protocol (``next`` method rather than ``__next__``).
    """

    def __init__(self, seq):
        self.seq = seq
        self.remaining = len(seq)

    def __iter__(self):
        return self

    def next(self):
        # Count down from the end of the sequence; zero means exhausted.
        if self.remaining <= 0:
            raise StopIteration
        self.remaining -= 1
        return self.seq[self.remaining]

    def __length_hint__(self):
        # Number of items still to be produced.
        return self.remaining
try:
    any = any
except NameError:
    # Compatibility for Python < 2.5, where any() does not exist yet.
    def any(iterable):
        """Return True if any element of *iterable* is true."""
        for x in iterable:
            if x:
                return True
        return False


try:
    all = all
except NameError:
    # Compatibility for Python < 2.5, where all() does not exist yet.
    def all(iterable):
        """Return True if every element of *iterable* is true."""
        for x in iterable:
            if not x:
                return False
        return True
try:
    sorted = sorted
except NameError:
    # Compatibility for Python < 2.4: emulate sorted() (cmp/key/reverse)
    # on top of list.sort().
    builtin_cmp = cmp # need to use cmp as keyword arg

    def sorted(iterable, cmp=None, key=None, reverse=0):
        """Return a new sorted list from *iterable* (pre-2.4 backport)."""
        use_cmp = None
        if key is not None:
            # Decorate each element with its key so the comparison function
            # operates on the computed keys (decorate-sort-undecorate).
            if cmp is None:
                def use_cmp(x, y):
                    return builtin_cmp(x[0], y[0])
            else:
                def use_cmp(x, y):
                    return cmp(x[0], y[0])
            l = [(key(element), element) for element in iterable]
        else:
            if cmp is not None:
                use_cmp = cmp
            l = list(iterable)
        if use_cmp is not None:
            l.sort(use_cmp)
        else:
            l.sort()
        if reverse:
            l.reverse()
        if key is not None:
            # Undecorate: strip the keys added above.
            return [element for (_, element) in l]
        return l
try:
    set, frozenset = set, frozenset
except NameError:
    # Python 2.3: the set types lived in the ``sets`` module.
    from sets import set, frozenset

# pass through
enumerate = enumerate

try:
    BaseException = BaseException
except NameError:
    # Python < 2.5: BaseException does not exist; fall back to Exception.
    BaseException = Exception

try:
    GeneratorExit = GeneratorExit
except NameError:
    class GeneratorExit(Exception):
        """ This exception is never raised, it is there to make it possible to
        write code compatible with CPython 2.5 even in lower CPython
        versions."""
        pass
    GeneratorExit.__module__ = 'exceptions'

# Exceptions that should never be swallowed by generic exception handlers.
_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)

try:
    callable = callable
except NameError:
    # Python 3.0/3.1: callable() was removed (it returned in 3.2).
    def callable(obj):
        return hasattr(obj, "__call__")
if sys.version_info >= (3, 0):
    # Python 3 branch: print/exec are already functions; the exec() trick
    # avoids a SyntaxError when this file is parsed by Python 2.
    exec ("print_ = print ; exec_=exec")
    import builtins

    # some backward compatibility helpers
    _basestring = str
    def _totext(obj, encoding=None, errors=None):
        """Coerce *obj* to text, decoding bytes with *encoding*/*errors*."""
        if isinstance(obj, bytes):
            if errors is None:
                obj = obj.decode(encoding)
            else:
                obj = obj.decode(encoding, errors)
        elif not isinstance(obj, str):
            obj = str(obj)
        return obj

    def _isbytes(x):
        return isinstance(x, bytes)
    def _istext(x):
        return isinstance(x, str)

    text = str
    bytes = bytes

    def _getimself(function):
        # Instance a bound method is bound to, or None.
        return getattr(function, '__self__', None)

    def _getfuncdict(function):
        return getattr(function, "__dict__", None)

    def _getcode(function):
        return getattr(function, "__code__", None)

    def execfile(fn, globs=None, locs=None):
        """Backport of the Python 2 execfile() builtin removed in Python 3."""
        if globs is None:
            # Default to the caller's namespaces.
            back = sys._getframe(1)
            globs = back.f_globals
            locs = back.f_locals
            del back
        elif locs is None:
            locs = globs
        fp = open(fn, "r")
        try:
            source = fp.read()
        finally:
            fp.close()
        co = compile(source, fn, "exec", dont_inherit=True)
        exec_(co, globs, locs)

else:
    # Python 2 branch: alias the 2.x names and backport py3k print()/exec().
    import __builtin__ as builtins
    _totext = unicode
    _basestring = basestring
    text = unicode
    bytes = str
    execfile = execfile
    callable = callable
    def _isbytes(x):
        return isinstance(x, str)
    def _istext(x):
        return isinstance(x, unicode)

    def _getimself(function):
        return getattr(function, 'im_self', None)

    def _getfuncdict(function):
        return getattr(function, "__dict__", None)

    def _getcode(function):
        # Old-style functions expose func_code instead of __code__.
        try:
            return getattr(function, "__code__")
        except AttributeError:
            return getattr(function, "func_code", None)

    def print_(*args, **kwargs):
        """ minimal backport of py3k print statement. """
        sep = ' '
        if 'sep' in kwargs:
            sep = kwargs.pop('sep')
        end = '\n'
        if 'end' in kwargs:
            end = kwargs.pop('end')
        file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
        if kwargs:
            # Anything left over is an unsupported keyword.
            args = ", ".join([str(x) for x in kwargs])
            raise TypeError("invalid keyword arguments: %s" % args)
        at_start = True
        for x in args:
            if not at_start:
                file.write(sep)
            file.write(str(x))
            at_start = False
        file.write(end)

    def exec_(obj, globals=None, locals=None):
        """ minimal backport of py3k exec statement. """
        __tracebackhide__ = True
        if globals is None:
            # Default to the caller's namespaces.
            frame = sys._getframe(1)
            globals = frame.f_globals
            if locals is None:
                locals = frame.f_locals
        elif locals is None:
            locals = globals
        exec2(obj, globals, locals)

if sys.version_info >= (3, 0):
    def _reraise(cls, val, tb):
        """Re-raise *val* of type *cls* with traceback *tb* (py3 spelling)."""
        __tracebackhide__ = True
        assert hasattr(val, '__traceback__')
        raise cls.with_traceback(val, tb)
else:
    # The three-argument raise is a SyntaxError on Python 3, so the py2
    # spellings of _reraise/exec2 must be compiled from a string.
    exec ("""
def _reraise(cls, val, tb):
    __tracebackhide__ = True
    raise cls, val, tb
def exec2(obj, globals, locals):
    __tracebackhide__ = True
    exec obj in globals, locals
""")
def _tryimport(*names):
    """Import and return the first module in *names* that imports cleanly.

    If every candidate fails to import, re-raise the last ImportError
    (preserving its original traceback via _reraise).
    """
    assert names
    last_excinfo = None
    for modname in names:
        try:
            __import__(modname)
        except ImportError:
            # Remember the failure and try the next candidate.
            last_excinfo = sys.exc_info()
            continue
        return sys.modules[modname]
    _reraise(*last_excinfo)
| mpl-2.0 |
atvcaptain/enigma2 | lib/python/Components/TimerSanityCheck.py | 1 | 12252 | from __future__ import print_function
from __future__ import absolute_import
import NavigationInstance
from time import localtime, mktime, gmtime
from ServiceReference import ServiceReference
from enigma import iServiceInformation, eServiceCenter, eServiceReference, getBestPlayableServiceReference
from timer import TimerEntry
from Tools.CIHelper import cihelper
from Components.config import config
class TimerSanityCheck:
	"""Detects recording conflicts between a (possibly new) timer and an
	existing timer list by simulating the recordings chronologically.

	The check expands repeated timers into concrete begin/end events,
	sorts all events and, at every begin event, starts a simulated
	("fake") recording to ask the tuner allocation whether it would
	succeed.  ``check()`` returns True when no conflict was found;
	the conflicting timers can then be read via ``getSimulTimerList()``.
	"""

	def __init__(self, timerlist, newtimer=None):
		# Offset of localtime from UTC, computed from an arbitrary fixed
		# moment (25h after the epoch) — used to shift repeat bitmasks.
		self.localtimediff = 25*3600 - mktime(gmtime(25*3600))
		self.timerlist = timerlist
		self.newtimer = newtimer
		self.simultimer = []
		self.rep_eventlist = []
		self.nrep_eventlist = []
		# Flags attached to events; bflag (begin) < eflag (end) so that at
		# identical timestamps the begin event sorts first.
		self.bflag = -1
		self.eflag = 1

	def check(self, ext_timer=1):
		"""Run the sanity check; returns True when no conflict exists."""
		if ext_timer != 1:
			self.newtimer = ext_timer
		if self.newtimer is None:
			self.simultimer = []
		else:
			self.simultimer = [ self.newtimer ]
		return self.checkTimerlist()

	def getSimulTimerList(self):
		# List of timers involved in the detected conflict (if any).
		return self.simultimer

	def doubleCheck(self):
		"""Return True when newtimer duplicates (lies inside) an existing
		timer for the very same service; used to suppress duplicates."""
		if self.newtimer is not None and self.newtimer.service_ref.ref.valid():
			self.simultimer = [ self.newtimer ]
			for timer in self.timerlist:
				if timer == self.newtimer:
					return True
				else:
					if self.newtimer.begin >= timer.begin and self.newtimer.end <= timer.end:
						fl1 = timer.service_ref.ref.flags & eServiceReference.isGroup
						fl2 = self.newtimer.service_ref.ref.flags & eServiceReference.isGroup
						if fl1 != fl2:
							return False
						if fl1: #is group
							return timer.service_ref.ref.getPath() == self.newtimer.service_ref.ref.getPath()
						# Compare the DVB triple/quadruple (SID/TSID/ONID/namespace).
						getUnsignedDataRef1 = timer.service_ref.ref.getUnsignedData
						getUnsignedDataRef2 = self.newtimer.service_ref.ref.getUnsignedData
						for x in (1, 2, 3, 4):
							if getUnsignedDataRef1(x) != getUnsignedDataRef2(x):
								break
						else:
							return True
		return False

	def checkTimerlist(self, ext_timer=1):
		"""Core conflict detection; returns True when the timer list is sane.

		with special service for external plugins.
		Entries in eventlist:
		  timeindex
		  BeginEndFlag: self.bflag (-1) for begin, self.eflag (+1) for end,
		    so begin events sort before end events at equal timestamps
		  index: -1 for the new timer, 0..n index of the existing timers
		  count of running timers
		"""
		serviceHandler = eServiceCenter.getInstance()
		# create a list with all start and end times
		# split it into recurring and singleshot timers
		##################################################################################
		# process the new timer
		self.rep_eventlist = []
		self.nrep_eventlist = []
		if ext_timer != 1:
			self.newtimer = ext_timer
		if (self.newtimer is not None) and (not self.newtimer.disabled):
			if not self.newtimer.service_ref.ref.valid():
				return False
			# Rotate the repeat bitmask so bit 0 corresponds to the first day.
			rflags = self.newtimer.repeated
			rflags = ((rflags & 0x7F)>> 3)|((rflags & 0x07)<<4)
			if rflags:
				begin = self.newtimer.begin % 86400 # map to first day
				# Shift the weekday bitmask when the local-time offset moves
				# the begin time across a day boundary.
				if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
					rflags = ((rflags >> 1)& 0x3F)|((rflags << 6)& 0x40)
				elif (self.localtimediff < 0) and (begin < self.localtimediff):
					rflags = ((rflags << 1)& 0x7E)|((rflags >> 6)& 0x01)
				while rflags: # then arrange on the week
					if rflags & 1:
						self.rep_eventlist.append((begin, -1))
					begin += 86400
					rflags >>= 1
			else:
				self.nrep_eventlist.extend([(self.newtimer.begin, self.bflag, -1), (self.newtimer.end, self.eflag, -1)])
		##################################################################################
		# now process existing timers
		idx = 0
		for timer in self.timerlist:
			if (timer != self.newtimer) and (not timer.disabled):
				if timer.repeated:
					rflags = timer.repeated
					rflags = ((rflags & 0x7F)>> 3)|((rflags & 0x07)<<4)
					begin = timer.begin % 86400 # map all to first day
					if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
						rflags = ((rflags >> 1)& 0x3F)|((rflags << 6)& 0x40)
					elif (self.localtimediff < 0) and (begin < self.localtimediff):
						rflags = ((rflags << 1)& 0x7E)|((rflags >> 6)& 0x01)
					while rflags:
						if rflags & 1:
							self.rep_eventlist.append((begin, idx))
						begin += 86400
						rflags >>= 1
				elif timer.state < TimerEntry.StateEnded:
					self.nrep_eventlist.extend([(timer.begin, self.bflag, idx), (timer.end, self.eflag, idx)])
			idx += 1
		################################################################################
		# journalize timer repeations
		# (expand each repeating slot into concrete events across the weeks
		# spanned by the one-shot timers, or two sample weeks otherwise)
		if self.nrep_eventlist:
			interval_begin = min(self.nrep_eventlist)[0]
			interval_end = max(self.nrep_eventlist)[0]
			offset_0 = interval_begin - (interval_begin % 604800)
			weeks = (interval_end - offset_0) / 604800
			if (interval_end - offset_0) % 604800:
				weeks += 1
			for cnt in range(int(weeks)):
				for event in self.rep_eventlist:
					if event[1] == -1: # -1 is the identifier of the changed timer
						event_begin = self.newtimer.begin
						event_end = self.newtimer.end
					else:
						event_begin = self.timerlist[event[1]].begin
						event_end = self.timerlist[event[1]].end
					new_event_begin = event[0] + offset_0 + (cnt * 604800)
					# summertime correction
					new_lth = localtime(new_event_begin).tm_hour
					new_event_begin += 3600 * (localtime(event_begin).tm_hour - new_lth)
					new_event_end = new_event_begin + (event_end - event_begin)
					if event[2] if False else event[1] == -1:
						if new_event_begin >= self.newtimer.begin: # is the soap already running?
							self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
					else:
						if new_event_begin >= self.timerlist[event[1]].begin: # is the soap already running?
							self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
		else:
			offset_0 = 345600 # the Epoch begins on Thursday
			for cnt in (0, 1): # test two weeks to take care of Sunday-Monday transitions
				for event in self.rep_eventlist:
					if event[1] == -1: # -1 is the identifier of the changed timer
						event_begin = self.newtimer.begin
						event_end = self.newtimer.end
					else:
						event_begin = self.timerlist[event[1]].begin
						event_end = self.timerlist[event[1]].end
					new_event_begin = event[0] + offset_0 + (cnt * 604800)
					new_event_end = new_event_begin + (event_end - event_begin)
					self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
		################################################################################
		# order list chronological
		self.nrep_eventlist.sort()
		##################################################################################
		# detect overlapping timers and overlapping times
		fakeRecList = []
		ConflictTimer = None
		ConflictTunerType = None
		newTimerTunerType = None
		cnt = 0
		idx = 0
		is_ci_use = 0 # NOTE(review): never read in this method
		is_ci_timer_conflict = 0
		overlaplist = []
		# CI (Common Interface) handling: if the new timer records a service
		# assigned to a CI slot, collect its projected begin/end windows.
		ci_timer = False
		if config.misc.use_ci_assignment.value and cihelper.ServiceIsAssigned(self.newtimer.service_ref.ref):
			ci_timer = self.newtimer
			ci_timer_begin = ci_timer.begin
			ci_timer_end = ci_timer.end
			ci_timer_dur = ci_timer_end - ci_timer_begin
			ci_timer_events = []
			for ev in self.nrep_eventlist:
				if ev[2] == -1:
					ci_timer_events.append((ev[0], ev[0] + ci_timer_dur))
		for event in self.nrep_eventlist:
			cnt += event[1]
			if event[2] == -1: # new timer
				timer = self.newtimer
			else:
				timer = self.timerlist[event[2]]
			if event[1] == self.bflag:
				# Begin event: start a simulated recording to test tuner allocation.
				tunerType = [ ]
				if timer.service_ref.ref and timer.service_ref.ref.flags & eServiceReference.isGroup:
					fakeRecService = NavigationInstance.instance.recordService(getBestPlayableServiceReference(timer.service_ref.ref, eServiceReference(), True), True)
				else:
					fakeRecService = NavigationInstance.instance.recordService(timer.service_ref, True)
				if fakeRecService:
					fakeRecResult = fakeRecService.start(True)
				else:
					fakeRecResult = -1
				#print "[TimerSanityCheck] +++", len(NavigationInstance.instance.getRecordings(True)), fakeRecResult
				if fakeRecResult == -6 and len(NavigationInstance.instance.getRecordings(True)) < 2:
					print("[TimerSanityCheck] less than two timers in the simulated recording list - timer conflict is not plausible - ignored !")
					fakeRecResult = 0
				if not fakeRecResult: # tune okay
					#feinfo = fakeRecService.frontendInfo()
					#if feinfo:
					#	tunerType.append(feinfo.getFrontendData().get("tuner_type"))
					if hasattr(fakeRecService, 'frontendInfo') and hasattr(fakeRecService.frontendInfo(), 'getFrontendData'):
						feinfo = fakeRecService.frontendInfo().getFrontendData()
						tunerType.append(feinfo.get("tuner_type"))
				else: # tune failed.. so we must go another way to get service type (DVB-S, DVB-T, DVB-C)
					def getServiceType(ref): # helper function to get a service type of a service reference
						serviceInfo = serviceHandler.info(ref)
						serviceInfo = serviceInfo and serviceInfo.getInfoObject(ref, iServiceInformation.sTransponderData)
						return serviceInfo and serviceInfo["tuner_type"] or ""
					ref = timer.service_ref.ref
					if ref.flags & eServiceReference.isGroup: # service group ?
						serviceList = serviceHandler.list(ref) # get all alternative services
						if serviceList:
							for ref in serviceList.getContent("R"): # iterate over all group service references
								type = getServiceType(ref)
								if not type in tunerType: # just add single time
									tunerType.append(type)
					else:
						tunerType.append(getServiceType(ref))
				if event[2] == -1: # new timer
					newTimerTunerType = tunerType
				overlaplist.append((fakeRecResult, timer, tunerType))
				fakeRecList.append((timer, fakeRecService))
				if fakeRecResult:
					if ConflictTimer is None: # just take care of the first conflict
						ConflictTimer = timer
						ConflictTunerType = tunerType
			elif event[1] == self.eflag:
				# End event: stop the corresponding simulated recording.
				for fakeRec in fakeRecList:
					if timer == fakeRec[0] and fakeRec[1]:
						NavigationInstance.instance.stopRecordService(fakeRec[1])
						fakeRecList.remove(fakeRec)
				fakeRec = None
				for entry in overlaplist:
					if entry[1] == timer:
						overlaplist.remove(entry)
			else:
				print("Bug: unknown flag!")
			# CI conflict: two overlapping timers on different services that
			# both need the (single) CI slot cannot run together.
			if ci_timer and cihelper.ServiceIsAssigned(timer.service_ref.ref):
				if event[1] == self.bflag:
					timer_begin = event[0]
					timer_end = event[0] + (timer.end - timer.begin)
				else:
					timer_end = event[0]
					timer_begin = event[0] - (timer.end - timer.begin)
				if timer != ci_timer:
					for ci_ev in ci_timer_events:
						if (ci_ev[0] >= timer_begin and ci_ev[0] <= timer_end) or (ci_ev[1] >= timer_begin and ci_ev[1] <= timer_end):
							if ci_timer.service_ref.ref != timer.service_ref.ref:
								is_ci_timer_conflict = 1
								break
					if is_ci_timer_conflict == 1:
						if ConflictTimer is None:
							ConflictTimer = timer
							ConflictTunerType = tunerType
			self.nrep_eventlist[idx] = (event[0], event[1], event[2], cnt, overlaplist[:]) # insert a duplicate into current overlaplist
			idx += 1
		if ConflictTimer is None: # no conflict found :)
			return True
		##################################################################################
		# we have detected a conflict, now we must figure out the involved timers
		if self.newtimer is not None: # new timer?
			if self.newtimer is not ConflictTimer: # the new timer is not the conflicting timer?
				for event in self.nrep_eventlist:
					if len(event[4]) > 1: # entry in overlaplist of this event??
						kt = False
						nt = False
						for entry in event[4]:
							if entry[1] is ConflictTimer:
								kt = True
							if entry[1] is self.newtimer:
								nt = True
						if nt and kt:
							# Prefer reporting the new timer as the conflict source.
							ConflictTimer = self.newtimer
							ConflictTunerType = newTimerTunerType
							break
		self.simultimer = [ ConflictTimer ]
		# Collect every timer that overlaps the conflict and shares a tuner type.
		for event in self.nrep_eventlist:
			if len(event[4]) > 1: # entry in overlaplist of this event??
				for entry in event[4]:
					if entry[1] is ConflictTimer:
						break
				else:
					continue
				for entry in event[4]:
					if not entry[1] in self.simultimer:
						for x in entry[2]:
							if x in ConflictTunerType:
								self.simultimer.append(entry[1])
								break
		if len(self.simultimer) < 2:
			print("Possible Bug: unknown Conflict!")
			return True
		return False # conflict detected!
| gpl-2.0 |
davidzchen/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py | 14 | 31865 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
# Seeded RNG so the tests are deterministic across runs.
rng = np.random.RandomState(0)
# Private helper shared by the test classes below.
_to_complex = linear_operator_circulant._to_complex
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorCirculantBaseTest(object):
  """Common class for circulant tests."""

  # Per-dtype absolute/relative tolerances used when comparing the operator
  # against the explicitly constructed dense circulant matrix.
  _atol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-7,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-7
  }
  _rtol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-7,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-7
  }

  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """We overwrite the FFT operation mapping for testing."""
    with test.TestCase._constrain_devices_and_set_default(
        self, sess, use_gpu, force_gpu) as sess:
      yield sess

  def _shape_to_spectrum_shape(self, shape):
    # If spectrum.shape = batch_shape + [N],
    # this creates an operator of shape batch_shape + [N, N],
    # so dropping the trailing dim maps operator shape -> spectrum shape.
    return shape[:-1]

  def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
    """Creates a circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way. This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape: Python list. Desired shape of returned matrix.
      dtype: Type to cast the returned matrix to.

    Returns:
      Circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      return array_ops.zeros(shape, dtype)
    # Explicitly compute the action of spectrum on basis vectors.
    # Column m of the matrix is IFFT(spectrum * FFT(e_m)).
    matrix_rows = []
    for m in range(domain_dimension):
      x = np.zeros([domain_dimension])
      # x is a basis vector.
      x[m] = 1.0
      fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
      h_convolve_x = fft_ops.ifft(spectrum * fft_x)
      matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
class LinearOperatorCirculantTestSelfAdjointOperator(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when operator is self-adjoint.

  Real spectrum <==> Self adjoint operator.
  Note that when the spectrum is real, the operator may still be complex.
  """

  @staticmethod
  def dtypes_to_test():
    # This operator will always be complex because, although the spectrum is
    # real, the matrix will not be real.
    return [dtypes.complex64, dtypes.complex128]

  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    """Build the operator under test plus the reference dense matrix."""
    shape = shape_info.shape
    # For this test class, we are creating real spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # spectrum is bounded away from zero.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    if ensure_self_adjoint_and_pd:
      # Positive spectrum <==> positive definite operator.
      spectrum = math_ops.abs(spectrum)
    # If dtype is complex, cast spectrum to complex. The imaginary part will be
    # zero, so the operator will still be self-adjoint.
    spectrum = math_ops.cast(spectrum, dtype)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide the static shape to exercise the dynamic-shape code paths.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        is_self_adjoint=True,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
    return operator, mat

  @test_util.disable_xla("No registered Const")
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    # A Hermitian spectrum should produce a matrix with (numerically) zero
    # imaginary part.
    with self.cached_session():
      spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)

  def test_tape_safe(self):
    # Gradients must flow to a Variable-backed spectrum.
    spectrum = variables_module.Variable(
        math_ops.cast([1. + 0j, 1. + 0j], dtypes.complex64))
    operator = linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=True)
    self.check_tape_safe(operator)
class LinearOperatorCirculantTestHermitianSpectrum(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when the spectrum is Hermitian.

  Hermitian spectrum <==> Real valued operator. We test both real and complex
  dtypes here though. So in some cases the matrix will be complex but with
  zero imaginary part.
  """

  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    """Build an operator with a Hermitian spectrum plus the reference matrix."""
    shape = shape_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)
    pre_spectrum = math_ops.cast(math_ops.abs(pre_spectrum), dtype=dtype)
    pre_spectrum_c = _to_complex(pre_spectrum)
    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft(pre_spectrum_c)
    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)
    spectrum = fft_ops.fft(h_c)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide the static shape to exercise the dynamic-shape code paths.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        input_output_dtype=dtype,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
    )
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
    return operator, mat

  @test_util.disable_xla("No registered Const")
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    # A Hermitian spectrum should produce a matrix with (numerically) zero
    # imaginary part.
    with self.cached_session():
      spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)

  def test_tape_safe(self):
    # Gradients must flow to a Variable-backed (non-Hermitian here) spectrum.
    spectrum = variables_module.Variable(
        math_ops.cast([1. + 0j, 1. + 1j], dtypes.complex64))
    operator = linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)
    self.check_tape_safe(operator)
class LinearOperatorCirculantTestNonHermitianSpectrum(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when the spectrum is not Hermitian.

  Non-Hermitian spectrum <==> Complex valued operator.
  We test only complex dtypes here.
  """

  @staticmethod
  def dtypes_to_test():
    # Operator values are complex, so only complex dtypes apply here.
    return [dtypes.complex64, dtypes.complex128]

  # Skip Cholesky since we are explicitly testing non-hermitian
  # spectra.
  @staticmethod
  def skip_these_tests():
    return ["cholesky", "eigvalsh"]

  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    # A non-Hermitian spectrum is never self-adjoint/PD, so the hint is unused.
    del ensure_self_adjoint_and_pd
    shape = shape_info.shape
    # Will be well conditioned enough to get accurate solves.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)

    lin_op_spectrum = spectrum

    if use_placeholder:
      # Feed the spectrum with unknown static shape to exercise dynamic paths.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum, input_output_dtype=dtype)

    self.assertEqual(
        operator.parameters,
        {
            "input_output_dtype": dtype,
            "is_non_singular": None,
            "is_positive_definite": None,
            "is_self_adjoint": None,
            "is_square": True,
            "name": "LinearOperatorCirculant",
            "spectrum": lin_op_spectrum,
        })

    # Dense reference matrix built by the explicit (slow) cross-check path.
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)

    return operator, mat

  @test_util.disable_xla("No registered Const")
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    with self.cached_session():
      spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)

  def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
    with self.cached_session() as sess:
      # Real positive spectrum ==> real symmetric positive-definite operator.
      spectrum = math_ops.cast([6., 4, 2], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix, matrix_h = sess.run(
          [operator.to_dense(),
           linalg.adjoint(operator.to_dense())])
      self.assertAllClose(matrix, matrix_h)
      self.evaluate(operator.assert_positive_definite())  # Should not fail
      self.evaluate(operator.assert_self_adjoint())  # Should not fail

  def test_defining_operator_using_real_convolution_kernel(self):
    with self.cached_session():
      convolution_kernel = [1., 2., 1.]
      spectrum = fft_ops.fft(
          math_ops.cast(convolution_kernel, dtypes.complex64))

      # spectrum is shape [3] ==> operator is shape [3, 3]
      # spectrum is Hermitian ==> operator is real.
      operator = linalg.LinearOperatorCirculant(spectrum)

      # Allow for complex output so we can make sure it has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)

      matrix = self.evaluate(operator.to_dense())
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)

  @test_util.run_v1_only("currently failing on v2")
  def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    with self.cached_session():
      # Make spectrum the FFT of a real convolution kernel h. This ensures that
      # spectrum is Hermitian.
      h = linear_operator_test_util.random_normal(shape=(3, 4))
      spectrum = fft_ops.fft(math_ops.cast(h, dtypes.complex64))
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      # Tolerance scaled by the operator size (3 * 4).
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)

  def test_convolution_kernel_same_as_first_row_of_to_dense(self):
    # Batch of two spectra, each of length 3.
    spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
    with self.cached_session():
      operator = linalg.LinearOperatorCirculant(spectrum)
      h = operator.convolution_kernel()
      c = operator.to_dense()

      self.assertAllEqual((2, 3), h.shape)
      self.assertAllEqual((2, 3, 3), c.shape)
      # Kernel is compared against c[:, :, 0] (the leading slice of the last
      # axis of the dense batch matrices).
      self.assertAllClose(self.evaluate(h), self.evaluate(c)[:, :, 0])

  def test_assert_non_singular_fails_for_singular_operator(self):
    # A zero entry in the spectrum makes the operator singular.
    spectrum = math_ops.cast([0 + 0j, 4 + 0j, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Singular operator"):
        self.evaluate(operator.assert_non_singular())

  def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
    spectrum = math_ops.cast([-3j, 4 + 0j, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      self.evaluate(operator.assert_non_singular())  # Should not fail

  def test_assert_positive_definite_fails_for_non_positive_definite(self):
    # The purely imaginary entry 2j has non-positive real part.
    spectrum = math_ops.cast([6. + 0j, 4 + 0j, 2j], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Not positive definite"):
        self.evaluate(operator.assert_positive_definite())

  def test_assert_positive_definite_does_not_fail_when_pos_def(self):
    spectrum = math_ops.cast([6. + 0j, 4 + 0j, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      self.evaluate(operator.assert_positive_definite())  # Should not fail

  def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
    spectrum = [1., 2.]
    with self.assertRaisesRegex(ValueError, "real.*always.*self-adjoint"):
      linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)

  def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
    spectrum = [1., 2.]
    operator = linalg.LinearOperatorCirculant(spectrum)
    self.assertTrue(operator.is_self_adjoint)
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorCirculant2DBaseTest(object):
  """Common class for 2D circulant tests."""

  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """We overwrite the FFT operation mapping for testing."""
    with test.TestCase._constrain_devices_and_set_default(
        self, sess, use_gpu, force_gpu) as sess:
      yield sess

  @staticmethod
  def operator_shapes_infos():
    shape_info = linear_operator_test_util.OperatorShapesInfo
    # non-batch operators (n, n) and batch operators.
    return [
        shape_info((0, 0)),
        shape_info((1, 1)),
        shape_info((1, 6, 6)),
        shape_info((3, 4, 4)),
        shape_info((2, 1, 3, 3))
    ]

  def _shape_to_spectrum_shape(self, shape):
    """Get a spectrum shape that will make an operator of desired shape."""
    # This 2D block circulant operator takes a spectrum of shape
    #   batch_shape + [N0, N1],
    # and creates and operator of shape
    #   batch_shape + [N0*N1, N0*N1]
    # Each case below picks (N0, N1) whose product is the operator dimension.
    if shape == (0, 0):
      return (0, 0)
    elif shape == (1, 1):
      return (1, 1)
    elif shape == (1, 6, 6):
      return (1, 2, 3)
    elif shape == (3, 4, 4):
      return (3, 2, 2)
    elif shape == (2, 1, 3, 3):
      return (2, 1, 3, 1)
    else:
      raise ValueError("Unhandled shape: %s" % shape)

  def _spectrum_to_circulant_2d(self, spectrum, shape, dtype):
    """Creates a block circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape: Python list.  Desired shape of returned matrix.
      dtype: Type to cast the returned matrix to.

    Returns:
      Block circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    # Degenerate 0-dimensional operator: nothing to compute.
    if not domain_dimension:
      return array_ops.zeros(shape, dtype)

    block_shape = spectrum_shape[-2:]

    # Explicitly compute the action of spectrum on basis vectors.
    # Columns of the dense matrix are IFFT2D[spectrum * FFT2D[basis vector]].
    matrix_rows = []
    for n0 in range(block_shape[0]):
      for n1 in range(block_shape[1]):
        x = np.zeros(block_shape)
        # x is a basis vector.
        x[n0, n1] = 1.0
        fft_x = fft_ops.fft2d(math_ops.cast(x, spectrum.dtype))
        h_convolve_x = fft_ops.ifft2d(spectrum * fft_x)
        # We want the flat version of the action of the operator on a basis
        # vector, not the block version.
        h_convolve_x = array_ops.reshape(h_convolve_x, shape[:-1])
        matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
class LinearOperatorCirculant2DTestHermitianSpectrum(
    LinearOperatorCirculant2DBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant2D when the spectrum is Hermitian.

  Hermitian spectrum <==> Real valued operator.  We test both real and complex
  dtypes here though.  So in some cases the matrix will be complex but with
  zero imaginary part.
  """

  @staticmethod
  def skip_these_tests():
    return ["cond"]

  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    shape = shape_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft2d(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = fft_ops.fft2d(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      # Feed with unknown static shape to exercise dynamic-shape code paths.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)

    self.assertEqual(
        operator.parameters,
        {
            "input_output_dtype": dtype,
            "is_non_singular": None,
            "is_positive_definite": (
                True if ensure_self_adjoint_and_pd else None),
            "is_self_adjoint": (
                True if ensure_self_adjoint_and_pd else None),
            "is_square": True,
            "name": "LinearOperatorCirculant2D",
            "spectrum": lin_op_spectrum,
        })

    # Dense reference matrix from the explicit (slow) cross-check path.
    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
class LinearOperatorCirculant2DTestNonHermitianSpectrum(
    LinearOperatorCirculant2DBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when the spectrum is not Hermitian.

  Non-Hermitian spectrum <==> Complex valued operator.
  We test only complex dtypes here.
  """

  @staticmethod
  def dtypes_to_test():
    return [dtypes.complex64, dtypes.complex128]

  @staticmethod
  def skip_these_tests():
    # Cholesky/eigvalsh need self-adjoint operators; skip for these spectra.
    return ["cholesky", "eigvalsh"]

  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    # Non-Hermitian spectra are never self-adjoint/PD, so the hint is unused.
    del ensure_self_adjoint_and_pd
    shape = shape_info.shape
    # Will be well conditioned enough to get accurate solves.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum, input_output_dtype=dtype)

    self.assertEqual(
        operator.parameters,
        {
            "input_output_dtype": dtype,
            "is_non_singular": None,
            "is_positive_definite": None,
            "is_self_adjoint": None,
            "is_square": True,
            "name": "LinearOperatorCirculant2D",
            "spectrum": lin_op_spectrum,
        }
    )

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat

  def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
    with self.cached_session():  # Necessary for fft_kernel_label_map
      # This is a real and hermitian spectrum.
      spectrum = [[1., 2., 2.], [3., 4., 4.], [3., 4., 4.]]
      # NOTE(review): this constructs the 1-D LinearOperatorCirculant (so the
      # leading dimension acts as a batch), not LinearOperatorCirculant2D as
      # the enclosing class name suggests -- confirm this is intentional.
      operator = linalg.LinearOperatorCirculant(spectrum)

      matrix_tensor = operator.to_dense()
      self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
      matrix_t = array_ops.matrix_transpose(matrix_tensor)
      imag_matrix = math_ops.imag(matrix_tensor)
      matrix, matrix_transpose, imag_matrix = self.evaluate(
          [matrix_tensor, matrix_t, imag_matrix])

      np.testing.assert_allclose(0, imag_matrix, atol=1e-6)
      self.assertAllClose(matrix, matrix_transpose, atol=0)

  def test_real_spectrum_gives_self_adjoint_operator(self):
    with self.cached_session():
      # This is a real and hermitian spectrum.
      spectrum = linear_operator_test_util.random_normal(
          shape=(3, 3), dtype=dtypes.float32)
      operator = linalg.LinearOperatorCirculant2D(spectrum)

      matrix_tensor = operator.to_dense()
      self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
      matrix_h = linalg.adjoint(matrix_tensor)

      matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
      self.assertAllClose(matrix, matrix_h, atol=1e-5)

  def test_assert_non_singular_fails_for_singular_operator(self):
    # A zero entry in the spectrum makes the operator singular.
    spectrum = math_ops.cast([[0 + 0j, 4 + 0j], [2j + 2, 3. + 0j]],
                             dtypes.complex64)
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Singular operator"):
        self.evaluate(operator.assert_non_singular())

  def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
    spectrum = math_ops.cast([[-3j, 4 + 0j], [2j + 2, 3. + 0j]],
                             dtypes.complex64)
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    with self.cached_session():
      self.evaluate(operator.assert_non_singular())  # Should not fail

  def test_assert_positive_definite_fails_for_non_positive_definite(self):
    # The purely imaginary entry 2j has non-positive real part.
    spectrum = math_ops.cast([[6. + 0j, 4 + 0j], [2j, 3. + 0j]],
                             dtypes.complex64)
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Not positive definite"):
        self.evaluate(operator.assert_positive_definite())

  def test_assert_positive_definite_does_not_fail_when_pos_def(self):
    spectrum = math_ops.cast([[6. + 0j, 4 + 0j], [2j + 2, 3. + 0j]],
                             dtypes.complex64)
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    with self.cached_session():
      self.evaluate(operator.assert_positive_definite())  # Should not fail

  def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
    spectrum = [[1., 2.], [3., 4]]
    with self.assertRaisesRegex(ValueError, "real.*always.*self-adjoint"):
      linalg.LinearOperatorCirculant2D(spectrum, is_self_adjoint=False)

  def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
    spectrum = [[1., 2.], [3., 4]]
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    self.assertTrue(operator.is_self_adjoint)

  def test_invalid_rank_raises(self):
    # A rank-1 spectrum cannot define a 2-D circulant operator.
    spectrum = array_ops.constant(np.float32(rng.rand(2)))
    with self.assertRaisesRegex(ValueError, "must have at least 2 dimensions"):
      linalg.LinearOperatorCirculant2D(spectrum)

  def test_tape_safe(self):
    spectrum = variables_module.Variable(
        math_ops.cast([[1. + 0j, 1. + 0j], [1. + 1j, 2. + 2j]],
                      dtypes.complex64))
    operator = linalg.LinearOperatorCirculant2D(spectrum)
    self.check_tape_safe(operator)
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorCirculant3DTest(test.TestCase):
  """Simple test of the 3D case.  See also the 1D and 2D tests."""

  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """We overwrite the FFT operation mapping for testing."""
    with test.TestCase._constrain_devices_and_set_default(
        self, sess, use_gpu, force_gpu) as sess:
      yield sess

  def test_real_spectrum_gives_self_adjoint_operator(self):
    with self.cached_session():
      # This is a real and hermitian spectrum.
      spectrum = linear_operator_test_util.random_normal(
          shape=(2, 2, 3, 5), dtype=dtypes.float32)
      operator = linalg.LinearOperatorCirculant3D(spectrum)
      # Spectrum (2, 2, 3, 5): batch of 2 operators of size 2*3*5.
      self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)

      self.assertEqual(
          operator.parameters,
          {
              "input_output_dtype": dtypes.complex64,
              "is_non_singular": None,
              "is_positive_definite": None,
              "is_self_adjoint": None,
              "is_square": True,
              "name": "LinearOperatorCirculant3D",
              "spectrum": spectrum,
          })

      matrix_tensor = operator.to_dense()
      self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
      matrix_h = linalg.adjoint(matrix_tensor)

      matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
      self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
      self.assertAllClose(matrix, matrix_h)

  def test_defining_operator_using_real_convolution_kernel(self):
    with self.cached_session():
      convolution_kernel = linear_operator_test_util.random_normal(
          shape=(2, 2, 3, 5), dtype=dtypes.float32)
      # Convolution kernel is real ==> spectrum is Hermitian.
      spectrum = fft_ops.fft3d(
          math_ops.cast(convolution_kernel, dtypes.complex64))

      # spectrum is Hermitian ==> operator is real.
      operator = linalg.LinearOperatorCirculant3D(spectrum)
      self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)

      # Allow for complex output so we can make sure it has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)
      matrix = self.evaluate(operator.to_dense())
      self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-5)

  def test_defining_spd_operator_by_taking_real_part(self):
    with self.cached_session():  # Necessary for fft_kernel_label_map
      # S is real and positive.
      s = linear_operator_test_util.random_uniform(
          shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

      # Let S = S1 + S2, the Hermitian and anti-hermitian parts.
      # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
      # where ^H is the Hermitian transpose of the function:
      #    f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
      # We want to isolate S1, since
      #   S1 is Hermitian by construction
      #   S1 is real since S is
      #   S1 is positive since it is the sum of two positive kernels
      # IDFT[S] = IDFT[S1] + IDFT[S2]
      #         =      H1  +      H2
      # where H1 is real since it is Hermitian,
      # and H2 is imaginary since it is anti-Hermitian.
      ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

      # Throw away H2, keep H1.
      real_ifft_s = math_ops.real(ifft_s)

      # This is the perfect spectrum!
      # spectrum = DFT[H1]
      #          = S1,
      fft_real_ifft_s = fft_ops.fft3d(
          math_ops.cast(real_ifft_s, dtypes.complex64))

      # S1 is Hermitian ==> operator is real.
      # S1 is real ==> operator is self-adjoint.
      # S1 is positive ==> operator is positive-definite.
      operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

      # Allow for complex output so we can check operator has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)
      matrix, matrix_t = self.evaluate([
          operator.to_dense(),
          array_ops.matrix_transpose(operator.to_dense())
      ])
      self.evaluate(operator.assert_positive_definite())  # Should not fail.
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
      self.assertAllClose(matrix, matrix_t)

      # Just to test the theory, get S2 as well.
      # This should create an imaginary operator.
      # S2 is anti-Hermitian ==> operator is imaginary.
      # S2 is real ==> operator is self-adjoint.
      imag_ifft_s = math_ops.imag(ifft_s)
      fft_imag_ifft_s = fft_ops.fft3d(
          1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
      operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

      matrix, matrix_h = self.evaluate([
          operator_imag.to_dense(),
          array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
      ])
      self.assertAllClose(matrix, matrix_h)
      np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
if __name__ == "__main__":
  # Register the parameterized LinearOperator contract tests on each derived
  # test class, then hand control to the TF test runner.  (The 3D test class
  # is a plain TestCase and needs no registration.)
  linear_operator_test_util.add_tests(
      LinearOperatorCirculantTestSelfAdjointOperator)
  linear_operator_test_util.add_tests(
      LinearOperatorCirculantTestHermitianSpectrum)
  linear_operator_test_util.add_tests(
      LinearOperatorCirculantTestNonHermitianSpectrum)
  linear_operator_test_util.add_tests(
      LinearOperatorCirculant2DTestHermitianSpectrum)
  linear_operator_test_util.add_tests(
      LinearOperatorCirculant2DTestNonHermitianSpectrum)
  test.main()
| apache-2.0 |
zhuzhezhe/weibobash | env/lib/python3.4/site-packages/pip/compat/dictconfig.py | 921 | 23096 | # This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import logging.handlers
import re
import sys
import types
from pip._vendor import six
# flake8: noqa
# Pattern for a legal (ASCII) Python identifier, matched case-insensitively.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True if *s* is a valid Python identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
try:
    # Prefer the stdlib implementation when available (Python >= 2.7).
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        # Fallback: accept an int level directly, or translate a level name.
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # NOTE(review): logging._levelNames is a Python 2 internal; on
            # Python 3 this lookup would raise AttributeError.  The fallback
            # is only reached when logging._checkLevel is missing, i.e. on
            # old Python 2 -- confirm this module is never used on Python 3.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A dict wrapper that converts values on access via ``self.configurator``."""

    def __getitem__(self, key):
        raw = dict.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Cache the converted value so conversion happens only once.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def get(self, key, default=None):
        raw = dict.get(self, key, default)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Cache the converted value so conversion happens only once.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, key, default=None):
        raw = dict.pop(self, key, default)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Popped values are not written back; containers still get
            # back-references for cfg:// resolution.
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted
class ConvertingList(list):
    """A list wrapper that converts items on access via ``self.configurator``."""

    def __getitem__(self, key):
        raw = list.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Cache the converted value so conversion happens only once.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, idx=-1):
        raw = list.pop(self, idx)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                # Popped items keep a parent back-reference but no key.
                converted.parent = self
        return converted
class ConvertingTuple(tuple):
    """A tuple wrapper that converts items on access via ``self.configurator``."""

    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Tuples are immutable so nothing is cached; converted containers
            # still get back-references for cfg:// resolution.
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Matches "prefix://suffix" conversion strings, e.g. "ext://sys.stderr".
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Tokenizers for the cfg:// mini-language: word, ".attr", "[index]".
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps conversion prefix -> name of the converter method on this class.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__

    def __init__(self, config):
        # Wrap the raw dict so nested values are converted lazily on access.
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Possibly an unimported submodule: import and retry once.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError while preserving cause and traceback.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # print d, rest
            # Walk the remaining ".attr" / "[index]" accessors.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx)  # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, six.string_types):  # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # Resolve dotted-name factories; Python 2 old-style classes are
        # callable but lack __call__, hence the types.ClassType check.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict((k, config[k]) for k in config if valid_ident(k))
        result = c(**kwargs)
        # '.' holds attributes to set on the constructed object.
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
# we don't want to lose the existing loggers,
# since other threads may have pointers to them.
# existing is set to contain all existing loggers,
# and as we go through the new configuration we
# remove any which are configured. At the end,
# what's left in existing is the set of loggers
# which were in the previous configuration but
# which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict)
# The list needs to be sorted so that we can
# avoid disabling child loggers of explicitly
# named loggers. With a sorted list it is easier
# to find the child loggers.
existing.sort()
# We'll keep the list of existing loggers
# which are children of named loggers here...
child_loggers = []
# now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
# Disable any old loggers. There's no point deleting
# them as other threads may continue to hold references
# and by disabling them, you stop them doing any logging.
# However, don't disable children of named loggers, as that's
# probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
# Name of parameter changed from fmt to format.
# Retry with old name.
# This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Resolves the 'formatter', 'level' and 'filters' references, builds
        the handler either through a custom '()' factory or a resolved
        'class', and applies the resolved attributes to the result.
        Raises ValueError if any referenced object cannot be set up.
        """
        # Resolve the formatter *name* to the already-configured instance.
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        # level/filters are applied after construction, so pull them out of
        # the kwargs that will be passed to the factory.
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            # Custom factory: may be a callable or a dotted name to resolve.
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            # Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                # (host, port) may be given as a list; normalise to a tuple.
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        # Remaining keys that are valid identifiers become constructor kwargs.
        kwargs = dict((k, config[k]) for k in config if valid_ident(k))
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            # The argument name changed from strm to stream
            # Retry with old name.
            # This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
# Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        # getLogger() with no name returns the root logger.
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# The configurator class used by dictConfig(); assigned through a module
# attribute so users can substitute their own subclass.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
| mit |
plamut/superdesk | server/apps/saved_searches/saved_searches.py | 3 | 6183 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
import logging
from flask import request
from eve.utils import ParsedRequest
from eve_elastic.elastic import build_elastic_query
from apps.archive.common import get_user
from superdesk import Resource, get_resource_service
from superdesk.services import BaseService
from superdesk.errors import SuperdeskApiError
from superdesk.utils import ListCursor
logger = logging.getLogger(__name__)
class SavedSearchesResource(Resource):
    # Eve resource definition for per-user saved searches, addressed under
    # users/<user>/saved_searches.
    endpoint_name = resource_title = 'saved_searches'
    schema = {
        'name': {
            'type': 'string',
            'required': True,
            'minlength': 1
        },
        'description': {
            'type': 'string'
        },
        # The search criteria; arrives as a dict and is persisted as a JSON
        # string by SavedSearchesService.process, decoded again on read.
        'filter': {
            'type': 'dict',
            'required': True
        },
        # Owner of the saved search.
        'user': Resource.rel('users'),
        # Global searches are returned to every user, not just the owner.
        'is_global': {
            'type': 'boolean',
            'default': False
        }
    }
    url = 'users/<regex("[a-zA-Z0-9:\\-\\.]+"):user>/saved_searches'
    item_methods = ['GET', 'PATCH', 'DELETE']
    privileges = {'POST': 'saved_searches', 'PATCH': 'saved_searches', 'DELETE': 'saved_searches'}
class AllSavedSearchesResource(Resource):
    # Read-only companion endpoint that lists every saved search regardless
    # of owner; it reuses the saved_searches collection and schema.
    endpoint_name = resource_title = 'all_saved_searches'
    datasource = {'source': 'saved_searches'}
    resource_methods = ['GET']
    item_methods = []
    schema = SavedSearchesResource.schema
class AllSavedSearchesService(BaseService):
    def get(self, req, lookup):
        """Return every saved search with its stored JSON filter decoded."""
        items = []
        for item in super().get(req, lookup):
            # 'filter' is persisted as a JSON string; hand callers a dict.
            item['filter'] = json.loads(item.get('filter'))
            items.append(item)
        return ListCursor(items)
class SavedSearchesService(BaseService):
    """CRUD service for per-user saved searches.

    The search criteria arrive as a dict in 'filter'; they are validated by
    running them against ElasticSearch and then persisted as a JSON string.
    """

    def on_create(self, docs):
        # Default the owner to the user segment of the URL, then validate
        # each search before it is stored.
        for doc in docs:
            doc.setdefault('user', request.view_args.get('user'))
            self.process(doc)

    def process(self, doc):
        """
        Validates, constructs and runs the query in the document
        """
        repo, query = self.process_query(doc)
        if repo.find(',') >= 0:
            # Multi-repo searches are validated against the first repo only.
            repo = repo.split(',').pop(0)
        self.validate_and_run_elastic_query(query, repo)
        # Persist the filter as a JSON string (decoded again in get()).
        doc['filter'] = json.dumps(doc.get('filter'))

    def on_update(self, updates, original):
        """
        Checks if the request owner and the saved search owner are the same person
        If not then the request owner should have global saved search privilege
        """
        request_user = request.view_args['user']
        user = get_user(required=True)
        # Allow the update when the requester owns the saved search, or when
        # they hold the 'global_saved_search' privilege.  (Bug fix: this was
        # previously compared with '== 0', which inverted the rule stated in
        # the docstring above -- it blocked privileged users while letting
        # unprivileged non-owners through.)
        if str(user['_id']) == request_user or \
                user['active_privileges'].get('global_saved_search', 0) != 0:
            if 'filter' in updates:
                self.process(updates)
            super().on_update(updates, original)
        else:
            raise SuperdeskApiError.forbiddenError("Unauthorized to modify global search")

    def get(self, req, lookup):
        """
        Overriding because of a different resource URL and user_id is part of the URL
        """
        # Return the user's own searches plus all global ones.
        req = ParsedRequest()
        req.where = json.dumps({'$or': [lookup, {'is_global': True}]})
        items = list(super().get(req, lookup=None))
        for item in items:
            item['filter'] = json.loads(item.get('filter'))
        return ListCursor(items)

    def init_request(self, elastic_query):
        """
        Initializes a ParsedRequest carrying the elastic query as its source.
        """
        parsed_request = ParsedRequest()
        parsed_request.args = {"source": json.dumps(elastic_query)}
        return parsed_request

    def get_location(self, doc):
        """
        Returns the repo/location named in the doc's query, defaulting to
        'archive' when the query does not specify one.
        :param doc:
        :return: location
        """
        return doc['filter']['query'].get('repo', 'archive')

    def process_query(self, doc):
        """
        Processes the Saved Search document
        :return: (location, elastic query) tuple
        :raise SuperdeskApiError.badRequestError: if the filter has no query
        """
        if not doc['filter'].get('query'):
            raise SuperdeskApiError.badRequestError('Search cannot be saved without a filter!')
        # 'repo' only selects the index and must not leak into the query.
        return self.get_location(doc), build_elastic_query(
            {k: v for k, v in doc['filter']['query'].items() if k != 'repo'})

    def validate_and_run_elastic_query(self, elastic_query, index):
        """
        Validates the elastic_query against ElasticSearch.
        :param elastic_query: JSON format inline with ElasticSearch syntax
        :param index: Name of the ElasticSearch index
        :raise SuperdeskError: If failed to validate the elastic_query against ElasticSearch
        """
        parsed_request = self.init_request(elastic_query)
        try:
            return get_resource_service(index).get(req=parsed_request, lookup={})
        except Exception as e:
            logger.exception(e)
            raise SuperdeskApiError.badRequestError('Fail to validate the filter against %s.' % index)
class SavedSearchItemsResource(Resource):
    """
    Since Eve doesn't support more than one URL for a resource, this resource is being created to fetch items based on
    the search string in the Saved Search document.
    """
    endpoint_name = 'saved_search_items'
    # Reuses the saved-search schema; only the URL and methods differ.
    schema = SavedSearchesResource.schema
    resource_title = endpoint_name
    url = 'saved_searches/<regex("[a-zA-Z0-9:\\-\\.]+"):saved_search_id>/items'
    resource_methods = ['GET']
    item_methods = []
class SavedSearchItemsService(SavedSearchesService):
    def get(self, req, **lookup):
        """Look up the saved search named in the URL and run its stored query.

        :raises SuperdeskApiError: notFoundError if the id does not resolve
        """
        saved_search_id = lookup['lookup']['saved_search_id']
        saved_search = get_resource_service('saved_searches').find_one(req=None, _id=saved_search_id)
        if not saved_search:
            raise SuperdeskApiError.notFoundError("Invalid Saved Search")
        # 'filter' is persisted as a JSON string; decode before processing.
        saved_search['filter'] = json.loads(saved_search.get('filter'))
        repo, query = super().process_query(saved_search)
        return super().validate_and_run_elastic_query(query, repo)
| agpl-3.0 |
vianuevm/Webparser | ENV/Lib/site-packages/decorator.py | 112 | 10639 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
# Python 2/3 compatibility shims for introspecting function signatures.
if sys.version >= '3':
    # Python 3: the stdlib provides getfullargspec directly.
    from inspect import getfullargspec
    def get_init(cls):
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            # Python 2 has no keyword-only arguments.
            self.kwonlyargs = []
            self.kwonlydefaults = None
        def __iter__(self):
            # Allow tuple-unpacking like the real getargspec result.
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults
    def get_init(cls):
        # Python 2: unwrap the unbound method to reach the plain function.
        return cls.__init__.im_func
# Matches a "def name(" function header and captures the function name.
# Fix: use a raw string for the regex -- '\s' and '\w' in a plain string are
# invalid escape sequences (DeprecationWarning, later a SyntaxError).
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')

# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                # Copy the full argument specification onto self so that
                # vars(self) can be used for %-template expansion in make().
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3': # easy way
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                        formatvalue=lambda val: "", *argspec)[1:-1]
                else: # Python 3 way
                    # Build two signature strings: 'signature' for the def
                    # header and 'shortsignature' for the call expression.
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*') # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)
    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        # NOTE: func_defaults is the Python 2 spelling of __defaults__.
        func.func_defaults = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        # Attribute the generated function to the caller's module by default.
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)
    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # _func_/_call_ are reserved for the decorator machinery.
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n' # this is needed in old versions of Python
        try:
            # Compile and execute the generated source (Python 2 exec form).
            code = compile(src, '<string>', 'single')
            # print >> sys.stderr, 'Compiling %s' % src
            exec code in evaldict
        except:
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str): # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1] #strip a right parens
            func = None
        else: # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level under the generated def header.
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None: # returns a decorated function
        # NOTE: func_globals is the Python 2 spelling of __globals__.
        evaldict = func.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else: # returns a decorator
        # Work out the callable to introspect and the name of the argument
        # that will receive the decorated function.
        if inspect.isclass(caller):
            name = caller.__name__.lower()
            callerfunc = get_init(caller)
            doc = 'decorator(%s) converts functions/generators into ' \
                'factories of %s objects' % (caller.__name__, caller.__name__)
            fun = getfullargspec(callerfunc).args[1] # second arg
        elif inspect.isfunction(caller):
            name = '_lambda_' if caller.__name__ == '<lambda>' \
                else caller.__name__
            callerfunc = caller
            doc = caller.__doc__
            fun = getfullargspec(callerfunc).args[0] # first arg
        else: # assume caller is an object with a __call__ method
            name = caller.__class__.__name__.lower()
            # im_func unwraps the bound method (Python 2 only attribute).
            callerfunc = caller.__call__.im_func
            doc = caller.__call__.__doc__
            fun = getfullargspec(callerfunc).args[1] # second arg
        evaldict = callerfunc.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        return FunctionMaker.create(
            '%s(%s)' % (name, fun),
            'return decorator(_call_, %s)' % fun,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=doc, module=caller.__module__)
######################### contextmanager ########################
# Build a ContextManager class on top of the stdlib generator context
# manager, adding a __call__ so instances can also be used as decorators.
def __call__(self, func):
    'Context manager decorator'
    return FunctionMaker.create(
        func, "with _self_: return _func_(%(shortsignature)s)",
        dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
    from contextlib import _GeneratorContextManager
    ContextManager = type(
        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
    from contextlib import GeneratorContextManager
    def __init__(self, f, *a, **k):
        # Older GeneratorContextManager takes the already-created generator.
        return GeneratorContextManager.__init__(self, f(*a, **k))
    ContextManager = type(
        'ContextManager', (GeneratorContextManager,),
        dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)
| gpl-2.0 |
plxaye/chromium | src/tools/sharding_supervisor/sharding_supervisor_unittest.py | 6 | 4964 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verify basic usage of sharding_supervisor."""
import difflib
import os
import subprocess
import sys
import unittest
from xml.dom import minidom
import sharding_supervisor_old as sharding_supervisor
# Directory containing this test and the scripts it exercises.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
SHARDING_SUPERVISOR = os.path.join(ROOT_DIR, 'sharding_supervisor.py')
DUMMY_TEST = os.path.join(ROOT_DIR, 'dummy_test.py')
# Defaults used to compute how many shards a supervisor run will launch.
NUM_CORES = sharding_supervisor.DetectNumCores()
SHARDS_PER_CORE = sharding_supervisor.SS_DEFAULT_SHARDS_PER_CORE
def generate_expected_output(start, end, num_shards):
  """Generate the expected stdout and stderr for the dummy test.

  Shards [start, end) each contribute one 'Running shard ...' line,
  followed by the success banner.  The dummy test writes nothing to
  stderr, so the second element is always the empty string.
  """
  shard_lines = ['Running shard %d of %d%s' % (i, num_shards, os.linesep)
                 for i in range(start, end)]
  banner = '%sALL SHARDS PASSED!%sALL TESTS PASSED!%s' % (
      os.linesep, os.linesep, os.linesep)
  return (''.join(shard_lines) + banner, '')
class ShardingSupervisorUnittest(unittest.TestCase):
  # Integration tests: each test launches sharding_supervisor.py in a child
  # process and compares its exact stdout/stderr and return code against
  # output built by generate_expected_output().
  def test_basic_run(self):
    # Default test.
    expected_shards = NUM_CORES * SHARDS_PER_CORE
    (expected_out, expected_err) = generate_expected_output(
        0, expected_shards, expected_shards)
    p = subprocess.Popen([sys.executable, SHARDING_SUPERVISOR, '--no-color',
                          DUMMY_TEST], stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (out, err) = p.communicate()
    self.assertEqual(expected_out, out)
    self.assertEqual(expected_err, err)
    self.assertEqual(0, p.returncode)
  def test_shard_per_core(self):
    """Test the --shards_per_core parameter."""
    expected_shards = NUM_CORES * 25
    (expected_out, expected_err) = generate_expected_output(
        0, expected_shards, expected_shards)
    p = subprocess.Popen([sys.executable, SHARDING_SUPERVISOR, '--no-color',
                          '--shards_per_core', '25', DUMMY_TEST],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = p.communicate()
    self.assertEqual(expected_out, out)
    self.assertEqual(expected_err, err)
    self.assertEqual(0, p.returncode)
  def test_slave_sharding(self):
    """Test the --total-slaves and --slave-index parameters."""
    total_shards = 6
    expected_shards = NUM_CORES * SHARDS_PER_CORE * total_shards
    # Test every single index to make sure they run correctly.
    for index in range(total_shards):
      # Slave 'index' runs the contiguous range [begin, end) of shards.
      begin = NUM_CORES * SHARDS_PER_CORE * index
      end = begin + NUM_CORES * SHARDS_PER_CORE
      (expected_out, expected_err) = generate_expected_output(
          begin, end, expected_shards)
      p = subprocess.Popen([sys.executable, SHARDING_SUPERVISOR, '--no-color',
                            '--total-slaves', str(total_shards),
                            '--slave-index', str(index),
                            DUMMY_TEST],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      (out, err) = p.communicate()
      self.assertEqual(expected_out, out)
      self.assertEqual(expected_err, err)
      self.assertEqual(0, p.returncode)
  def test_append_to_xml(self):
    # Merge the same shard results file twice (as shards 0 and 1) and
    # compare the result against a checked-in expected merge.
    shard_xml_path = os.path.join(ROOT_DIR, 'data', 'gtest_results.xml')
    expected_xml_path = os.path.join(
        ROOT_DIR, 'data', 'gtest_results_expected.xml')
    merged_xml = sharding_supervisor.AppendToXML(None, shard_xml_path, 0)
    merged_xml = sharding_supervisor.AppendToXML(merged_xml, shard_xml_path, 1)
    with open(expected_xml_path) as expected_xml_file:
      expected_xml = minidom.parse(expected_xml_file)
    # Serialize XML to a list of strings that is consistently formatted
    # (ignoring whitespace between elements) so that it may be compared.
    def _serialize_xml(xml):
      def _remove_whitespace_and_comments(xml):
        children_to_remove = []
        for child in xml.childNodes:
          if (child.nodeType == minidom.Node.TEXT_NODE and
              not child.data.strip()):
            children_to_remove.append(child)
          elif child.nodeType == minidom.Node.COMMENT_NODE:
            children_to_remove.append(child)
          elif child.nodeType == minidom.Node.ELEMENT_NODE:
            _remove_whitespace_and_comments(child)
        for child in children_to_remove:
          xml.removeChild(child)
      _remove_whitespace_and_comments(xml)
      return xml.toprettyxml(indent='  ').splitlines()
    diff = list(difflib.unified_diff(
        _serialize_xml(expected_xml),
        _serialize_xml(merged_xml),
        fromfile='gtest_results_expected.xml',
        tofile='gtest_results_actual.xml'))
    if diff:
      self.fail('Did not merge results XML correctly:\n' + '\n'.join(diff))
# Allow running this file directly as a test script.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
nivwusquorum/tensorflow-deepq | tf_rl/utils/geometry.py | 4 | 1404 | """
This module assumes that all geometrical points are
represented as 1D numpy arrays.
It was designed and tested on 2D points,
but if you try it on 3D points you may
be pleasantly surprised ;-)
"""
import numpy as np
def point_distance(x, y):
    """Returns euclidean distance between points x and y"""
    difference = x - y
    return np.linalg.norm(difference)
def point_projected_on_line(line_s, line_e, point):
    """Project point on line that goes through line_s and line_e

    assumes line_e is not equal or close to line_s
    """
    # Work in coordinates relative to line_s, scale the direction vector by
    # the normalized dot product, then translate back.
    direction = line_e - line_s
    relative = point - line_s
    scale = np.dot(relative, direction) / np.dot(direction, direction)
    return line_s + scale * direction
def point_segment_distance(segment_s, segment_e, point):
    """Returns distance from point to the closest point on segment
    connecting points segment_s and segment_e"""
    projected = point_projected_on_line(segment_s, segment_e, point)
    # The projection lies on the segment iff the two partial distances add
    # up to the full segment length (up to floating point tolerance).
    on_segment = np.isclose(
        point_distance(segment_s, projected) + point_distance(projected, segment_e),
        point_distance(segment_s, segment_e))
    if on_segment:
        return point_distance(point, projected)
    # Otherwise the nearest point is one of the segment's endpoints.
    return min(point_distance(point, segment_s),
               point_distance(point, segment_e))
| mit |
VCTLabs/openadams | _naf_feature.py | 1 | 6222 | # -*- coding: utf-8 -*-
# $Id$
# -------------------------------------------------------------------
# Copyright 2010 Achim Köhler
#
# This file is part of openADAMS.
#
# openADAMS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License,
# or (at your option) any later version.
#
# openADAMS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openADAMS. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
import _naf_commons
import _naf_database as nafdb
import _naf_itemmodel
import _naf_tableview
import _naf_textviewer
class _cFeatureModel(_naf_itemmodel.cItemModel):
    # Item model bound to the 'features' database table.
    def __init__(self):
        _naf_itemmodel.cItemModel.__init__(self, 'features')
# simple singleton pattern , see http://code.activestate.com/recipes/52558/
_featureModel = _cFeatureModel()
def cFeatureModel():
    """Return the shared, module-level feature model instance."""
    return _featureModel
class cFeatureDetailsView(_naf_commons.cArtifactDetailsView):
    # Form showing all editable fields of a single feature record.  All
    # widgets are bound to columns of the shared feature model through a
    # QDataWidgetMapper, so moving the mapper's index refreshes the form.
    def __init__(self, parent, readOnly=True):
        super(cFeatureDetailsView, self).__init__(parent)
        self.mapper = QtGui.QDataWidgetMapper()
        self.mapper.setItemDelegate(_naf_itemmodel.cItemDelegate(self))
        self.mapper.setModel(cFeatureModel())
        # Changes are written back explicitly, not on focus change.
        self.mapper.setSubmitPolicy(QtGui.QDataWidgetMapper.ManualSubmit)
        columns = self.mapper.model().getColumns()
        lbl = self.mapper.model().getLabel
        layout = QtGui.QGridLayout()
        self.setLayout(layout)
        # Static field labels (column 0/2/4 of the grid).
        layout.addWidget(QtGui.QLabel(lbl("id")), 0, 0)
        layout.addWidget(QtGui.QLabel(lbl("title")), 1, 0)
        layout.addWidget(QtGui.QLabel(lbl("keywords")), 2, 0)
        layout.addWidget(QtGui.QLabel(lbl("priority")), 3, 0)
        layout.addWidget(QtGui.QLabel(lbl("risk")), 3, 2)
        layout.addWidget(QtGui.QLabel(lbl("status")), 3, 4)
        # The description label doubles as an edit link in editable views.
        lblDescription = self.makeEditLinkLabel("description", readOnly)
        lblDescription.linkActivated.connect(self.sendEditSignal)
        layout.addWidget(lblDescription, 4, 0)
        ledId = QtGui.QSpinBox(self, maximum=sys.maxint)
        ledId.setReadOnly(True) # id is always read only
        ledTitle = QtGui.QLineEdit(self, readOnly=readOnly)
        cbxKeywords = QtGui.QComboBox(self, enabled=not readOnly, editable=True)
        cbxKeywords.setModel(self.mapper.model().getHistoryModel('keywords_view'))
        cbxPriority = QtGui.QComboBox(self, enabled=not readOnly)
        cbxPriority.setModel(self.mapper.model().getLookupModel('priorityLUT'))
        cbxStatus = QtGui.QComboBox(self, enabled=not readOnly)
        cbxStatus.setModel(self.mapper.model().getLookupModel('statusLUT'))
        cbxRisk = QtGui.QComboBox(self, enabled=not readOnly)
        cbxRisk.setModel(self.mapper.model().getLookupModel('riskLUT'))
        tedDescription = _naf_textviewer.cTextEditor(self, readOnly=readOnly)
        tedDescription.setImageProvider(nafdb.getImageForId)
        # addWidget(widget, fromRow, fromColumn, rowSpan, columnSpan, alignment)
        layout.addWidget(ledId, 0, 1, 1, 5)
        layout.addWidget(ledTitle, 1, 1, 1, 5)
        layout.addWidget(cbxKeywords, 2, 1, 1, 5)
        layout.addWidget(cbxPriority, 3, 1, 1, 1)
        layout.addWidget(cbxRisk, 3, 3, 1, 1)
        layout.addWidget(cbxStatus, 3, 5, 1, 1)
        layout.addWidget(tedDescription, 4, 1, 1, 5)
        # Let the input columns and the description row absorb extra space.
        layout.setColumnStretch(1, 1)
        layout.setColumnStretch(3, 1)
        layout.setColumnStretch(5, 1)
        layout.setRowStretch(4, 1)
        # Bind each widget to its model column.
        self.mapper.addMapping(ledId, columns.index('id'))
        self.mapper.addMapping(ledTitle, columns.index('title'))
        self.mapper.addMapping(cbxKeywords, columns.index('keywords'))
        self.mapper.addMapping(cbxPriority, columns.index('priority'))
        self.mapper.addMapping(cbxStatus, columns.index('status'))
        self.mapper.addMapping(cbxRisk, columns.index('risk'))
        self.mapper.addMapping(tedDescription, columns.index('description'))
class cFeatureView(QtGui.QTabWidget):
    """View/edit a feature, with tabs for its details and related items."""
    TYPE_ID = nafdb.TYPE_FEATURE
    def __init__(self, parent, isEditable=False):
        QtGui.QTabWidget.__init__(self, parent)
        self.defaultTitle = self.tr("New Feature")
        self.editTitle = self.tr("Edit feature")
        # Broadcasts the selected item to all registered observer tabs.
        self.mapper = _naf_tableview.cNotifier()
        self.detailsView = cFeatureDetailsView(self, readOnly=not isEditable)
        self.addTab(self.detailsView, self.tr('Feature'))
        self.mapper.addObserver(self.detailsView)
        # In edit mode the relation tables show checkboxes instead of links.
        relationType = [_naf_tableview.NORMAL_RELATION, _naf_tableview.IGNORE_RELATION][isEditable]
        self.requirementTableView = _naf_tableview.cItemTableView(
            _naf_tableview.cItemTableModel('requirements', ('id', 'priority', 'status', 'complexity', 'assigned', 'effort', 'category', 'keywords' ,'title'),
                relationType=relationType,
                itemsCheckable=isEditable),
            self)
        self.addTab(self.requirementTableView, self.tr('Related Requirements'))
        self.mapper.addObserver(self.requirementTableView)
        self.usecaseTableView = _naf_tableview.cItemTableView(
            _naf_tableview.cItemTableModel('usecases', ('id', 'priority', 'usefrequency', 'actors', 'stakeholders', 'keywords', 'title'),
                relationType=relationType,
                itemsCheckable=isEditable),
            self)
        self.addTab(self.usecaseTableView, self.tr('Related Usecases'))
        self.mapper.addObserver(self.usecaseTableView)
    def model(self):
        # The details view's mapper owns the underlying feature model.
        return self.detailsView.mapper.model()
    def submit(self):
        # Push pending edits from all observer views back to the database.
        self.mapper.submit()
| gpl-2.0 |
TheTypoMaster/chromium-crosswalk | tools/telemetry/telemetry/testing/system_stub.py | 5 | 15406 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides stubs for os, sys and subprocess for testing
This test allows one to test code that itself uses os, sys, and subprocess.
"""
import os
import re
import shlex
import sys
class Override(object):
  # Swaps a set of attributes on |base_module| for stub implementations and
  # remembers the originals so Restore() can put them back.  Stubs are
  # looked up by name in the table below.
  def __init__(self, base_module, module_list):
    stubs = {'adb_commands': AdbCommandsModuleStub,
             'cloud_storage': CloudStorageModuleStub,
             'open': OpenFunctionStub,
             'os': OsModuleStub,
             'perf_control': PerfControlModuleStub,
             'raw_input': RawInputFunctionStub,
             'subprocess': SubprocessModuleStub,
             'sys': SysModuleStub,
             'thermal_throttle': ThermalThrottleModuleStub,
             'logging': LoggingStub,
             'certutils': CertUtilsStub,
             'adb_install_cert': AdbInstallCertStub,
             'platformsettings': PlatformSettingsStub,
             }
    self.adb_commands = None
    self.os = None
    self.subprocess = None
    self.sys = None
    self._base_module = base_module
    self._overrides = {}
    # Save each original attribute (None if absent, e.g. builtins), then
    # install a fresh stub instance both on self and on the base module.
    for module_name in module_list:
      self._overrides[module_name] = getattr(base_module, module_name, None)
      setattr(self, module_name, stubs[module_name]())
      setattr(base_module, module_name, getattr(self, module_name))
    # Keep the stubbed os.path consistent with the stubbed sys.
    if self.os and self.sys:
      self.os.path.sys = self.sys
  def __del__(self):
    # Tests must call Restore() before the override goes away.
    assert not len(self._overrides)
  def Restore(self):
    for module_name, original_module in self._overrides.iteritems():
      if original_module is None:
        # This will happen when we override built-in functions, like open.
        # If we don't delete the attribute, we will shadow the built-in
        # function with an attribute set to None.
        delattr(self._base_module, module_name)
      else:
        setattr(self._base_module, module_name, original_module)
    self._overrides = {}
class AdbDevice(object):
  """In-memory stand-in for an Android device object used by the stubs.

  Shell commands are dispatched to handlers registered by tests in
  shell_command_handlers, keyed on the command name (argv[0]).
  """

  def __init__(self):
    self.has_root = False
    self.needs_su = False
    # Maps a command name (argv[0]) to a handler called with the full argv.
    self.shell_command_handlers = {}
    # Canned content returned by ReadFile.
    self.mock_content = []
    # Simulated system properties, seeded with a default ABI so platform
    # detection works out of the box.  (The original guarded this seed with
    # 'get(...) == None' on the dict created empty one line earlier, so the
    # check was always true; assign unconditionally instead.)
    self.system_properties = {'ro.product.cpu.abi': 'armeabi-v7a'}

  def HasRoot(self):
    return self.has_root

  def NeedsSU(self):
    return self.needs_su

  def RunShellCommand(self, args, **_kwargs):
    """Dispatch a shell command (string or argv list) to its handler."""
    if isinstance(args, basestring):
      args = shlex.split(args)
    handler = self.shell_command_handlers[args[0]]
    return handler(args)

  def FileExists(self, _):
    return False

  def ReadFile(self, device_path, as_root=False):  # pylint: disable=W0613
    return self.mock_content

  def GetProp(self, property_name):
    return self.system_properties[property_name]

  def SetProp(self, property_name, property_value):
    self.system_properties[property_name] = property_value
class AdbCommandsModuleStub(object):
  # Stub for the adb_commands *module*: exposes the module-level functions
  # and an AdbCommands factory, all backed by a single fake AdbDevice.
  class AdbCommandsStub(object):
    def __init__(self, module, device):
      self._module = module
      self._device = device
      self.is_root_enabled = True
      # All instances share the owning module stub's fake device.
      self._adb_device = module.adb_device
    def IsRootEnabled(self):
      return self.is_root_enabled
    def RestartAdbdOnDevice(self):
      pass
    def IsUserBuild(self):
      return False
    def WaitForDevicePm(self):
      pass
    def device(self):
      return self._adb_device
    def device_serial(self):
      return self._device
  def __init__(self):
    self.attached_devices = []
    self.apk_package_name = None
    self.adb_device = AdbDevice()
    # Closure standing in for the module-level AdbCommands class; binds
    # created instances back to this module stub.
    def AdbCommandsStubConstructor(device=None):
      return AdbCommandsModuleStub.AdbCommandsStub(self, device)
    self.AdbCommands = AdbCommandsStubConstructor
  @staticmethod
  def IsAndroidSupported():
    return True
  def GetPackageName(self, _):
    return self.apk_package_name
  def GetAttachedDevices(self):
    return self.attached_devices
  def CleanupLeftoverProcesses(self):
    pass
class CloudStorageModuleStub(object):
    """In-memory stand-in for the cloud_storage module.

    Remote buckets are dicts mapping remote path -> content hash; local
    files are represented only by preset hashes, never real contents.

    NOTE(review): internal calls deliberately go through the class
    (``CloudStorageModuleStub.Method(self, ...)``) rather than ``self`` --
    presumably because the stub instance stands in for a module whose
    attributes may be individually replaced by tests; preserved as-is.
    """

    PUBLIC_BUCKET = 'chromium-telemetry'
    PARTNER_BUCKET = 'chrome-partner-telemetry'
    INTERNAL_BUCKET = 'chrome-telemetry'
    BUCKET_ALIASES = {
        'public': PUBLIC_BUCKET,
        'partner': PARTNER_BUCKET,
        'internal': INTERNAL_BUCKET,
    }

    # These are used to test for CloudStorage errors.
    INTERNAL_PERMISSION = 2
    PARTNER_PERMISSION = 1
    PUBLIC_PERMISSION = 0
    # Not logged in.
    CREDENTIALS_ERROR_PERMISSION = -1

    class NotFoundError(Exception):
        pass

    class CloudStorageError(Exception):
        pass

    class PermissionError(CloudStorageError):
        pass

    class CredentialsError(CloudStorageError):
        pass

    def __init__(self):
        # One empty "bucket" dict per known bucket name.
        self.default_remote_paths = {CloudStorageModuleStub.INTERNAL_BUCKET:{},
                                     CloudStorageModuleStub.PARTNER_BUCKET:{},
                                     CloudStorageModuleStub.PUBLIC_BUCKET:{}}
        self.remote_paths = self.default_remote_paths
        self.local_file_hashes = {}
        self.local_hash_files = {}
        self.permission_level = CloudStorageModuleStub.INTERNAL_PERMISSION
        self.downloaded_files = []

    def SetPermissionLevelForTesting(self, permission_level):
        self.permission_level = permission_level

    def CheckPermissionLevelForBucket(self, bucket):
        """Raise the appropriate error if permission_level is too low."""
        if bucket == CloudStorageModuleStub.PUBLIC_BUCKET:
            return
        elif (self.permission_level ==
              CloudStorageModuleStub.CREDENTIALS_ERROR_PERMISSION):
            raise CloudStorageModuleStub.CredentialsError()
        elif bucket == CloudStorageModuleStub.PARTNER_BUCKET:
            if self.permission_level < CloudStorageModuleStub.PARTNER_PERMISSION:
                raise CloudStorageModuleStub.PermissionError()
        elif bucket == CloudStorageModuleStub.INTERNAL_BUCKET:
            if self.permission_level < CloudStorageModuleStub.INTERNAL_PERMISSION:
                raise CloudStorageModuleStub.PermissionError()
        elif bucket not in self.remote_paths:
            raise CloudStorageModuleStub.NotFoundError()

    def SetRemotePathsForTesting(self, remote_path_dict=None):
        if not remote_path_dict:
            self.remote_paths = self.default_remote_paths
            return
        self.remote_paths = remote_path_dict

    def GetRemotePathsForTesting(self):
        if not self.remote_paths:
            self.remote_paths = self.default_remote_paths
        return self.remote_paths

    # Set a dictionary of data files and their "calculated" hashes.
    def SetCalculatedHashesForTesting(self, calculated_hash_dictionary):
        self.local_file_hashes = calculated_hash_dictionary

    def GetLocalDataFiles(self):
        return self.local_file_hashes.keys()

    # Set a dictionary of hash files and the hashes they should contain.
    def SetHashFileContentsForTesting(self, hash_file_dictionary):
        self.local_hash_files = hash_file_dictionary

    def GetLocalHashFiles(self):
        return self.local_hash_files.keys()

    def ChangeRemoteHashForTesting(self, bucket, remote_path, new_hash):
        self.remote_paths[bucket][remote_path] = new_hash

    def List(self, bucket):
        """Return the remote paths in `bucket`, after a permission check."""
        if not bucket or not bucket in self.remote_paths:
            bucket_error = ('Incorrect bucket specified, correct buckets:' +
                            str(self.remote_paths))
            raise CloudStorageModuleStub.CloudStorageError(bucket_error)
        CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
        return list(self.remote_paths[bucket].keys())

    def Exists(self, bucket, remote_path):
        CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
        return remote_path in self.remote_paths[bucket]

    def Insert(self, bucket, remote_path, local_path):
        """'Upload' a known local file: record its hash under remote_path."""
        CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
        if not local_path in self.GetLocalDataFiles():
            file_path_error = 'Local file path does not exist'
            raise CloudStorageModuleStub.CloudStorageError(file_path_error)
        self.remote_paths[bucket][remote_path] = (
            CloudStorageModuleStub.CalculateHash(self, local_path))
        return remote_path

    def GetHelper(self, bucket, remote_path, local_path, only_if_changed):
        """Simulate a download: sync local hash state to the remote hash.

        Returns the remote hash on 'download', False when only_if_changed
        is set and nothing needed downloading (or the file is absent).
        """
        CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
        if not remote_path in self.remote_paths[bucket]:
            if only_if_changed:
                return False
            raise CloudStorageModuleStub.NotFoundError('Remote file does not exist.')
        remote_hash = self.remote_paths[bucket][remote_path]
        local_hash = self.local_file_hashes[local_path]
        if only_if_changed and remote_hash == local_hash:
            return False
        self.downloaded_files.append(remote_path)
        self.local_file_hashes[local_path] = remote_hash
        self.local_hash_files[local_path + '.sha1'] = remote_hash
        return remote_hash

    def Get(self, bucket, remote_path, local_path):
        return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
                                                local_path, False)

    def GetIfChanged(self, local_path, bucket=None):
        # With no explicit bucket, search public -> partner -> internal.
        remote_path = os.path.basename(local_path)
        if bucket:
            return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
                                                    local_path, True)
        result = CloudStorageModuleStub.GetHelper(
            self, self.PUBLIC_BUCKET, remote_path, local_path, True)
        if not result:
            result = CloudStorageModuleStub.GetHelper(
                self, self.PARTNER_BUCKET, remote_path, local_path, True)
        if not result:
            result = CloudStorageModuleStub.GetHelper(
                self, self.INTERNAL_BUCKET, remote_path, local_path, True)
        return result

    def GetFilesInDirectoryIfChanged(self, directory, bucket):
        """Fetch every file in `directory` that has a .sha1 sidecar."""
        if os.path.dirname(directory) == directory:  # If in the root dir.
            raise ValueError('Trying to serve root directory from HTTP server.')
        for dirpath, _, filenames in os.walk(directory):
            for filename in filenames:
                path, extension = os.path.splitext(
                    os.path.join(dirpath, filename))
                if extension != '.sha1':
                    continue
                self.GetIfChanged(path, bucket)

    def CalculateHash(self, file_path):
        # The "hash" is whatever the test preset for this path.
        return self.local_file_hashes[file_path]

    def ReadHash(self, hash_path):
        return self.local_hash_files[hash_path]
class LoggingStub(object):
    """Collects formatted warning/error messages instead of emitting them."""

    def __init__(self):
        self.warnings = []
        self.errors = []

    def info(self, msg, *args):
        # Informational output is intentionally discarded.
        pass

    def error(self, msg, *args):
        formatted = msg % args
        self.errors.append(formatted)

    def warning(self, msg, *args):
        formatted = msg % args
        self.warnings.append(formatted)

    def warn(self, msg, *args):
        # Alias kept for callers that use the older spelling.
        self.warning(msg, *args)
class OpenFunctionStub(object):
    """Callable stand-in for the built-in open().

    The file name is looked up in `files`; the stored object is wrapped
    in a FileStub supporting read/write/close and the `with` protocol.
    """

    class FileStub(object):
        def __init__(self, data):
            self._data = data

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def read(self, size=None):
            # A falsy size (None or 0) returns the entire contents.
            return self._data[:size] if size else self._data

        def write(self, data):
            self._data.write(data)

        def close(self):
            pass

    def __init__(self):
        self.files = {}

    def __call__(self, name, *args, **kwargs):
        # Mode and other open() arguments are accepted but ignored.
        return OpenFunctionStub.FileStub(self.files[name])
class OsModuleStub(object):
    """Stub for the os module backed by in-memory file/dir lists."""

    class OsEnvironModuleStub(object):
        def get(self, _):
            return None

    class OsPathModuleStub(object):
        def __init__(self, sys_module):
            # The sys stub decides win vs posix path behaviour.
            self.sys = sys_module
            self.files = []
            self.dirs = []

        def exists(self, path):
            return path in self.files

        def isfile(self, path):
            return path in self.files

        def isdir(self, path):
            return path in self.dirs

        def join(self, *paths):
            def IsAbsolutePath(path):
                if self.sys.platform.startswith('win'):
                    return re.match('[a-zA-Z]:\\\\', path)
                else:
                    return path.startswith('/')
            # Per Python specification, if any component is an absolute path,
            # discard previous components.
            for index, path in reversed(list(enumerate(paths))):
                if IsAbsolutePath(path):
                    paths = paths[index:]
                    break
            # Delegate to the real os.path.join, then force the separator
            # style of the stubbed platform (host separators may differ).
            if self.sys.platform.startswith('win'):
                tmp = os.path.join(*paths)
                return tmp.replace('/', '\\')
            else:
                tmp = os.path.join(*paths)
                return tmp.replace('\\', '/')

        @staticmethod
        def abspath(path):
            return os.path.abspath(path)

        @staticmethod
        def expanduser(path):
            return os.path.expanduser(path)

        @staticmethod
        def dirname(path):
            return os.path.dirname(path)

        @staticmethod
        def splitext(path):
            return os.path.splitext(path)

        @staticmethod
        def splitdrive(path):
            return os.path.splitdrive(path)

        # Real host values are passed through unchanged.
        X_OK = os.X_OK
        sep = os.sep
        pathsep = os.pathsep

    def __init__(self, sys_module=sys):
        self.path = OsModuleStub.OsPathModuleStub(sys_module)
        self.environ = OsModuleStub.OsEnvironModuleStub()
        self.display = ':0'
        self.local_app_data = None
        self.sys_path = None
        self.program_files = None
        self.program_files_x86 = None
        self.devnull = os.devnull
        self._directory = {}

    def access(self, path, _):
        # The mode argument is ignored; any known file is "accessible".
        return path in self.path.files

    def getenv(self, name, value=None):
        if name == 'DISPLAY':
            env = self.display
        elif name == 'LOCALAPPDATA':
            env = self.local_app_data
        elif name == 'PATH':
            env = self.sys_path
        elif name == 'PROGRAMFILES':
            env = self.program_files
        elif name == 'PROGRAMFILES(X86)':
            env = self.program_files_x86
        else:
            raise NotImplementedError('Unsupported getenv')
        return env if env else value

    def chdir(self, path):
        pass

    def walk(self, top):
        # NOTE(review): yields one triple per key of _directory and ignores
        # `top`'s actual layout -- confirm this matches callers' fixtures.
        for dir_name in self._directory:
            yield top, dir_name, self._directory[dir_name]
class PerfControlModuleStub(object):
    """Stub for the perf_control module; PerfControl instances do nothing."""

    class PerfControlStub(object):
        def __init__(self, adb):
            # The adb handle is accepted for signature parity and ignored.
            pass

    def __init__(self):
        self.PerfControl = PerfControlModuleStub.PerfControlStub
class RawInputFunctionStub(object):
    """Callable raw_input() replacement that returns a preset string."""

    def __init__(self):
        self.input = ''

    def __call__(self, name, *args, **kwargs):
        # The prompt text is ignored; always answer with the canned input.
        return self.input
class SubprocessModuleStub(object):
    """Stub for subprocess: Popen is a single shared, pre-programmed object."""

    class PopenStub(object):
        def __init__(self):
            self.communicate_result = ('', '')
            self.returncode_result = 0

        def __call__(self, args, **kwargs):
            # The instance poses as the Popen class: "constructing" it
            # simply hands back this same object.
            return self

        def communicate(self):
            return self.communicate_result

        @property
        def returncode(self):
            return self.returncode_result

    def __init__(self):
        self.Popen = SubprocessModuleStub.PopenStub()
        self.PIPE = None

    def call(self, *args, **kwargs):
        pass
class SysModuleStub(object):
    """Stub for sys exposing only a settable platform string."""

    def __init__(self):
        # Tests assign 'win32', 'linux2', etc. to steer platform checks.
        self.platform = ''
class ThermalThrottleModuleStub(object):
    """Stub for thermal_throttle; ThermalThrottle instances do nothing."""

    class ThermalThrottleStub(object):
        def __init__(self, adb):
            # The adb handle is accepted for signature parity and ignored.
            pass

    def __init__(self):
        self.ThermalThrottle = ThermalThrottleModuleStub.ThermalThrottleStub
class CertUtilsStub(object):
    """Stub for certificate utilities: writing is a no-op, generation canned."""

    # None signals "OpenSSL imported fine" to callers that check.
    openssl_import_error = None

    @staticmethod
    def write_dummy_ca_cert(_ca_cert_str, _key_str, cert_path):
        pass

    @staticmethod
    def generate_dummy_ca_cert():
        # Placeholder (cert, key) pair.
        return '-', '-'
class AdbInstallCertStub(object):
    """Stub for adb_install_cert driven by magic device_id values."""

    class AndroidCertInstaller(object):
        def __init__(self, device_id, _cert_name, _cert_path):
            # Only the magic id 'failure' raises; every other id succeeds.
            if device_id == 'failure':
                raise Exception('Test exception.')

        def install_cert(self, overwrite_cert=False):
            pass
class PlatformSettingsStub(object):
    """Stub for platformsettings: always claims SNI support."""

    @staticmethod
    def HasSniSupport():
        return True
| bsd-3-clause |
nojhan/weboob-devel | modules/agendadulibre/browser.py | 6 | 2004 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser import PagesBrowser, URL
from .pages import EventListPage, EventPage
from datetime import timedelta, date
class AgendadulibreBrowser(PagesBrowser):
    """Browser for agendadulibre event listings (per-site, optional region)."""

    event_list_page = URL('events\?start_date=(?P<date_from>.*)(?P<region>.*)', EventListPage)
    event_page = URL('events/(?P<_id>.*)', EventPage)

    def __init__(self, website, region, *args, **kwargs):
        self.BASEURL = u'%s/' % website
        # BUG FIX: the query-string prefix had been mangled by an HTML-entity
        # round trip ('&reg' decoded to '®'), yielding '®ion=...'. Restore
        # the literal '&region=' parameter so the server actually filters.
        self.region = '&region=%s' % region if region else ''
        PagesBrowser.__init__(self, *args, **kwargs)

    def list_events(self, date_from, date_to, city=None, categories=None, max_date=None):
        """Iterate events from date_from, capped one year out.

        NOTE(review): the incoming max_date argument is ignored and
        recomputed as date_from + 365 days -- confirm against callers
        before changing this.
        """
        _max_date = date_from + timedelta(days=365)
        max_date = date(year=_max_date.year, month=_max_date.month, day=_max_date.day)
        return self.event_list_page.go(date_from=date_from.strftime("%Y-%m-%d"),
                                       region=self.region)\
            .list_events(date_from=date_from,
                         date_to=date_to,
                         city=city,
                         categories=categories,
                         max_date=max_date)

    def get_event(self, event_id, event=None):
        """Fetch one event; the id may carry a '#'-prefixed fragment."""
        _id = event_id.split('#')[-1]
        return self.event_page.go(_id=_id).get_event(obj=event)
| agpl-3.0 |
kobejean/tensorflow | tensorflow/contrib/nn/python/ops/scaled_softplus_test.py | 66 | 3039 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scaled_softplus.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.nn.python.ops.scaled_softplus import scaled_softplus
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ScaledSoftplusTest(test.TestCase):
    """Numeric checks for scaled_softplus against a NumPy reference."""

    def test(self):
        """Forward pass matches alpha*log(1+exp(x/alpha)), optionally clipped."""
        np.random.seed(1)  # Make it reproducible.
        x = np.random.randn(3, 4).astype(np.float32)
        x64 = np.random.randn(3, 4).astype(np.float64)
        alpha = np.random.rand() + 0.01
        clip = np.float32(0.1)
        # NumPy references: clipped float32 case and unclipped float64 case.
        y = np.minimum(alpha * np.log(1. + np.exp(x / alpha)), clip)
        y64 = alpha * np.log(1. + np.exp(x64 / alpha))
        with self.test_session(use_gpu=True) as sess:
            z = scaled_softplus(constant_op.constant(x), alpha, clip)
            z64 = scaled_softplus(constant_op.constant(x64), alpha)
            z, z64 = sess.run([z, z64])
            eps = 1e-6
            self.assertAllClose(y, z, eps)
            self.assertAllClose(y64, z64, eps)

    def testGradient(self):
        """Gradients (with and without clip) pass the finite-difference check."""
        np.random.seed(1)  # Make it reproducible.
        x_shape = [5, 10]
        x_np = np.random.randn(*x_shape).astype(np.float32)
        # alpha broadcasts across rows, clip across columns.
        alpha_np = np.float32(np.random.rand(1, x_shape[1]) + 0.01)
        clip_np = np.float32(np.random.rand(x_shape[0], 1) * 5.)
        with self.test_session(use_gpu=True):
            x_tf = constant_op.constant(x_np)
            alpha_tf = constant_op.constant(alpha_np)
            clip_tf = constant_op.constant(clip_np)
            y_tf = scaled_softplus(x_tf, alpha_tf)
            z_tf = scaled_softplus(x_tf, alpha_tf, clip_tf * 0.1)
            err = gradient_checker.compute_gradient_error([x_tf, alpha_tf],
                                                          [x_shape, alpha_np.shape],
                                                          y_tf, x_shape,
                                                          [x_np, alpha_np],
                                                          delta=0.002)
            err_clip = gradient_checker.compute_gradient_error(
                [x_tf, alpha_tf, clip_tf],
                [x_shape, alpha_np.shape, clip_np.shape],
                z_tf, x_shape,
                [x_np, alpha_np, clip_np],
                delta=0.002)
            eps = 2e-4
            self.assertLess(err, eps)
            self.assertLess(err_clip, eps)
if __name__ == '__main__':
    # Allow running this test file directly.
    test.main()
| apache-2.0 |
agcooke/Sofie-HDF-Format | sofiehdfformat/gefilereader/GeFile.py | 1 | 3237 | import logging
"""
Read packets from the GCE File format. used on the sparkfun logger.
"""
import __builtin__
import os
import struct
from ant.core import exceptions as antException
import ant.core.constants as constants
import ant.core.message as message
from sofiehdfformat.core.SofieUtils import convertRawDataToHexString
TIMESTAMP_LEN=3;
def open(filename, mode='rb'):
    """Open a GCE log file and return a GeFile reader for it.

    Deliberately shadows the builtin open() at module level; GeFile's
    own __init__ uses __builtin__.open to reach the real one.
    """
    return GeFile(filename, mode);
class GeFile(object):
    """Iterable reader that extracts ANT packets and timestamps from a log.

    Python 2 only (uses __builtin__ and the old-style next() protocol).
    """

    def __init__(self, filename, mode):
        self.reader = __builtin__.open(filename, mode)
        self.fileSize = os.path.getsize(filename)
        logging.debug('The File'+str(self.reader)+' Its Size: '+
            str(self.fileSize))
        # Absolute offset of the read cursor within the file.
        self.totalIndex = 0
        self.message = message.Message();

    def readline(self, size=0):
        """
        Reads one sample from the file.
        Returns a dictionary with {'timestamp':unixTime,'packet':packet
        """
        # Scan forward until a window starts exactly on the ANT sync byte
        # (MESSAGE_TX_SYNC) or the end of file is reached.
        dataIndex = -1
        while (dataIndex != 0) and (self.totalIndex < self.fileSize-1):
            self.reader.seek(self.totalIndex)
            self.rawdata = self.reader.read(20);
            dataIndex = self.rawdata.find(chr(constants.MESSAGE_TX_SYNC));
            if dataIndex > 0:
                self.totalIndex += dataIndex
                # Make sure you get the whole packet.
                self.reader.seek(self.totalIndex)
                self.rawdata = self.reader.read(20);
            elif dataIndex < 0:
                self.totalIndex += len(self.rawdata)
        try:
            size = self.message.decode(self.rawdata);
        except antException.MessageError as m:
            # Undecodable bytes: skip ahead and retry recursively until EOF.
            self.totalIndex += 1;
            if self.totalIndex >= self.fileSize:
                return False;
            else:
                logging.debug('Going into Recursion:'+str(m))
                self.totalIndex +=1
                return self.readline();
        packet = self.rawdata[0: size]
        convertRawDataToHexString(packet)
        # The 3 bytes after the packet hold an hour/minute/second timestamp
        # (see TIMESTAMP_LEN at module level).
        timestamp = self.rawdata[size:size+3]
        convertRawDataToHexString(timestamp)
        try:
            hour, min, second = struct.unpack('BBB', \
                timestamp)
        except struct.error:
            logging.warning('ERROR READING FILE.')
            self.totalIndex +=1
            return self.readline();
        # NOTE(review): despite the name, this is seconds since midnight,
        # not epoch time -- confirm with consumers before renaming.
        unixTime = hour*60*60 \
            +min*60 + \
            second
        logging.debug('DECODED MESSAGE TOTAL LENGTH:'+str(size)+\
            ' TOTALINDEX: '+str(self.totalIndex)+'UNIX TIME: '+str(unixTime))
        self.totalIndex += size
        return {'timestamp':unixTime,'packet':packet};

    def close(self):
        self.reader.close();

    def __iter__(self):
        # Iteration always restarts from the beginning of the file.
        self.totalIndex = 0
        return self

    def next(self):
        data = self.readline()
        if data == False or data == None:
            raise StopIteration
        return data
| bsd-3-clause |
apanda/modeling | mcnet/components/security_groups.py | 1 | 1358 | from . import Core
import z3
class SecurityGroups (Core):
    """Z3-backed policy mapping addresses to named security groups.

    Python 2 only (tuple-parameter unpacking in addAddressToGroup).
    """

    def _init (self, name, security_groups, ctx, net):
        """Name is used to name this oracle in z3. Allows multiple mappings """
        self.name = name
        self.constraints = list ()
        self.ctx = ctx
        self.net = net
        self.ctx.AddPolicy(self)
        # Tenants in this case is a list.
        self.sg_type, self.sg_list = \
            z3.EnumSort('%s_secgroup'%self.name, security_groups)
        # Map each group name to its z3 enum value, and also expose it as
        # an attribute for convenient access (e.g. self.web).
        self.sg_map = {}
        for (sg, value) in zip(security_groups, self.sg_list):
            self.sg_map[sg] = value
            setattr(self, sg, value)
        # Uninterpreted function: address -> security group.
        self.policy_func = z3.Function('%s'%(name), self.ctx.address, self.sg_type)
        self.address_sg_map = []

    def addAddressToGroup (self, sglist):
        """Record (address, group) pairs; group may be a name or enum value."""
        def normalize ((addr, group)):
            if isinstance(group, str):
                group = self.sg_map[group]
            return (addr, group)
        sglist = map(normalize, sglist)
        self.address_sg_map.extend(sglist)

    def _addConstraints (self, solver):
        # Assert the recorded address->group assignments on the solver.
        for (addr, group) in self.address_sg_map:
            solver.add(self.policy_func(addr) == group)

    def sgPredicate (self, group):
        """Return a predicate testing membership of an address in `group`."""
        if isinstance(group, str):
            group = self.sg_map[group]
        return lambda a: self.policy_func(a) == group
screwt/tablib | tablib/packages/odf/draw.py | 93 | 5598 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DRAWNS, STYLENS, PRESENTATIONNS
from element import Element
def StyleRefElement(stylename=None, classnames=None, **args):
    """Build an Element whose style attributes live in the namespace
    matching the style's family: 'graphic' -> draw:, 'presentation' ->
    presentation:. (Python 2 raise syntax, as in the rest of this module.)
    """
    qattrs = {}
    if stylename is not None:
        f = stylename.getAttrNS(STYLENS, 'family')
        if f == 'graphic':
            qattrs[(DRAWNS,u'style-name')]= stylename
        elif f == 'presentation':
            qattrs[(PRESENTATIONNS,u'style-name')]= stylename
        else:
            raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
    if classnames is not None:
        # The family of the first class name decides the namespace for all.
        f = classnames[0].getAttrNS(STYLENS, 'family')
        if f == 'graphic':
            qattrs[(DRAWNS,u'class-names')]= classnames
        elif f == 'presentation':
            qattrs[(PRESENTATIONNS,u'class-names')]= classnames
        else:
            raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
    return Element(qattributes=qattrs, **args)
def DrawElement(name=None, **args):
    """Build an Element whose draw:display-name defaults to its name
    unless the caller supplied a displayname explicitly."""
    e = Element(name=name, **args)
    if not args.has_key('displayname'):
        e.setAttrNS(DRAWNS,'display-name', name)
    return e
# Autogenerated
# One factory per ODF <draw:*> element. Plain elements use Element();
# styled shapes go through StyleRefElement() (style-name handling) and
# named resources through DrawElement() (display-name handling).
def A(**args):
    return Element(qname = (DRAWNS,'a'), **args)

def Applet(**args):
    return Element(qname = (DRAWNS,'applet'), **args)

def AreaCircle(**args):
    return Element(qname = (DRAWNS,'area-circle'), **args)

def AreaPolygon(**args):
    return Element(qname = (DRAWNS,'area-polygon'), **args)

def AreaRectangle(**args):
    return Element(qname = (DRAWNS,'area-rectangle'), **args)

def Caption(**args):
    return StyleRefElement(qname = (DRAWNS,'caption'), **args)

def Circle(**args):
    return StyleRefElement(qname = (DRAWNS,'circle'), **args)

def Connector(**args):
    return StyleRefElement(qname = (DRAWNS,'connector'), **args)

def ContourPath(**args):
    return Element(qname = (DRAWNS,'contour-path'), **args)

def ContourPolygon(**args):
    return Element(qname = (DRAWNS,'contour-polygon'), **args)

def Control(**args):
    return StyleRefElement(qname = (DRAWNS,'control'), **args)

def CustomShape(**args):
    return StyleRefElement(qname = (DRAWNS,'custom-shape'), **args)

def Ellipse(**args):
    return StyleRefElement(qname = (DRAWNS,'ellipse'), **args)

def EnhancedGeometry(**args):
    return Element(qname = (DRAWNS,'enhanced-geometry'), **args)

def Equation(**args):
    return Element(qname = (DRAWNS,'equation'), **args)

def FillImage(**args):
    return DrawElement(qname = (DRAWNS,'fill-image'), **args)

def FloatingFrame(**args):
    return Element(qname = (DRAWNS,'floating-frame'), **args)

def Frame(**args):
    return StyleRefElement(qname = (DRAWNS,'frame'), **args)

def G(**args):
    return StyleRefElement(qname = (DRAWNS,'g'), **args)

def GluePoint(**args):
    return Element(qname = (DRAWNS,'glue-point'), **args)

def Gradient(**args):
    return DrawElement(qname = (DRAWNS,'gradient'), **args)

def Handle(**args):
    return Element(qname = (DRAWNS,'handle'), **args)

def Hatch(**args):
    return DrawElement(qname = (DRAWNS,'hatch'), **args)

def Image(**args):
    return Element(qname = (DRAWNS,'image'), **args)

def ImageMap(**args):
    return Element(qname = (DRAWNS,'image-map'), **args)

def Layer(**args):
    return Element(qname = (DRAWNS,'layer'), **args)

def LayerSet(**args):
    return Element(qname = (DRAWNS,'layer-set'), **args)

def Line(**args):
    return StyleRefElement(qname = (DRAWNS,'line'), **args)

def Marker(**args):
    return DrawElement(qname = (DRAWNS,'marker'), **args)

def Measure(**args):
    return StyleRefElement(qname = (DRAWNS,'measure'), **args)

def Object(**args):
    return Element(qname = (DRAWNS,'object'), **args)

def ObjectOle(**args):
    return Element(qname = (DRAWNS,'object-ole'), **args)

def Opacity(**args):
    return DrawElement(qname = (DRAWNS,'opacity'), **args)

def Page(**args):
    return Element(qname = (DRAWNS,'page'), **args)

def PageThumbnail(**args):
    return StyleRefElement(qname = (DRAWNS,'page-thumbnail'), **args)

def Param(**args):
    return Element(qname = (DRAWNS,'param'), **args)

def Path(**args):
    return StyleRefElement(qname = (DRAWNS,'path'), **args)

def Plugin(**args):
    return Element(qname = (DRAWNS,'plugin'), **args)

def Polygon(**args):
    return StyleRefElement(qname = (DRAWNS,'polygon'), **args)

def Polyline(**args):
    return StyleRefElement(qname = (DRAWNS,'polyline'), **args)

def Rect(**args):
    return StyleRefElement(qname = (DRAWNS,'rect'), **args)

def RegularPolygon(**args):
    return StyleRefElement(qname = (DRAWNS,'regular-polygon'), **args)

def StrokeDash(**args):
    return DrawElement(qname = (DRAWNS,'stroke-dash'), **args)

def TextBox(**args):
    return Element(qname = (DRAWNS,'text-box'), **args)
supriyasingh01/github_basics | Internetworking Distributed Project/finalProject/ovs/pox-master/tests/unit/lib/ioworker/io_worker_test.py | 5 | 3839 | #!/usr/bin/env python
### auto generate sha1: 26c6550c27d0274b9338b2b85891aeaf01146ed8
import itertools
import os.path
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), *itertools.repeat("..", 3)))
from pox.lib.mock_socket import MockSocket
from pox.lib.ioworker.io_worker import IOWorker, RecocoIOLoop
from nose.tools import eq_
class IOWorkerTest(unittest.TestCase):
    """Unit tests for IOWorker's send/receive buffer behaviour."""

    def test_basic_send(self):
        # Queued data marks the worker ready-to-send until it is consumed.
        i = IOWorker()
        i.send("foo")
        self.assertTrue(i._ready_to_send)
        self.assertEqual(i.send_buf, "foo")
        i._consume_send_buf(3)
        self.assertFalse(i._ready_to_send)

    def test_basic_receive(self):
        i = IOWorker()
        self.data = None
        def d(worker):
            self.data = worker.peek_receive_buf()
        i.set_receive_handler(d)
        i._push_receive_data("bar")
        self.assertEqual(self.data, "bar")
        # d does not consume the data
        i._push_receive_data("hepp")
        self.assertEqual(self.data, "barhepp")

    def test_receive_consume(self):
        i = IOWorker()
        self.data = None
        def consume(worker):
            self.data = worker.peek_receive_buf()
            worker.consume_receive_buf(len(self.data))
        i.set_receive_handler(consume)
        i._push_receive_data("bar")
        self.assertEqual(self.data, "bar")
        # data has been consumed
        i._push_receive_data("hepp")
        self.assertEqual(self.data, "hepp")
class RecocoIOLoopTest(unittest.TestCase):
    """Tests that drive RecocoIOLoop's generator-based run() by hand,
    feeding it fake select() results instead of running a real loop."""

    def test_basic(self):
        loop = RecocoIOLoop()
        (left, right) = MockSocket.pair()
        loop.create_worker_for_socket(left)

    def test_stop(self):
        loop = RecocoIOLoop()
        loop.stop()

    def test_run_read(self):
        loop = RecocoIOLoop()
        (left, right) = MockSocket.pair()
        worker = loop.create_worker_for_socket(left)
        # callback for ioworker to record receiving
        self.received = None
        def r(worker):
            self.received = worker.peek_receive_buf()
        worker.set_receive_handler(r)
        # 'start' the run (dark generator magic here). Does not actually execute run, but 'yield' a generator
        g = loop.run()
        # g.next() will call it, and get as far as the 'yield select'
        select = g.next()
        # send data on other socket half
        right.send("hallo")
        # now we emulate the return value of the select ([rlist],[wlist], [elist])
        g.send(([worker], [], []))
        # that should result in the socket being read, the data being handed
        # to the ioworker, the callback being called. Everybody happy.
        self.assertEquals(self.received, "hallo")

    def test_run_close(self):
        loop = RecocoIOLoop()
        (left, right) = MockSocket.pair()
        worker = loop.create_worker_for_socket(left)
        self.assertFalse(worker in loop._workers, "Should not add to _workers yet, until we start up the loop")
        self.assertTrue(loop._pending_commands.qsize() == 1, "Should have added pending create() command")
        worker.close()
        # This causes the worker to be scheduled to be closed -- it also
        # calls pinger.ping(). However, the Select task won't receive the ping
        # Until after this method has completed! Thus, we only test whether
        # worker has been added to the pending close queue
        self.assertTrue(loop._pending_commands.qsize() == 2, "Should have added pending close() command")

    def test_run_write(self):
        loop = RecocoIOLoop()
        (left, right) = MockSocket.pair()
        worker = loop.create_worker_for_socket(left)
        worker.send("heppo")
        # 'start' the run (dark generator magic here). Does not actually execute run, but 'yield' a generator
        g = loop.run()
        # g.next() will call it, and get as far as the 'yield select'
        select = g.next()
        # now we emulate the return value of the select ([rlist],[wlist], [elist])
        g.send(([], [worker], []))
        # that should result in the stuff being sent on the socket
        self.assertEqual(right.recv(), "heppo")
| cc0-1.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pylint/test/functional/too_many_lines_disabled.py | 20 | 1147 | """Test that disabling too-many-lines on any line works."""
# pylint: disable=too-many-lines
__revision__ = 0
ZERFZAER = 3
HEHEHE = 2
| gpl-3.0 |
jtakayama/ics691-setupbooster | makahiki/apps/managers/player_mgr/migrations/0003_auto__del_dailystatus.py | 7 | 7098 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the obsolete DailyStatus table."""
        # Deleting model 'DailyStatus'
        db.delete_table('player_mgr_dailystatus')
    def backwards(self, orm):
        """Reverse the migration: recreate the DailyStatus table."""
        # Adding model 'DailyStatus'
        db.create_table('player_mgr_dailystatus', (
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('daily_visitors', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('player_mgr', ['DailyStatus'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 26, 14, 11, 31, 593195)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 26, 14, 11, 31, 593045)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'player_mgr.profile': {
'Meta': {'object_name': 'Profile'},
'completion_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contact_carrier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'contact_text': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'daily_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_visit_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'referrer_awarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referring_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'referred_profiles'", 'null': 'True', 'to': "orm['auth.User']"}),
'setup_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'team_mgr.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
},
'team_mgr.team': {
'Meta': {'object_name': 'Team'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
}
}
complete_apps = ['player_mgr']
| mit |
axbaretto/beam | learning/katas/python/log_elements.py | 8 | 2144 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
class LogElements(beam.PTransform):
    """A composite transform that prints every element flowing through it.

    Elements are passed through unchanged, optionally annotated with their
    event timestamp and/or window when printed.
    """

    class _LoggingFn(beam.DoFn):
        """DoFn that prints each element with optional timestamp/window info."""

        def __init__(self, prefix='', with_timestamp=False, with_window=False):
            super(LogElements._LoggingFn, self).__init__()
            self.prefix = prefix
            self.with_timestamp = with_timestamp
            self.with_window = with_window

        def process(self, element, timestamp=beam.DoFn.TimestampParam,
                    window=beam.DoFn.WindowParam, **kwargs):
            log_line = self.prefix + str(element)

            if self.with_timestamp:
                log_line += ', timestamp=' + repr(timestamp.to_rfc3339())

            if self.with_window:
                log_line += ', window(start=' + window.start.to_rfc3339()
                log_line += ', end=' + window.end.to_rfc3339() + ')'

            print(log_line)
            # Pass the element through so downstream transforms still see it.
            yield element

    def __init__(self, label=None, prefix='',
                 with_timestamp=False, with_window=False):
        super(LogElements, self).__init__(label)
        self.prefix = prefix
        self.with_timestamp = with_timestamp
        self.with_window = with_window

    def expand(self, input):
        # BUG FIX: expand() must return the transform's output PCollection.
        # The original applied the ParDo but dropped the result, so the
        # transform's output was None and could not be chained.
        return input | beam.ParDo(
            self._LoggingFn(self.prefix, self.with_timestamp,
                            self.with_window))
| apache-2.0 |
marcore/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/caching_descriptor_system.py | 21 | 16650 | import sys
import logging
from contracts import contract, new_contract
from fs.osfs import OSFS
from lazy import lazy
from xblock.runtime import KvsFieldData, KeyValueStore
from xblock.fields import ScopeIds
from xblock.core import XBlock
from opaque_keys.edx.locator import BlockUsageLocator, LocalId, CourseLocator, LibraryLocator, DefinitionLocator
from xmodule.library_tools import LibraryToolsService
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xmodule.modulestore import BlockData
from xmodule.modulestore.edit_info import EditInfoRuntimeMixin
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import inheriting_field_data, InheritanceMixin
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.split_mongo.id_manager import SplitMongoIdManager
from xmodule.modulestore.split_mongo.definition_lazy_loader import DefinitionLazyLoader
from xmodule.modulestore.split_mongo.split_mongo_kvs import SplitMongoKVS
from xmodule.x_module import XModuleMixin
log = logging.getLogger(__name__)
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('CourseLocator', CourseLocator)
new_contract('LibraryLocator', LibraryLocator)
new_contract('BlockKey', BlockKey)
new_contract('BlockData', BlockData)
new_contract('CourseEnvelope', CourseEnvelope)
new_contract('XBlock', XBlock)
class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
    """
    A system that has a cache of a course version's json that it will use to load modules
    from, with a backup of calling to the underlying modulestore for more data.

    Computes the settings (nee 'metadata') inheritance upon creation.
    """
    @contract(course_entry=CourseEnvelope)
    def __init__(self, modulestore, course_entry, default_class, module_data, lazy, **kwargs):
        """
        Computes the settings inheritance and sets up the cache.

        modulestore: the module store that can be used to retrieve additional
        modules

        course_entry: the originally fetched enveloped course_structure w/ branch and course id info.
        Callers to _load_item provide an override but that function ignores the provided structure and
        only looks at the branch and course id

        module_data: a dict mapping Location -> json that was cached from the
        underlying modulestore
        """
        # needed by capa_problem (as runtime.filestore via this.resources_fs)
        if course_entry.course_key.course:
            root = modulestore.fs_root / course_entry.course_key.org / course_entry.course_key.course / course_entry.course_key.run
        else:
            root = modulestore.fs_root / str(course_entry.structure['_id'])
        root.makedirs_p()  # create directory if it doesn't exist

        id_manager = SplitMongoIdManager(self)
        kwargs.setdefault('id_reader', id_manager)
        kwargs.setdefault('id_generator', id_manager)

        super(CachingDescriptorSystem, self).__init__(
            field_data=None,
            load_item=self._load_item,
            resources_fs=OSFS(root),
            **kwargs
        )
        self.modulestore = modulestore
        self.course_entry = course_entry
        # set course_id attribute to avoid problems with subsystems that expect
        # it here. (grading, for example)
        self.course_id = course_entry.course_key
        self.lazy = lazy
        self.module_data = module_data
        self.default_class = default_class
        # In-memory (LocalId) blocks, keyed by usage locator.
        self.local_modules = {}
        self._services['library_tools'] = LibraryToolsService(modulestore)

    @lazy
    @contract(returns="dict(BlockKey: BlockKey)")
    def _parent_map(self):
        """
        Build a child -> parent BlockKey index from the cached course structure.
        """
        parent_map = {}
        for block_key, block in self.course_entry.structure['blocks'].iteritems():
            for child in block.fields.get('children', []):
                parent_map[child] = block_key
        return parent_map

    @contract(usage_key="BlockUsageLocator | BlockKey", course_entry_override="CourseEnvelope | None")
    def _load_item(self, usage_key, course_entry_override=None, **kwargs):
        """
        Instantiate the xblock fetching it either from the cache or from the structure

        :param course_entry_override: the course_info with the course_key to use (defaults to cached)
        """
        # usage_key is either a UsageKey or just the block_key. if a usage_key,
        if isinstance(usage_key, BlockUsageLocator):

            # trust the passed in key to know the caller's expectations of which fields are filled in.
            # particularly useful for strip_keys so may go away when we're version aware
            course_key = usage_key.course_key

            if isinstance(usage_key.block_id, LocalId):
                try:
                    return self.local_modules[usage_key]
                except KeyError:
                    raise ItemNotFoundError
            else:
                block_key = BlockKey.from_usage_key(usage_key)
                version_guid = self.course_entry.course_key.version_guid
        else:
            block_key = usage_key

            course_info = course_entry_override or self.course_entry
            course_key = course_info.course_key
            version_guid = course_key.version_guid

        # look in cache
        cached_module = self.modulestore.get_cached_block(course_key, version_guid, block_key)
        if cached_module:
            return cached_module

        block_data = self.get_module_data(block_key, course_key)

        class_ = self.load_block_type(block_data.block_type)
        block = self.xblock_from_json(class_, course_key, block_key, block_data, course_entry_override, **kwargs)

        self.modulestore.cache_block(course_key, version_guid, block_key, block)
        return block

    @contract(block_key=BlockKey, course_key="CourseLocator | LibraryLocator")
    def get_module_data(self, block_key, course_key):
        """
        Get block from module_data adding it to module_data if it's not already there but is in the structure

        Raises:
            ItemNotFoundError if block is not in the structure
        """
        json_data = self.module_data.get(block_key)
        if json_data is None:
            # deeper than initial descendant fetch or doesn't exist
            self.modulestore.cache_items(self, [block_key], course_key, lazy=self.lazy)
            json_data = self.module_data.get(block_key)
            if json_data is None:
                raise ItemNotFoundError(block_key)

        return json_data

    # xblock's runtime does not always pass enough contextual information to figure out
    # which named container (course x branch) or which parent is requesting an item. Because split allows
    # a many:1 mapping from named containers to structures and because item's identities encode
    # context as well as unique identity, this function must sometimes infer whether the access is
    # within an unspecified named container. In most cases, course_entry_override will give the
    # explicit context; however, runtime.get_block(), e.g., does not. HOWEVER, there are simple heuristics
    # which will work 99.999% of the time: a runtime is thread & even context specific. The likelihood that
    # the thread is working with more than one named container pointing to the same specific structure is
    # low; thus, the course_entry is most likely correct. If the thread is looking at > 1 named container
    # pointing to the same structure, the access is likely to be chunky enough that the last known container
    # is the intended one when not given a course_entry_override; thus, the caching of the last branch/course id.
    @contract(block_key="BlockKey | None")
    def xblock_from_json(self, class_, course_key, block_key, block_data, course_entry_override=None, **kwargs):
        """
        Load and return block info.
        """
        if course_entry_override is None:
            course_entry_override = self.course_entry
        else:
            # most recent retrieval is most likely the right one for next caller (see comment above fn)
            self.course_entry = CourseEnvelope(course_entry_override.course_key, self.course_entry.structure)

        definition_id = block_data.definition

        # If no usage id is provided, generate an in-memory id
        if block_key is None:
            block_key = BlockKey(block_data.block_type, LocalId())

        convert_fields = lambda field: self.modulestore.convert_references_to_keys(
            course_key, class_, field, self.course_entry.structure['blocks'],
        )

        if definition_id is not None and not block_data.definition_loaded:
            definition_loader = DefinitionLazyLoader(
                self.modulestore,
                course_key,
                block_key.type,
                definition_id,
                convert_fields,
            )
        else:
            definition_loader = None

        # If no definition id is provided, generate an in-memory id
        if definition_id is None:
            definition_id = LocalId()

        # Construct the Block Usage Locator:
        block_locator = course_key.make_usage_key(
            block_type=block_key.type,
            block_id=block_key.id,
        )

        converted_fields = convert_fields(block_data.fields)
        converted_defaults = convert_fields(block_data.defaults)
        if block_key in self._parent_map:
            parent_key = self._parent_map[block_key]
            parent = course_key.make_usage_key(parent_key.type, parent_key.id)
        else:
            parent = None

        aside_fields = None

        # for the situation if block_data has no asides attribute
        # (in case it was taken from memcache)
        try:
            if block_data.asides:
                aside_fields = {block_key.type: {}}
                for aside in block_data.asides:
                    aside_fields[block_key.type].update(aside['fields'])
        except AttributeError:
            pass

        try:
            kvs = SplitMongoKVS(
                definition_loader,
                converted_fields,
                converted_defaults,
                parent=parent,
                aside_fields=aside_fields,
                field_decorator=kwargs.get('field_decorator')
            )

            if InheritanceMixin in self.modulestore.xblock_mixins:
                field_data = inheriting_field_data(kvs)
            else:
                field_data = KvsFieldData(kvs)

            module = self.construct_xblock_from_class(
                class_,
                ScopeIds(None, block_key.type, definition_id, block_locator),
                field_data,
                for_parent=kwargs.get('for_parent')
            )
        except Exception:  # pylint: disable=broad-except
            log.warning("Failed to load descriptor", exc_info=True)
            return ErrorDescriptor.from_json(
                block_data,
                self,
                course_entry_override.course_key.make_usage_key(
                    block_type='error',
                    block_id=block_key.id
                ),
                error_msg=exc_info_to_str(sys.exc_info())
            )

        edit_info = block_data.edit_info
        module._edited_by = edit_info.edited_by  # pylint: disable=protected-access
        module._edited_on = edit_info.edited_on  # pylint: disable=protected-access
        module.previous_version = edit_info.previous_version
        module.update_version = edit_info.update_version
        module.source_version = edit_info.source_version
        module.definition_locator = DefinitionLocator(block_key.type, definition_id)

        for wrapper in self.modulestore.xblock_field_data_wrappers:
            module._field_data = wrapper(module, module._field_data)  # pylint: disable=protected-access

        # decache any pending field settings
        module.save()

        # If this is an in-memory block, store it in this system
        if isinstance(block_locator.block_id, LocalId):
            self.local_modules[block_locator] = module

        return module

    def get_edited_by(self, xblock):
        """
        See :meth: cms.lib.xblock.runtime.EditInfoRuntimeMixin.get_edited_by
        """
        return xblock._edited_by

    def get_edited_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        return xblock._edited_on

    @contract(xblock='XBlock')
    def get_subtree_edited_by(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        # pylint: disable=protected-access
        if not hasattr(xblock, '_subtree_edited_by'):
            block_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
            if block_data.edit_info._subtree_edited_by is None:
                self._compute_subtree_edited_internal(
                    block_data, xblock.location.course_key
                )
            xblock._subtree_edited_by = block_data.edit_info._subtree_edited_by

        return xblock._subtree_edited_by

    @contract(xblock='XBlock')
    def get_subtree_edited_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        # pylint: disable=protected-access
        if not hasattr(xblock, '_subtree_edited_on'):
            block_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
            if block_data.edit_info._subtree_edited_on is None:
                self._compute_subtree_edited_internal(
                    block_data, xblock.location.course_key
                )
            xblock._subtree_edited_on = block_data.edit_info._subtree_edited_on

        return xblock._subtree_edited_on

    def get_published_by(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        if not hasattr(xblock, '_published_by'):
            self.modulestore.compute_published_info_internal(xblock)

        return getattr(xblock, '_published_by', None)

    def get_published_on(self, xblock):
        """
        See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
        """
        if not hasattr(xblock, '_published_on'):
            self.modulestore.compute_published_info_internal(xblock)

        return getattr(xblock, '_published_on', None)

    @contract(block_data='BlockData')
    def _compute_subtree_edited_internal(self, block_data, course_key):
        """
        Recurse the subtree finding the max edited_on date and its corresponding edited_by. Cache it.
        """
        # pylint: disable=protected-access
        max_date = block_data.edit_info.edited_on
        max_date_by = block_data.edit_info.edited_by

        for child in block_data.fields.get('children', []):
            child_data = self.get_module_data(BlockKey(*child), course_key)
            # BUG FIX: the memoization check must inspect the *child's* cached
            # value. The original tested block_data (the node currently being
            # computed, whose _subtree_edited_on is necessarily still None), so
            # every child subtree was recomputed on every call, defeating the
            # cache. The computed result was unchanged; only the caching was.
            if child_data.edit_info._subtree_edited_on is None:
                self._compute_subtree_edited_internal(child_data, course_key)
            if child_data.edit_info._subtree_edited_on > max_date:
                max_date = child_data.edit_info._subtree_edited_on
                max_date_by = child_data.edit_info._subtree_edited_by

        block_data.edit_info._subtree_edited_on = max_date
        block_data.edit_info._subtree_edited_by = max_date_by

    def get_aside_of_type(self, block, aside_type):
        """
        See `runtime.Runtime.get_aside_of_type`

        This override adds the field data from the block to the aside
        """
        asides_cached = block.get_asides() if isinstance(block, XModuleMixin) else None
        if asides_cached:
            for aside in asides_cached:
                if aside.scope_ids.block_type == aside_type:
                    return aside

        new_aside = super(CachingDescriptorSystem, self).get_aside_of_type(block, aside_type)
        new_aside._field_data = block._field_data  # pylint: disable=protected-access
        for key, _ in new_aside.fields.iteritems():
            if isinstance(key, KeyValueStore.Key) and block._field_data.has(new_aside, key):  # pylint: disable=protected-access
                try:
                    value = block._field_data.get(new_aside, key)  # pylint: disable=protected-access
                except KeyError:
                    pass
                else:
                    setattr(new_aside, key, value)
        block.add_aside(new_aside)
        return new_aside
| agpl-3.0 |
danielharbor/openerp | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()

import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join

# True on any Windows platform (sys.platform 'win32' etc.).
windows = sys.platform.startswith('win')

# Default search path: PATH environment variable, falling back to os.defpath,
# split into a list of directories.
defpath = environ.get('PATH', defpath).split(pathsep)

if windows:
    defpath.insert(0, '.')  # can insert without checking, when duplicates are removed
    # given the quite usual mess in PATH on Windows, let's rather remove duplicates
    # (case-insensitive, keeping first occurrence; note `not seen.add(...)` is
    # always True since set.add returns None -- it just records the dir).
    seen = set()
    defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
    del seen
    # Extensions to try, from PATHEXT or the XP/Vista default; '' first so the
    # bare command name is always checked.
    defpathext = [''] + environ.get('PATHEXT',
        '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
    defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
    """ Locate a file in a path supplied as a part of the file name,
        or the user's path, or a supplied path.

        Yields every full path (not necessarily absolute) at which *file*
        names an existing file satisfying *mode*, searching the directories
        in *path* and trying each extension in *pathext*.

        >>> def test_which(expected, *args, **argd):
        ...     result = list(which_files(*args, **argd))
        ...     assert result == expected, 'which_files: %s != %s' % (result, expected)
        ...
        ...     try:
        ...         result = [ which(*args, **argd) ]
        ...     except IOError:
        ...         result = []
        ...     assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])

        >>> if windows: cmd = environ['COMSPEC']
        >>> if windows: test_which([cmd], 'cmd')
        >>> if windows: test_which([cmd], 'cmd.exe')
        >>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
        >>> if windows: test_which([cmd], 'cmd', pathext='.exe')
        >>> if windows: test_which([cmd], cmd)
        >>> if windows: test_which([cmd], cmd, path='<nonexistent>')
        >>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
        >>> if windows: test_which([cmd], cmd[:-4])
        >>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')

        >>> if windows: test_which([], 'cmd', path='<nonexistent>')
        >>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
        >>> if windows: test_which([], '<nonexistent>/cmd')
        >>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')

        >>> if not windows: sh = '/bin/sh'
        >>> if not windows: test_which([sh], 'sh')
        >>> if not windows: test_which([sh], 'sh', path=dirname(sh))
        >>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
        >>> if not windows: test_which([sh], sh)
        >>> if not windows: test_which([sh], sh, path='<nonexistent>')
        >>> if not windows: test_which([sh], sh, pathext='<nonexistent>')

        >>> if not windows: test_which([], 'sh', mode=W_OK)  # not running as root, are you?
        >>> if not windows: test_which([], 'sh', path='<nonexistent>')
        >>> if not windows: test_which([], '<nonexistent>/sh')
    """
    # A directory component embedded in the name pins the search to it;
    # otherwise fall back to the given path, then the default PATH.
    dirpart, file = split(file)
    if dirpart:
        search_dirs = (dirpart,)
    elif path is None:
        search_dirs = defpath
    elif isinstance(path, str):
        search_dirs = path.split(pathsep)
    else:
        search_dirs = path

    # Normalize pathext the same way (string -> list, None -> default).
    if pathext is None:
        extensions = defpathext
    elif isinstance(pathext, str):
        extensions = pathext.split(pathsep)
    else:
        extensions = pathext
    if '' not in extensions:
        # always check command without extension, even for custom pathext
        extensions.insert(0, '')

    for directory in search_dirs:
        base = join(directory, file)
        for ext in extensions:
            candidate = base + ext
            if exists(candidate) and access(candidate, mode):
                yield candidate
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
    """ Locate a file in a path supplied as a part of the file name,
        or the user's path, or a supplied path.

        The function returns full path (not necessarily absolute path),
        in which the given file name matches an existing file in a directory on the path,
        or raises IOError(errno.ENOENT).

        >>> # for doctest see which_files()
    """
    try:
        # BUG FIX (portability): use the next() builtin (Python 2.6+) instead
        # of the Python-2-only .next() method, so this also works on Python 3.
        return next(iter(which_files(file, mode, path, pathext)))
    except StopIteration:
        try:
            from errno import ENOENT
        except ImportError:
            ENOENT = 2
        raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
    # Run the doctests embedded in which_files() when executed as a script.
    import doctest
    doctest.testmod()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
msincenselee/vnpy | vnpy/chart/item.py | 1 | 8512 | from abc import abstractmethod
from typing import List, Dict, Tuple
import pyqtgraph as pg
from vnpy.trader.ui import QtCore, QtGui, QtWidgets
from vnpy.trader.object import BarData
from .base import UP_COLOR, DOWN_COLOR, PEN_WIDTH, BAR_WIDTH
from .manager import BarManager
class ChartItem(pg.GraphicsObject):
    """
    Abstract base class for items drawn on the chart (candles, volume bars...).

    Each bar is pre-rendered into its own QPicture; paint() then replays only
    the pictures that fall inside the currently exposed x-range, caching the
    composite picture until the visible range changes.
    """

    def __init__(self, manager: BarManager):
        """
        Set up pens/brushes and the per-bar picture caches.
        """
        super().__init__()

        # Shared data source for bar history and ranges.
        self._manager: BarManager = manager

        # bar index -> pre-rendered picture for that bar.
        # (Attribute names keep the original "picutures" spelling because the
        # subclasses in this module reference them by that name.)
        self._bar_picutures: Dict[int, QtGui.QPicture] = {}
        # Composite picture of the currently visible range, or None.
        self._item_picuture: QtGui.QPicture = None

        self._up_pen: QtGui.QPen = pg.mkPen(
            color=UP_COLOR, width=PEN_WIDTH
        )
        self._up_brush: QtGui.QBrush = pg.mkBrush(color=UP_COLOR)

        self._down_pen: QtGui.QPen = pg.mkPen(
            color=DOWN_COLOR, width=PEN_WIDTH
        )
        self._down_brush: QtGui.QBrush = pg.mkBrush(color=DOWN_COLOR)

        # (min_ix, max_ix) of the last range rendered into _item_picuture.
        self._rect_area: Tuple[float, float] = None

        # Very important! Only redraw the visible part and improve speed a lot.
        self.setFlag(self.ItemUsesExtendedStyleOption)

    @abstractmethod
    def _draw_bar_picture(self, ix: int, bar: BarData) -> QtGui.QPicture:
        """
        Draw picture for specific bar.
        """
        pass

    @abstractmethod
    def boundingRect(self) -> QtCore.QRectF:
        """
        Get bounding rectangles for item.
        """
        pass

    @abstractmethod
    def get_y_range(self, min_ix: int = None, max_ix: int = None) -> Tuple[float, float]:
        """
        Get range of y-axis with given x-axis range.

        If min_ix and max_ix not specified, then return range with whole data set.
        """
        pass

    @abstractmethod
    def get_info_text(self, ix: int) -> str:
        """
        Get information text to show by cursor.
        """
        pass

    def update_history(self, history: List[BarData]) -> BarData:
        """
        Update a list of bar data.
        """
        # Full reload: drop all cached per-bar pictures and redraw everything
        # from the manager's current bar list.
        self._bar_picutures.clear()

        bars = self._manager.get_all_bars()
        for ix, bar in enumerate(bars):
            bar_picture = self._draw_bar_picture(ix, bar)
            self._bar_picutures[ix] = bar_picture

        self.update()

    def update_bar(self, bar: BarData) -> BarData:
        """
        Update single bar data.
        """
        ix = self._manager.get_index(bar.datetime)

        bar_picture = self._draw_bar_picture(ix, bar)
        self._bar_picutures[ix] = bar_picture

        self.update()

    def update(self) -> None:
        """
        Refresh the item.
        """
        if self.scene():
            self.scene().update()

    def paint(
        self,
        painter: QtGui.QPainter,
        opt: QtWidgets.QStyleOptionGraphicsItem,
        w: QtWidgets.QWidget
    ):
        """
        Reimplement the paint method of parent class.

        This function is called by external QGraphicsView.
        """
        rect = opt.exposedRect

        min_ix = int(rect.left())
        max_ix = int(rect.right())
        max_ix = min(max_ix, len(self._bar_picutures))

        rect_area = (min_ix, max_ix)
        # Re-render the composite picture only when the visible range changed
        # or the cache was invalidated (e.g. by clear_all).
        if rect_area != self._rect_area or not self._item_picuture:
            self._rect_area = rect_area
            self._draw_item_picture(min_ix, max_ix)

        self._item_picuture.play(painter)

    def _draw_item_picture(self, min_ix: int, max_ix: int) -> None:
        """
        Draw the picture of item in specific range.
        """
        self._item_picuture = QtGui.QPicture()
        painter = QtGui.QPainter(self._item_picuture)

        for n in range(min_ix, max_ix):
            bar_picture = self._bar_picutures[n]
            bar_picture.play(painter)

        painter.end()

    def clear_all(self) -> None:
        """
        Clear all data in the item.
        """
        self._item_picuture = None
        self._bar_picutures.clear()
        self.update()
class CandleItem(ChartItem):
    """Chart item that renders each bar as a candlestick."""

    def __init__(self, manager: BarManager):
        """Initialize with the shared bar manager."""
        super().__init__(manager)

    def _draw_bar_picture(self, ix: int, bar: BarData) -> QtGui.QPicture:
        """Render one candle (high-low shadow plus body) into a QPicture."""
        picture = QtGui.QPicture()
        painter = QtGui.QPainter(picture)

        # Rising candles use the "up" colours, falling ones the "down" colours.
        rising = bar.close_price >= bar.open_price
        painter.setPen(self._up_pen if rising else self._down_pen)
        painter.setBrush(self._up_brush if rising else self._down_brush)

        # High-low shadow line.
        painter.drawLine(
            QtCore.QPointF(ix, bar.high_price),
            QtCore.QPointF(ix, bar.low_price)
        )

        # Body: a flat line for a doji (open == close), a rectangle otherwise.
        if bar.open_price == bar.close_price:
            painter.drawLine(
                QtCore.QPointF(ix - BAR_WIDTH, bar.open_price),
                QtCore.QPointF(ix + BAR_WIDTH, bar.open_price),
            )
        else:
            painter.drawRect(
                QtCore.QRectF(
                    ix - BAR_WIDTH,
                    bar.open_price,
                    BAR_WIDTH * 2,
                    bar.close_price - bar.open_price
                )
            )

        painter.end()
        return picture

    def boundingRect(self) -> QtCore.QRectF:
        """Bounding rectangle spanning all bars over the full price range."""
        min_price, max_price = self._manager.get_price_range()
        return QtCore.QRectF(
            0,
            min_price,
            len(self._bar_picutures),
            max_price - min_price
        )

    def get_y_range(self, min_ix: int = None, max_ix: int = None) -> Tuple[float, float]:
        """
        Get range of y-axis with given x-axis range.

        If min_ix and max_ix not specified, then return range with whole data set.
        """
        return self._manager.get_price_range(min_ix, max_ix)

    def get_info_text(self, ix: int) -> str:
        """
        Get information text to show by cursor.
        """
        bar = self._manager.get_bar(ix)
        if not bar:
            return ""

        words = [
            "Date",
            bar.datetime.strftime("%Y-%m-%d"),
            "",
            "Time",
            bar.datetime.strftime("%H:%M"),
            "",
            "Open",
            str(bar.open_price),
            "",
            "High",
            str(bar.high_price),
            "",
            "Low",
            str(bar.low_price),
            "",
            "Close",
            str(bar.close_price)
        ]
        return "\n".join(words)
class VolumeItem(ChartItem):
    """Chart item that renders each bar's traded volume as a vertical bar."""

    def __init__(self, manager: BarManager):
        """Initialize with the shared bar manager."""
        super().__init__(manager)

    def _draw_bar_picture(self, ix: int, bar: BarData) -> QtGui.QPicture:
        """Render one volume bar into a QPicture."""
        picture = QtGui.QPicture()
        painter = QtGui.QPainter(picture)

        # Colour follows the price direction of the same bar.
        rising = bar.close_price >= bar.open_price
        painter.setPen(self._up_pen if rising else self._down_pen)
        painter.setBrush(self._up_brush if rising else self._down_brush)

        # Rectangle from the zero line up to the bar's volume.
        painter.drawRect(
            QtCore.QRectF(
                ix - BAR_WIDTH,
                0,
                BAR_WIDTH * 2,
                bar.volume
            )
        )

        painter.end()
        return picture

    def boundingRect(self) -> QtCore.QRectF:
        """Bounding rectangle spanning all bars over the full volume range."""
        min_volume, max_volume = self._manager.get_volume_range()
        return QtCore.QRectF(
            0,
            min_volume,
            len(self._bar_picutures),
            max_volume - min_volume
        )

    def get_y_range(self, min_ix: int = None, max_ix: int = None) -> Tuple[float, float]:
        """
        Get range of y-axis with given x-axis range.

        If min_ix and max_ix not specified, then return range with whole data set.
        """
        return self._manager.get_volume_range(min_ix, max_ix)

    def get_info_text(self, ix: int) -> str:
        """
        Get information text to show by cursor.
        """
        bar = self._manager.get_bar(ix)
        return f"Volume {bar.volume}" if bar else ""
| mit |
maritaria/Isomurphy | tests/graph_explorer.py | 2 | 1251 | import unittest
from graph.explorer import DfsExplorer, BfsExplorer
from graph.graphs import Graph, Vertex
class ExplorerTest(unittest.TestCase):
    """Tests for DfsExplorer and BfsExplorer visit ordering.

    Fixture graph (5 vertices; v5 is isolated and must stay unvisited):

        v1 -- v2 -- v4
         \\
          v3
    """

    def _build_graph(self):
        """Build the shared fixture graph; returns (graph, [v1..v5]).

        The two test methods previously duplicated this setup verbatim.
        """
        g = Graph()
        vertices = [g.addvertex() for _ in range(5)]
        g.addedge(vertices[0], vertices[1])
        g.addedge(vertices[0], vertices[2])
        g.addedge(vertices[1], vertices[3])
        return g, vertices

    def _assert_markers(self, explorer_cls, expected):
        """Explore from v1 with explorer_cls and compare each vertex's
        'marker' attribute against the expected visit order (-1 = unvisited).
        """
        g, vertices = self._build_graph()
        expl = explorer_cls(g, "marker")
        ge = expl.explore(vertices[0])

        def getMarker(v: Vertex) -> int:
            return getattr(ge.findvertex(v.label()), "marker")

        for vertex, want in zip(vertices, expected):
            self.assertEqual(getMarker(vertex), want)

    def test_dfs(self):
        # DFS from v1 dives down v2 -> v4 before backtracking to v3.
        self._assert_markers(DfsExplorer, [0, 1, 3, 2, -1])

    def test_bfs(self):
        # BFS from v1 visits both neighbours (v2, v3) before v4.
        self._assert_markers(BfsExplorer, [0, 1, 2, 3, -1])
Eagles2F/werkzeug | werkzeug/__init__.py | 23 | 6922 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.11.dev0'


# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.


# import mapping to objects in other modules
all_by_module = {
    'werkzeug.debug': ['DebuggedApplication'],
    'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', 'LocalStack',
                       'release_local'],
    'werkzeug.serving': ['run_simple'],
    'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
                      'run_wsgi_app'],
    'werkzeug.testapp': ['test_app'],
    'werkzeug.exceptions': ['abort', 'Aborter'],
    'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
                      'url_quote_plus', 'url_unquote', 'url_unquote_plus',
                      'url_fix', 'Href', 'iri_to_uri', 'uri_to_iri'],
    'werkzeug.formparser': ['parse_form_data'],
    'werkzeug.utils': ['escape', 'environ_property', 'append_slash_redirect',
                       'redirect', 'cached_property', 'import_string',
                       'dump_cookie', 'parse_cookie', 'unescape',
                       'format_string', 'find_modules', 'header_property',
                       'html', 'xhtml', 'HTMLBuilder', 'validate_arguments',
                       'ArgumentValidationError', 'bind_arguments',
                       'secure_filename'],
    'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
                      'peek_path_info', 'SharedDataMiddleware',
                      'DispatcherMiddleware', 'ClosingIterator', 'FileWrapper',
                      'make_line_iter', 'LimitedStream', 'responder',
                      'wrap_file', 'extract_path_info'],
    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
                                'EnvironHeaders', 'ImmutableList',
                                'ImmutableDict', 'ImmutableMultiDict',
                                'TypeConversionDict',
                                'ImmutableTypeConversionDict', 'Accept',
                                'MIMEAccept', 'CharsetAccept',
                                'LanguageAccept', 'RequestCacheControl',
                                'ResponseCacheControl', 'ETags', 'HeaderSet',
                                'WWWAuthenticate', 'Authorization',
                                'FileMultiDict', 'CallbackDict', 'FileStorage',
                                'OrderedMultiDict', 'ImmutableOrderedMultiDict'
                                ],
    'werkzeug.useragents': ['UserAgent'],
    'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', 'cookie_date',
                      'parse_cache_control_header', 'is_resource_modified',
                      'parse_accept_header', 'parse_set_header', 'quote_etag',
                      'unquote_etag', 'generate_etag', 'dump_header',
                      'parse_list_header', 'parse_dict_header',
                      'parse_authorization_header',
                      'parse_www_authenticate_header', 'remove_entity_headers',
                      'is_entity_header', 'remove_hop_by_hop_headers',
                      'parse_options_header', 'dump_options_header',
                      'is_hop_by_hop_header', 'unquote_header_value',
                      'quote_header_value', 'HTTP_STATUS_CODES'],
    'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', 'Response',
                          'AcceptMixin', 'ETagRequestMixin',
                          'ETagResponseMixin', 'ResponseStreamMixin',
                          'CommonResponseDescriptorsMixin', 'UserAgentMixin',
                          'AuthorizationMixin', 'WWWAuthenticateMixin',
                          'CommonRequestDescriptorsMixin'],
    'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
    # the undocumented easteregg ;-)
    'werkzeug._internal': ['_easteregg']
}

# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])


# Reverse index derived from all_by_module: maps each public object name to
# the submodule that implements it. Used by module.__getattr__ below to
# resolve attributes lazily.
object_origins = {}
for module, items in iteritems(all_by_module):
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, i.e. the name has
        # not been materialized on this module object yet.
        if name in object_origins:
            # Import the real defining module, then copy *all* of that
            # module's exported names onto this facade so subsequent
            # lookups are plain attribute hits.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            # Submodules exposed as attributes (werkzeug.exceptions etc.)
            # only need to be imported; the import machinery registers them
            # on the package, so fall through to the normal lookup below.
            __import__('werkzeug.' + name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']

# setup the new module and patch it into the dict of loaded modules;
# from here on ``import werkzeug`` yields the lazy facade above.
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
    '__file__': __file__,
    '__package__': 'werkzeug',
    '__path__': __path__,
    '__doc__': __doc__,
    '__version__': __version__,
    '__all__': tuple(object_origins) + tuple(attribute_modules),
    '__docformat__': 'restructuredtext en'
})

# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
| bsd-3-clause |
Codefans-fan/odoo | addons/base_vat/__openerp__.py | 52 | 2942 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the base_vat module.
{
    'name': 'VAT Number Validation',
    'version': '1.0',
    # Hidden: installed automatically as a dependency, not user-facing.
    'category': 'Hidden/Dependency',
    'description': """
VAT validation for Partner's VAT numbers.
=========================================
After installing this module, values entered in the VAT field of Partners will
be validated for all supported countries. The country is inferred from the
2-letter country code that prefixes the VAT number, e.g. ``BE0477472701``
will be validated using the Belgian rules.
There are two different levels of VAT number validation:
--------------------------------------------------------
* By default, a simple off-line check is performed using the known validation
rules for the country, usually a simple check digit. This is quick and
always available, but allows numbers that are perhaps not truly allocated,
or not valid anymore.
* When the "VAT VIES Check" option is enabled (in the configuration of the user's
Company), VAT numbers will be instead submitted to the online EU VIES
database, which will truly verify that the number is valid and currently
allocated to a EU company. This is a little bit slower than the simple
off-line check, requires an Internet connection, and may not be available
all the time. If the service is not available or does not support the
requested country (e.g. for non-EU countries), a simple check will be performed
instead.
Supported countries currently include EU countries, and a few non-EU countries
such as Chile, Colombia, Mexico, Norway or Russia. For unsupported countries,
only the country code will be validated.
""",
    'author': 'OpenERP SA',
    'depends': ['account'],
    'website': 'https://www.odoo.com/page/accounting',
    # Views loaded on installation.
    'data': ['base_vat_view.xml'],
    'installable': True,
    'auto_install': False,
    'images': ['images/1_partner_vat.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sahiljain/catapult | tracing/tracing_build/trace2html_unittest.py | 3 | 3175 | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import codecs
import gzip
import os
import shutil
import tempfile
import unittest
from tracing_build import trace2html
class Trace2HTMLTests(unittest.TestCase):
    """Tests for trace2html HTML generation from trace files.

    Fix over the original: the Python-2-only ``file()`` builtin was used to
    read/write files without ever closing them (handles leaked until GC, and
    ``file()`` does not exist on Python 3). All file access now uses
    ``open()`` inside ``with`` blocks, and SIMPLE_TRACE is read once.
    """

    SIMPLE_TRACE_PATH = os.path.join(
        os.path.dirname(__file__),
        '..', 'test_data', 'simple_trace.json')
    BIG_TRACE_PATH = os.path.join(
        os.path.dirname(__file__),
        '..', 'test_data', 'big_trace.json')
    NON_JSON_TRACE_PATH = os.path.join(
        os.path.dirname(__file__),
        '..', 'test_data', 'android_systrace.txt')

    def setUp(self):
        self._tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self._tmpdir, ignore_errors=True)

    def testGzippedTraceIsntDoubleGzipped(self):
        # |input_filename| will contain plain JSON data at one point, then
        # gzipped JSON data at another point for reasons explained below.
        input_filename = os.path.join(
            self._tmpdir, 'GzippedTraceIsntDoubleGzipped')
        output_filename = os.path.join(
            self._tmpdir, 'GzippedTraceIsntDoubleGzipped.html')

        # Read SIMPLE_TRACE once; it is reused unzipped and gzipped below.
        with open(self.SIMPLE_TRACE_PATH) as simple_trace_file:
            simple_trace_data = simple_trace_file.read()

        # trace2html-ify SIMPLE_TRACE, but from a controlled input filename so
        # that when ViewerDataScript gzips it, it uses the same filename for
        # both the unzipped SIMPLE_TRACE here and the gzipped one below.
        with open(input_filename, 'w') as input_file:
            input_file.write(simple_trace_data)
        with codecs.open(output_filename, 'w', encoding='utf-8') as output_file:
            trace2html.WriteHTMLForTracesToFile([input_filename], output_file)

        # Hash the contents of the output file generated from the unzipped
        # json input file.
        with open(output_filename) as output_file:
            unzipped_hash = hash(output_file.read())
        os.unlink(output_filename)

        # Gzip SIMPLE_TRACE into |input_filename|.
        # trace2html should automatically gunzip it and start building the
        # html from the same input as if the input weren't gzipped.
        with gzip.GzipFile(input_filename, mode='w') as input_gzipfile:
            input_gzipfile.write(simple_trace_data)

        # trace2html-ify the zipped version of SIMPLE_TRACE from the same
        # input filename so that the gzipping process is stable.
        with codecs.open(output_filename, 'w', encoding='utf-8') as output_file:
            trace2html.WriteHTMLForTracesToFile([input_filename], output_file)

        # Hash the contents of the output file generated from the zipped
        # json input file.
        with open(output_filename) as output_file:
            zipped_hash = hash(output_file.read())

        # Compare the hashes, not the file contents directly so that, if they
        # are different, python shouldn't try to print megabytes of html.
        self.assertEqual(unzipped_hash, zipped_hash)

    def testWriteHTMLForTracesToFile(self):
        # Smoke test: mixed inputs (big JSON, small JSON, non-JSON systrace)
        # must all be accepted in a single call.
        output_filename = os.path.join(
            self._tmpdir, 'WriteHTMLForTracesToFile.html')
        with codecs.open(output_filename, 'w', encoding='utf-8') as output_file:
            trace2html.WriteHTMLForTracesToFile([
                self.BIG_TRACE_PATH,
                self.SIMPLE_TRACE_PATH,
                self.NON_JSON_TRACE_PATH
            ], output_file)
| bsd-3-clause |
jife94/wwmmo | website/ctrl/doc.py | 4 | 7554 |
import datetime
import difflib
import re
import logging
import random
from google.appengine.api import memcache
from google.appengine.ext import db
import model.doc
import html2text
class DocPage(object):
    """Represents everything we need to display a specific page."""

    def __init__(self):
        # Scalar fields start unset; callers populate them from the
        # datastore after construction.
        for field in ("key", "title", "content", "slug"):
            setattr(self, field, None)
        # Most-recent-first list of DocRevision objects.
        self.revisions = []
class DocRevision(object):
    """A single saved revision of a documentation page."""

    def __init__(self):
        # All fields are unset until populated by the ctrl functions;
        # words/words_* are lazy word-diff statistics.
        for field in ("key", "page_key", "content", "user", "date",
                      "older_revision_key", "words", "words_added",
                      "words_removed", "words_changed"):
            setattr(self, field, None)
def getPage(slug):
    """Gets the document page with the given slug.

    Returns a DocPage populated with the latest content and up to four
    recent revisions, or None if no page has that slug. Results are
    cached in memcache under "doc:<slug>".
    """
    cache_key = "doc:%s" % (slug)
    page = memcache.get(cache_key)
    if page:
        return page
    page_mdl = None
    for mdl in model.doc.DocPage.all().filter("slug", slug).fetch(1):
        page_mdl = mdl
        break
    if not page_mdl:
        # No such page; nothing is cached for misses.
        return None
    page = DocPage()
    page.key = str(page_mdl.key())
    page.title = page_mdl.title
    page.slug = page_mdl.slug
    # Fetch the last four revisions. The latest one is the current, and
    # then we want the rest so we can display a little history on the page
    # as well (who edited the page, and when).
    for rev_mdl in (model.doc.DocPageRevision.all()
                    .ancestor(page_mdl)
                    .order("-date")
                    .fetch(4)):
        if not page.content:
            # First (newest) revision supplies the page body.
            page.content = rev_mdl.content
        revision = DocRevision()
        revision.key = str(rev_mdl.key())
        revision.date = rev_mdl.date
        revision.user = rev_mdl.user
        page.revisions.append(revision)
    memcache.set(cache_key, page)
    return page
def getPageRevision(slug, revision_key):
    """Fetches a single revision of the page with the given slug.

    Returns a DocPage whose content is that revision's content (not the
    latest), with exactly one entry in page.revisions; None if the slug
    or the revision key cannot be resolved. Not memcached.
    """
    page_mdl = None
    for mdl in model.doc.DocPage.all().filter("slug", slug).fetch(1):
        page_mdl = mdl
        break
    if not page_mdl:
        return None
    page = DocPage()
    page.key = str(page_mdl.key())
    page.title = page_mdl.title
    page.slug = page_mdl.slug
    rev_mdl = model.doc.DocPageRevision.get(db.Key(revision_key))
    if not rev_mdl:
        return None
    page.content = rev_mdl.content
    revision = DocRevision()
    revision.key = str(rev_mdl.key())
    revision.content = rev_mdl.content
    revision.date = rev_mdl.date
    revision.user = rev_mdl.user
    page.revisions.append(revision)
    return page
def getRevisionHistory(page_key):
    """Returns the revision history (newest first) for the given page key."""
    revision_query = model.doc.DocPageRevision.all()
    revision_query = revision_query.ancestor(db.Key(page_key))
    revision_query = revision_query.order("-date")
    return _getRevisionHistory(revision_query)
def getGlobalRevisionHistory():
    """Returns the 20 most recent revisions across all pages.

    Each entry is a {"page": DocPage, "revision": DocRevision} dict,
    newest first. Revisions whose page no longer exists are skipped.
    """
    query = (model.doc.DocPageRevision.all()
             .order("-date")
             .fetch(20))
    revisions = _getRevisionHistory(query)
    pages = []
    page_map = {}
    for revision in revisions:
        if revision.page_key not in page_map:
            page_mdl = model.doc.DocPage.get(db.Key(revision.page_key))
            if not page_mdl:
                # Orphan revision (page deleted). NOTE(review): the missing
                # key is never recorded, so each orphan re-queries the
                # datastore -- confirm whether negative caching is wanted.
                continue
            page = DocPage()
            page.key = str(page_mdl.key())
            page.title = page_mdl.title
            page.slug = page_mdl.slug
            page_map[revision.page_key] = page
        page = page_map[revision.page_key]
        pages.append({"page": page, "revision": revision})
    return pages
def _getRevisionHistory(query):
    """Materializes `query` into DocRevision objects, newest first.

    The query is expected to be ordered date-descending. As consecutive
    revisions of the same page are seen, each newer revision's word-delta
    stats are computed against the next-older one and the finished entry is
    cached in memcache under "doc-rev-history:<revision key>".
    """
    revisions = []
    prev_rev = None
    rev = None
    for rev_mdl in query:
        cache_key = "doc-rev-history:%s" % (str(rev_mdl.key()))
        rev = memcache.get(cache_key)
        if rev:
            revisions.append(rev)
        else:
            rev = DocRevision()
            rev.key = str(rev_mdl.key())
            # Revisions are stored as children of their page entity.
            rev.page_key = str(rev_mdl.key().parent())
            rev.content = rev_mdl.content
            rev.user = rev_mdl.user
            rev.date = rev_mdl.date
            if prev_rev and prev_rev.page_key == rev.page_key:
                # prev_rev is the newer of the pair; now that its
                # predecessor is known, finalize and cache it.
                _populateDelta(rev, prev_rev)
                prev_rev.older_revision_key = rev.key
                memcache.set("doc-rev-history:%s" % (prev_rev.key), prev_rev)
            revisions.append(rev)
        prev_rev = rev
    if rev and prev_rev and prev_rev.page_key == rev.page_key:
        # NOTE(review): at loop exit prev_rev is rev, so this diffs the last
        # revision against itself (delta of zero) -- presumably intended to
        # finalize/cache the oldest entry; confirm intent.
        _populateDelta(rev, prev_rev)
        prev_rev.older_revision_key = rev.key
        memcache.set("doc-rev-history:%s" % (prev_rev.key), prev_rev)
    return revisions
def revertTo(revision_key, user):
    """Reverting a revision is simple, just re-save it as if it was brand new.

    `user` is recorded as the author of the newly-created revision.
    Silently does nothing when the revision key does not resolve.
    """
    rev_mdl = model.doc.DocPageRevision.get(db.Key(revision_key))
    if not rev_mdl:
        return
    # Same parent page, same content, fresh author/timestamp.
    new_rev_mdl = model.doc.DocPageRevision(parent=rev_mdl.parent(),
                                            content=rev_mdl.content,
                                            user=user,
                                            date=datetime.datetime.now())
    new_rev_mdl.put()
def savePage(page):
    """Saves the given page to the data store.

    Creates the DocPage entity when `page.key` is unset, otherwise updates
    the existing one, then appends a new revision with the page content.
    Invalidates the "doc:<slug>" memcache entry so getPage() refetches.

    NOTE(review): reads page.updatedUser / page.updatedDate, which are not
    initialized by DocPage.__init__ -- callers must set them first; confirm.
    """
    if not page.key:
        page_mdl = model.doc.DocPage(slug=page.slug, title=page.title)
    else:
        page_mdl = model.doc.DocPage.get(db.Key(page.key))
    page_mdl.title = page.title
    page_mdl.put()
    memcache.delete("doc:" + page.slug)
    rev_mdl = model.doc.DocPageRevision(parent=page_mdl,
                                        content=page.content,
                                        user=page.updatedUser,
                                        date=page.updatedDate)
    rev_mdl.put()
def generateDiff(older_rev, newer_rev):
    """Generates an HTML diff of the two revisions.

    Added/removed words are wrapped in diff-added / diff-removed spans;
    HTML tags themselves pass through unwrapped.
    """
    deltas = difflib.ndiff(_splitWords(older_rev.content),
                           _splitWords(newer_rev.content))
    parts = []
    for delta in deltas:
        marker = delta[:1]
        if '<' in delta:
            # Never wrap raw HTML tags in highlight spans.
            parts.append(delta[2:])
        elif marker == "+":
            parts.append("<span class=\"diff-added\"> " + delta[2:] + " </span>")
        elif marker == "-":
            parts.append("<span class=\"diff-removed\"> " + delta[2:] + " </span>")
        elif marker != "?":
            # Unchanged word: keep one leading space as the separator.
            parts.append(delta[1:])
    return "".join(parts)
def _populateDelta(older_rev, newer_rev):
"""Populates the delta between the older revision and the newer."""
if not older_rev.words:
older_rev.words = _splitWords(older_rev.content)
if not newer_rev.words:
newer_rev.words = _splitWords(newer_rev.content)
newer_rev.words_added = 0
newer_rev.words_removed = 0
newer_rev.words_changed = 0
diff = difflib.ndiff(older_rev.words, newer_rev.words)
last_change = ' '
for word in diff:
if word[0] == '+':
newer_rev.words_added += 1
elif word[0] == '-':
newer_rev.words_removed += 1
elif word[0] == '?':
if last_change == '+':
newer_rev.words_added -= 1
elif last_change == '-':
newer_rev.words_removed -= 1
newer_rev.words_changed += 1
_htmlSplitRegex = re.compile(r"(\s*<[^>]+>\s*)")
_wordSplitRegex = re.compile(r"\s+")
def _splitWords(content):
"""Splits the given string into words.
We first split the words into HTML tags and "text", then further split
the text into words (by spaces). For example, the following string:
Hello World, <a href="index.html">Link</a>
Will be split into:
['Hello', 'World,', '<a href="index.html">', 'Link', '</a>']"""
# Santize the input a little.
content = content.replace(" ", " ")
words = []
for entry in _htmlSplitRegex.split(content):
if entry.strip() == "":
continue
elif '<' in entry:
words.append(entry)
else:
words.extend(_wordSplitRegex.split(entry))
return words
| mit |
Willyham/tchannel-python | tests/tornado/test_peer.py | 1 | 4561 | # encoding=utf8
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import mock
import pytest
from tornado import gen
from tchannel.tornado import peer as tpeer
from tchannel.tornado.stream import InMemStream
from tchannel.tornado.stream import read_full
def closed_stream(body):
    """Builds an in-memory stream whose entire request body is the given string.

    :param body:
        Request body for the returned Stream
    """
    in_mem = InMemStream(body)
    in_mem.close()
    return in_mem
def mocked_stream():
    """An object that conforms to the stream interface but isn't a Stream.

    Its read() yields 'foo' twice, then '' (end-of-stream) on the third call.
    """
    stream = mock.Mock()

    def fake_read():
        # call_count already includes the in-flight call, so the third read
        # signals exhaustion.
        if stream.read.call_count == 3:
            return gen.maybe_future('')
        return gen.maybe_future('foo')

    stream.read.side_effect = fake_read
    return stream
def test_basic_peer_management_operations():
    """PeerGroup get/lookup/add/remove behaves like a host-keyed registry."""
    peer_group = tpeer.PeerGroup(mock.MagicMock())
    assert not peer_group.hosts
    assert not peer_group.peers
    assert not peer_group.lookup('localhost:4040')
    # get() creates on first use and is idempotent afterwards.
    p = peer_group.get('localhost:4040')
    assert p
    assert peer_group.lookup('localhost:4040') is p
    assert peer_group.get('localhost:4040') is p
    # remove() returns the removed peer and forgets the host.
    assert peer_group.remove('localhost:4040') is p
    assert not peer_group.lookup('localhost:4040')
    peer_group.add(p)
    assert peer_group.hosts == ['localhost:4040']
    assert peer_group.peers == [p]
@pytest.mark.parametrize('s, expected', [
    (None, b''),
    ('foo', b'foo'),
    (u'☃', b'\xe2\x98\x83'),
    (bytearray([0x12, 0x34]), b'\x12\x34'),
    (closed_stream('foo'), b'foo'),
    (mocked_stream(), b'foofoo')
])
@pytest.mark.gen_test
def test_maybe_stream(s, expected):
    # maybe_stream() must wrap any supported input (None, text, bytes-like,
    # or an existing stream) into a Stream whose full body is `expected`.
    got = yield read_full(tpeer.maybe_stream(s))
    assert expected == got
@pytest.mark.gen_test
def test_peer_group_clear_multiple():
    # Multiple concurrent reset attempts should not conflict with each other.
    peer_group = tpeer.PeerGroup(mock.MagicMock())
    for i in xrange(10):
        peer_group.get('localhost:404%d' % i)
    # A peer that will intentionally take a while to close.
    dirty_peer = mock.MagicMock()
    dirty_peer.close.side_effect = lambda: gen.sleep(0.1)
    peer_group.add(dirty_peer)
    # Fire ten clears at once; they must coalesce rather than re-close.
    yield [peer_group.clear() for i in xrange(10)]
    # Dirty peer must have been closed only once.
    dirty_peer.close.assert_called_once_with()
    for i in xrange(10):
        assert not peer_group.lookup('localhost:404%d' % i)
@pytest.mark.gen_test
def test_peer_connection_failure():
    # Test connecting a peer when the first connection attempt fails.
    MockConnection = mock.MagicMock()
    connection = mock.MagicMock()
    with mock.patch.object(tpeer.Peer, 'connection_class', MockConnection):
        @gen.coroutine
        def try_connect(*args, **kwargs):
            if MockConnection.outgoing.call_count == 1:
                # If this is the first call, fail.
                raise ZeroDivisionError('great sadness')
            else:
                raise gen.Return(connection)
        MockConnection.outgoing.side_effect = try_connect
        peer = tpeer.Peer(mock.MagicMock(), 'localhost:4040')
        future = peer.connect()
        # First attempt surfaces the failure to the caller.
        with pytest.raises(ZeroDivisionError) as excinfo:
            yield future
        assert 'great sadness' in str(excinfo)
        # Second attempt retries (the failed future is not cached) and
        # returns the mocked connection.
        got = yield peer.connect()
        assert got is connection
        assert MockConnection.outgoing.call_count == 2
| mit |
jessekl/flixr | venv/lib/python2.7/site-packages/werkzeug/testsuite/datastructures.py | 145 | 28212 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy, deepcopy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type, PY2
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
    """Tests the @native_itermethods decorator."""

    def test_basic(self):
        # The decorator should expose keys/values/items in the form native
        # to the running Python version, forwarding extra arguments
        # (here `multi`) through to the decorated methods.
        @datastructures.native_itermethods(['keys', 'values', 'items'])
        class StupidDict(object):
            def keys(self, multi=1):
                return iter(['a', 'b', 'c'] * multi)

            def values(self, multi=1):
                return iter([1, 2, 3] * multi)

            def items(self, multi=1):
                return iter(zip(iterkeys(self, multi=multi),
                                itervalues(self, multi=multi)))

        d = StupidDict()
        expected_keys = ['a', 'b', 'c']
        expected_values = [1, 2, 3]
        expected_items = list(zip(expected_keys, expected_values))

        self.assert_equal(list(iterkeys(d)), expected_keys)
        self.assert_equal(list(itervalues(d)), expected_values)
        self.assert_equal(list(iteritems(d)), expected_items)

        # Extra arguments must pass through unchanged.
        self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
        self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
        self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for mutable multi-dicts.

    Subclasses set `storage_class` (e.g. MultiDict, OrderedMultiDict)
    and inherit the whole suite.
    """

    # The concrete multi-dict class under test; set by subclasses.
    storage_class = None

    def test_pickle(self):
        """Round-trips through every pickle protocol, incl. the old
        `werkzeug` module path for backwards compatibility."""
        cls = self.storage_class
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            d = cls()
            d.setlist(b'foo', [1, 2, 3, 4])
            d.setlist(b'bar', b'foo bar baz'.split())
            s = pickle.dumps(d, protocol)
            ud = pickle.loads(s)
            self.assert_equal(type(ud), type(d))
            self.assert_equal(ud, d)
            # Pickles that reference the old flat module path must still load.
            self.assert_equal(pickle.loads(
                s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
            ud[b'newkey'] = b'bla'
            self.assert_not_equal(ud, d)

    def test_basic_interface(self):
        """Exercises the full mutable multi-dict API end to end."""
        md = self.storage_class()
        assert isinstance(md, dict)

        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)

        # simple getitem gives the first value
        self.assert_equal(md['a'], 1)
        self.assert_equal(md['c'], 3)
        with self.assert_raises(KeyError):
            md['e']
        self.assert_equal(md.get('a'), 1)

        # list getitem
        self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
        self.assert_equal(md.getlist('d'), [3, 4])
        # do not raise if key not found
        self.assert_equal(md.getlist('x'), [])

        # simple setitem overwrites all values
        md['a'] = 42
        self.assert_equal(md.getlist('a'), [42])

        # list setitem
        md.setlist('a', [1, 2, 3])
        self.assert_equal(md['a'], 1)
        self.assert_equal(md.getlist('a'), [1, 2, 3])

        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist('a', l1)
        del l1[:]
        self.assert_equal(md['a'], 1)

        # setdefault, setlistdefault
        self.assert_equal(md.setdefault('u', 23), 23)
        self.assert_equal(md.getlist('u'), [23])
        del md['u']

        md.setlist('u', [-1, -2])

        # delitem
        del md['u']
        with self.assert_raises(KeyError):
            md['u']
        del md['d']
        self.assert_equal(md.getlist('d'), [])

        # keys, values, items, lists
        self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(md.items())),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.items(multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md))),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md, multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.lists())),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        self.assert_equal(list(sorted(iterlists(md))),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])

        # copy method
        c = md.copy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # copy method 2
        c = copy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # deepcopy method
        c = md.deepcopy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # deepcopy method 2
        c = deepcopy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # update with a multidict
        od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
        self.assert_equal(md.getlist('y'), [0])

        # update with a regular dict
        md = c
        od = {'a': 4, 'y': 0}
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
        self.assert_equal(md.getlist('y'), [0])

        # pop, poplist, popitem, popitemlist
        self.assert_equal(md.pop('y'), 0)
        assert 'y' not in md
        self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
        assert 'a' not in md
        self.assert_equal(md.poplist('missing'), [])

        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [('b', 2), ('c', 3)]
        popped = md.popitemlist()
        assert popped in [('b', [2]), ('c', [3])]

        # type conversion
        md = self.storage_class({'a': '4', 'b': ['2', '3']})
        self.assert_equal(md.get('a', type=int), 4)
        self.assert_equal(md.getlist('b', type=int), [2, 3])

        # repr
        md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)

        # add and getlist
        md.add('c', '42')
        md.add('c', '23')
        self.assert_equal(md.getlist('c'), ['42', '23'])
        md.add('c', 'blah')
        # getlist(type=...) silently drops values that fail conversion.
        self.assert_equal(md.getlist('c', type=int), [42, 23])

        # setdefault
        md = self.storage_class()
        md.setdefault('x', []).append(42)
        md.setdefault('x', []).append(23)
        self.assert_equal(md['x'], [42, 23])

        # to dict
        md = self.storage_class()
        md['foo'] = 42
        md.add('bar', 1)
        md.add('bar', 2)
        self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
        self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})

        # popitem from empty dict
        with self.assert_raises(KeyError):
            self.storage_class().popitem()

        with self.assert_raises(KeyError):
            self.storage_class().popitemlist()

        # key errors are of a special type
        with self.assert_raises(BadRequestKeyError):
            self.storage_class()[42]

        # setlist works
        md = self.storage_class()
        md['foo'] = 42
        md.setlist('foo', [1, 2])
        self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for immutable dict variants; subclasses set
    `storage_class`."""

    # The concrete immutable dict class under test; set by subclasses.
    storage_class = None

    def test_follows_dict_interface(self):
        """Read-only access must behave exactly like a plain dict."""
        cls = self.storage_class

        data = {'foo': 1, 'bar': 2, 'baz': 3}
        d = cls(data)

        self.assert_equal(d['foo'], 1)
        self.assert_equal(d['bar'], 2)
        self.assert_equal(d['baz'], 3)
        self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
        self.assert_true('foo' in d)
        self.assert_true('foox' not in d)
        self.assert_equal(len(d), 3)

    def test_copies_are_mutable(self):
        """copy() yields a mutable dict; copy.copy() returns self."""
        cls = self.storage_class
        immutable = cls({'a': 1})
        with self.assert_raises(TypeError):
            immutable.pop('a')

        mutable = immutable.copy()
        mutable.pop('a')
        self.assert_true('a' in immutable)
        self.assert_true(mutable is not immutable)
        # Shallow-copying an immutable object can safely return the object.
        self.assert_true(copy(immutable) is immutable)

    def test_dict_is_hashable(self):
        """Immutable dicts must be usable as set members / dict keys."""
        cls = self.storage_class
        immutable = cls({'a': 1, 'b': 2})
        immutable2 = cls({'a': 2, 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
    """Runs the immutable-dict suite against ImmutableTypeConversionDict."""
    storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
    """Runs the immutable-dict suite against ImmutableMultiDict, plus
    multi-value hashing."""
    storage_class = datastructures.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        """Hash/equality must account for all values of a key."""
        cls = self.storage_class
        immutable = cls({'a': [1, 2], 'b': 2})
        immutable2 = cls({'a': [1], 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
    """Runs the immutable-dict suite against ImmutableDict."""
    storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
    """Runs the immutable-dict suite against ImmutableOrderedMultiDict."""
    storage_class = datastructures.ImmutableOrderedMultiDict

    def test_ordered_multidict_is_hashable(self):
        # Same items in a different insertion order must hash differently.
        a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
        b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
        self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
    """Runs the mutable suite against MultiDict, plus MultiDict-specific
    behavior."""
    storage_class = datastructures.MultiDict

    def test_multidict_pop(self):
        """pop() returns the *first* value and drops the whole key."""
        make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
        d = make_d()
        self.assert_equal(d.pop('foo'), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foo', 32), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foos', 32), 32)
        assert d

        with self.assert_raises(KeyError):
            d.pop('foos')

    def test_setlistdefault(self):
        md = self.storage_class()
        self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
        self.assert_equal(md.getlist('u'), [-1, -2])
        self.assert_equal(md['u'], -1)

    def test_iter_interfaces(self):
        # keys/listvalues/lists and their iter* variants must stay in sync.
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        self.assert_equal(list(zip(md.keys(), md.listvalues())),
                          list(md.lists()))
        self.assert_equal(list(zip(md, iterlistvalues(md))),
                          list(iterlists(md)))
        self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
                          list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
    """Runs the mutable suite against OrderedMultiDict, plus
    insertion-order-specific behavior."""
    storage_class = datastructures.OrderedMultiDict

    def test_ordered_interface(self):
        """Insertion order must be preserved across adds, pops, updates."""
        cls = self.storage_class

        d = cls()
        assert not d
        d.add('foo', 'bar')
        self.assert_equal(len(d), 1)
        d.add('foo', 'baz')
        # len() counts distinct keys, not values.
        self.assert_equal(len(d), 1)
        self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
        self.assert_equal(list(d), ['foo'])
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 'bar'), ('foo', 'baz')])
        del d['foo']
        assert not d
        self.assert_equal(len(d), 0)
        self.assert_equal(list(d), [])

        d.update([('foo', 1), ('foo', 2), ('bar', 42)])
        d.add('foo', 3)
        self.assert_equal(d.getlist('foo'), [1, 2, 3])
        self.assert_equal(d.getlist('bar'), [42])
        self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])

        expected = ['foo', 'bar']

        self.assert_sequence_equal(list(d.keys()), expected)
        self.assert_sequence_equal(list(d), expected)
        self.assert_sequence_equal(list(iterkeys(d)), expected)

        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
        self.assert_equal(len(d), 2)
        self.assert_equal(d.pop('foo'), 1)
        assert d.pop('blafasel', None) is None
        self.assert_equal(d.pop('blafasel', 42), 42)
        self.assert_equal(len(d), 1)
        self.assert_equal(d.poplist('bar'), [42])
        assert not d

        # NOTE(review): statement below has no assert -- its result is
        # discarded, so it only checks that get() doesn't raise.
        d.get('missingkey') is None

        d.add('foo', 42)
        d.add('foo', 23)
        d.add('bar', 2)
        d.add('foo', 42)
        # Equality against a plain MultiDict ignores ordering.
        self.assert_equal(d, datastructures.MultiDict(d))
        id = self.storage_class(d)
        self.assert_equal(d, id)
        d.add('foo', 2)
        assert d != id

        d.update({'blah': [1, 2, 3]})
        self.assert_equal(d['blah'], 1)
        self.assert_equal(d.getlist('blah'), [1, 2, 3])

        # setlist works
        d = self.storage_class()
        d['foo'] = 42
        d.setlist('foo', [1, 2])
        self.assert_equal(d.getlist('foo'), [1, 2])

        with self.assert_raises(BadRequestKeyError):
            d.pop('missing')
        with self.assert_raises(BadRequestKeyError):
            d['missing']

        # popping
        d = self.storage_class()
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitem(), ('foo', 23))
        with self.assert_raises(BadRequestKeyError):
            d.popitem()
        assert not d

        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))

        with self.assert_raises(BadRequestKeyError):
            d.popitemlist()

    def test_iterables(self):
        # Combined views over multiple dicts must expose both sources.
        a = datastructures.MultiDict((("key_a", "value_a"),))
        b = datastructures.MultiDict((("key_b", "value_b"),))
        ab = datastructures.CombinedMultiDict((a,b))

        self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
        self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])

        self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
        self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
    """Tests CombinedMultiDict: a read-only view over several MultiDicts
    where earlier dicts shadow later ones."""
    storage_class = datastructures.CombinedMultiDict

    def test_basic_interface(self):
        d1 = datastructures.MultiDict([('foo', '1')])
        d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
        d = self.storage_class([d1, d2])

        # lookup
        self.assert_equal(d['foo'], '1')
        self.assert_equal(d['bar'], '2')
        self.assert_equal(d.getlist('bar'), ['2', '3'])

        self.assert_equal(sorted(d.items()),
                          [('bar', '2'), ('foo', '1')])
        self.assert_equal(sorted(d.items(multi=True)),
                          [('bar', '2'), ('bar', '3'), ('foo', '1')])
        assert 'missingkey' not in d
        assert 'foo' in d

        # type lookup
        self.assert_equal(d.get('foo', type=int), 1)
        self.assert_equal(d.getlist('bar', type=int), [2, 3])

        # get key errors for missing stuff
        with self.assert_raises(KeyError):
            d['missing']

        # make sure that they are immutable
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # copies are immutable
        d = d.copy()
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # make sure lists merges values from all wrapped dicts
        md1 = datastructures.MultiDict((("foo", "bar"),))
        md2 = datastructures.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
    """Tests for the mutable, case-insensitive, list-like Headers container."""
    storage_class = datastructures.Headers
    def test_basic_interface(self):
        """Case-insensitive lookup, item assignment, WSGI/str conversion,
        and add() with extra keyword parameters (quoted as needed)."""
        headers = self.storage_class()
        headers.add('Content-Type', 'text/plain')
        headers.add('X-Foo', 'bar')
        assert 'x-Foo' in headers
        assert 'Content-type' in headers
        headers['Content-Type'] = 'foo/bar'
        self.assert_equal(headers['Content-Type'], 'foo/bar')
        self.assert_equal(len(headers.getlist('Content-Type')), 1)
        # list conversion
        self.assert_equal(headers.to_wsgi_list(), [
            ('Content-Type', 'foo/bar'),
            ('X-Foo', 'bar')
        ])
        self.assert_equal(str(headers), (
            "Content-Type: foo/bar\r\n"
            "X-Foo: bar\r\n"
            "\r\n"))
        self.assert_equal(str(self.storage_class()), "\r\n")
        # extended add
        headers.add('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(headers['Content-Disposition'],
                          'attachment; filename=foo')
        headers.add('x', 'y', z='"')
        self.assert_equal(headers['x'], r'y; z="\""')
    def test_defaults_and_conversion(self):
        """get/getlist/setdefault semantics plus list-style slicing/deletion."""
        # defaults
        headers = self.storage_class([
            ('Content-Type', 'text/plain'),
            ('X-Foo', 'bar'),
            ('X-Bar', '1'),
            ('X-Bar', '2')
        ])
        self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
        self.assert_equal(headers.get('x-Bar'), '1')
        self.assert_equal(headers.get('Content-Type'), 'text/plain')
        self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
        self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
        self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
        self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
        headers.pop('X-Baz')
        # type conversion
        self.assert_equal(headers.get('x-bar', type=int), 1)
        self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])
        # list like operations
        self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
        self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
        del headers[:2]
        del headers[-1]
        self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))
    def test_copying(self):
        """copy() must produce an independent Headers object."""
        a = self.storage_class([('foo', 'bar')])
        b = a.copy()
        a.add('foo', 'baz')
        self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
        self.assert_equal(b.getlist('foo'), ['bar'])
    def test_popping(self):
        """pop() returns the value, honors defaults, raises on missing keys."""
        headers = self.storage_class([('a', 1)])
        self.assert_equal(headers.pop('a'), 1)
        self.assert_equal(headers.pop('b', 2), 2)
        with self.assert_raises(KeyError):
            headers.pop('c')
    def test_set_arguments(self):
        """set() with keyword options replaces any previous value."""
        a = self.storage_class()
        a.set('Content-Disposition', 'useless')
        a.set('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')
    def test_reject_newlines(self):
        """Header values containing CR/LF must be rejected (header injection)."""
        h = self.storage_class()
        for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
            with self.assert_raises(ValueError):
                h['foo'] = variation
            with self.assert_raises(ValueError):
                h.add('foo', variation)
            with self.assert_raises(ValueError):
                h.add('foo', 'test', option=variation)
            with self.assert_raises(ValueError):
                h.set('foo', variation)
            with self.assert_raises(ValueError):
                h.set('foo', 'test', option=variation)
    def test_slicing(self):
        """Slice assignment replaces the full header list in place."""
        # there's nothing wrong with these being native strings
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('Content-Type', 'application/whocares')
        h.set('X-Forwarded-For', '192.168.0.123')
        h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
        self.assert_equal(list(h), [
            ('X-Foo-Poo', 'bleh'),
            ('X-Forwarded-For', '192.168.0.123')
        ])
    def test_bytes_operations(self):
        """get(..., as_bytes=True) returns raw bytes, even non-latin1-safe ones."""
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('X-Whoops', b'\xff')
        self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
        self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
    def test_to_wsgi_list(self):
        """to_wsgi_list() yields native strings: bytes on PY2, text on PY3."""
        h = self.storage_class()
        h.set(u'Key', u'Value')
        for key, value in h.to_wsgi_list():
            if PY2:
                self.assert_strict_equal(key, b'Key')
                self.assert_strict_equal(value, b'Value')
            else:
                self.assert_strict_equal(key, u'Key')
                self.assert_strict_equal(value, u'Value')
class EnvironHeadersTestCase(WerkzeugTestCase):
    """Tests for EnvironHeaders, the read-only Headers view over a WSGI
    environ dict (HTTP_* keys plus CONTENT_TYPE / CONTENT_LENGTH)."""
    storage_class = datastructures.EnvironHeaders
    def test_basic_interface(self):
        """Duplicated HTTP_CONTENT_* keys must not produce duplicate headers."""
        # this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers;
        broken_env = {
            'HTTP_CONTENT_TYPE': 'text/html',
            'CONTENT_TYPE': 'text/html',
            'HTTP_CONTENT_LENGTH': '0',
            'CONTENT_LENGTH': '0',
            'HTTP_ACCEPT': '*',
            'wsgi.version': (1, 0)
        }
        headers = self.storage_class(broken_env)
        assert headers
        self.assert_equal(len(headers), 3)
        self.assert_equal(sorted(headers), [
            ('Accept', '*'),
            ('Content-Length', '0'),
            ('Content-Type', 'text/html')
        ])
        # an environ with no header keys yields a falsy, empty view
        assert not self.storage_class({'wsgi.version': (1, 0)})
        self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)
    def test_return_type_is_unicode(self):
        # environ contains native strings; we return unicode
        headers = self.storage_class({
            'HTTP_FOO': '\xe2\x9c\x93',
            'CONTENT_TYPE': 'text/plain',
        })
        self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
        assert isinstance(headers['Foo'], text_type)
        assert isinstance(headers['Content-Type'], text_type)
        iter_output = dict(iter(headers))
        self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
        assert isinstance(iter_output['Foo'], text_type)
        assert isinstance(iter_output['Content-Type'], text_type)
    def test_bytes_operations(self):
        """as_bytes=True round-trips the raw latin-1 byte value."""
        foo_val = '\xff'
        h = self.storage_class({
            'HTTP_X_FOO': foo_val
        })
        self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
        self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
    """Tests for HeaderSet: an ordered, case-insensitive set of header
    tokens (as used by e.g. the Vary and Allow headers)."""
    storage_class = datastructures.HeaderSet
    def test_basic_interface(self):
        """add/find/discard/index/clear honor case-insensitive matching
        and preserve insertion order."""
        hs = self.storage_class()
        hs.add('foo')
        hs.add('bar')
        assert 'Bar' in hs
        self.assert_equal(hs.find('foo'), 0)
        self.assert_equal(hs.find('BAR'), 1)
        assert hs.find('baz') < 0
        hs.discard('missing')
        hs.discard('foo')
        assert hs.find('foo') < 0
        self.assert_equal(hs.find('bar'), 0)
        # index() raises for missing entries while find() returns -1
        with self.assert_raises(IndexError):
            hs.index('missing')
        self.assert_equal(hs.index('bar'), 0)
        assert hs
        hs.clear()
        assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
    """Tests for ImmutableList, a hashable, frozen list type."""
    storage_class = datastructures.ImmutableList
    def test_list_hashable(self):
        """An ImmutableList hashes like the equivalent tuple but does not
        compare equal to it (different types)."""
        t = (1, 2, 3, 4)
        l = self.storage_class(t)
        self.assert_equal(hash(t), hash(l))
        self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
    """Utility to assert a certain number of function calls.

    Returns a pair ``(asserter, wrapped)``: ``wrapped`` counts every
    invocation (delegating to *func* when one was supplied), and
    ``asserter(count, msg)`` is a context manager that resets the counter
    on entry and asserts via *assert_equal_func* on exit.

    >>> assert_calls, func = make_call_asserter(self.assert_equal)
    >>> with assert_calls(2):
            func()
            func()
    """
    state = {'calls': 0}

    def wrapped(*args, **kwargs):
        state['calls'] += 1
        if func is None:
            return None
        return func(*args, **kwargs)

    @contextmanager
    def asserter(count, msg=None):
        state['calls'] = 0
        yield
        assert_equal_func(state['calls'], count, msg)

    return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
    """Tests that CallbackDict fires its on_update callback exactly once
    per mutating operation and never for pure reads."""
    storage_class = datastructures.CallbackDict
    def test_callback_dict_reads(self):
        """Read-only access and no-op writes must not trigger the callback."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, 'callback triggered by read-only method'):
            # read-only methods
            dct['a']
            dct.get('a')
            self.assert_raises(KeyError, lambda: dct['x'])
            'a' in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, 'callback triggered without modification'):
            # methods that may write but don't
            dct.pop('z', None)
            dct.setdefault('a')
    def test_callback_dict_writes(self):
        """Each successful mutation fires once; failed mutations fire zero times."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(8, 'callback not triggered by write method'):
            # always-write methods
            dct['z'] = 123
            dct['z'] = 123 # must trigger again
            del dct['z']
            dct.pop('b', None)
            dct.setdefault('x')
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, 'callback triggered by failed del'):
            self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
        with assert_calls(0, 'callback triggered by failed pop'):
            self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
    """Collect every datastructures test case into one unittest suite."""
    test_cases = (
        MultiDictTestCase,
        OrderedMultiDictTestCase,
        CombinedMultiDictTestCase,
        ImmutableTypeConversionDictTestCase,
        ImmutableMultiDictTestCase,
        ImmutableDictTestCase,
        ImmutableOrderedMultiDictTestCase,
        HeadersTestCase,
        EnvironHeadersTestCase,
        HeaderSetTestCase,
        NativeItermethodsTestCase,
        CallbackDictTestCase,
    )
    result = unittest.TestSuite()
    for case in test_cases:
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
rosswhitfield/mantid | qt/python/mantidqt/widgets/observers/test/test_ads_observer.py | 3 | 1563 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import unittest
from unittest.mock import Mock
from mantidqt.widgets.observers.ads_observer import WorkspaceDisplayADSObserver
class MockWorkspaceDisplay:
    """Test double for a workspace display widget.

    Exposes the three callbacks the ADS observer drives — ``close``,
    ``force_close`` and ``replace_workspace`` — each as an independent
    ``Mock`` so tests can assert on how they were invoked.
    """
    def __init__(self):
        for callback_name in ("close", "force_close", "replace_workspace"):
            setattr(self, callback_name, Mock())
class WorkspaceDisplayADSObserverTest(unittest.TestCase):
    """Checks that ADS notifications are forwarded to the wrapped display."""
    def test_clearHandle(self):
        """A workspace-clear event force-closes the display."""
        mock_wsd = MockWorkspaceDisplay()
        observer = WorkspaceDisplayADSObserver(mock_wsd)
        observer.clearHandle()
        mock_wsd.force_close.assert_called_once_with()
    def test_deleteHandle(self):
        """A delete event closes the display for the deleted workspace name."""
        mock_wsd = MockWorkspaceDisplay()
        observer = WorkspaceDisplayADSObserver(mock_wsd)
        expected_name = "adad"
        observer.deleteHandle(expected_name, None)
        mock_wsd.close.assert_called_once_with(expected_name)
    def test_replaceHandle(self):
        """A replace event forwards both the name and the new workspace object."""
        mock_wsd = MockWorkspaceDisplay()
        observer = WorkspaceDisplayADSObserver(mock_wsd)
        expected_name = "a"
        expected_parameter = 444555.158
        observer.replaceHandle(expected_name, expected_parameter)
        mock_wsd.replace_workspace.assert_called_once_with(expected_name, expected_parameter)
| gpl-3.0 |
alvarolopez/nova | nova/db/sqlalchemy/migrate_repo/versions/271_sqlite_postgresql_indexes.py | 33 | 2976 | # Copyright 2014 Rackspace Hosting
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import utils
# (table_name, index_name, [column, ...]) triples: the indexes that exist on
# MySQL but were historically missing on SQLite and PostgreSQL deployments.
INDEXES = [
    ('block_device_mapping', 'snapshot_id', ['snapshot_id']),
    ('block_device_mapping', 'volume_id', ['volume_id']),
    ('dns_domains', 'dns_domains_project_id_idx', ['project_id']),
    ('fixed_ips', 'network_id', ['network_id']),
    ('fixed_ips', 'fixed_ips_instance_uuid_fkey', ['instance_uuid']),
    ('fixed_ips', 'fixed_ips_virtual_interface_id_fkey',
     ['virtual_interface_id']),
    ('floating_ips', 'fixed_ip_id', ['fixed_ip_id']),
    ('iscsi_targets', 'iscsi_targets_volume_id_fkey', ['volume_id']),
    ('virtual_interfaces', 'virtual_interfaces_network_id_idx',
     ['network_id']),
    ('virtual_interfaces', 'virtual_interfaces_instance_uuid_fkey',
     ['instance_uuid']),
]
def ensure_index_exists(migrate_engine, table_name, index_name, column_names):
    """Create *index_name* on *table_name* unless it already exists."""
    if utils.index_exists(migrate_engine, table_name, index_name):
        return
    utils.add_index(migrate_engine, table_name, index_name, column_names)
def ensure_index_removed(migrate_engine, table_name, index_name):
    """Drop *index_name* from *table_name* when it is present."""
    index_present = utils.index_exists(migrate_engine, table_name, index_name)
    if index_present:
        utils.drop_index(migrate_engine, table_name, index_name)
def upgrade(migrate_engine):
    """Add indexes missing on SQLite and PostgreSQL."""
    # PostgreSQL and SQLite namespace indexes at the database level, whereas
    # MySQL namespaces indexes at the table level. Unfortunately, some of
    # the missing indexes in PostgreSQL and SQLite have conflicting names
    # that MySQL allowed.
    if migrate_engine.name in ('sqlite', 'postgresql'):
        # Idempotent: ensure_index_exists skips indexes that already exist.
        for table_name, index_name, column_names in INDEXES:
            ensure_index_exists(migrate_engine, table_name, index_name,
                                column_names)
    elif migrate_engine.name == 'mysql':
        # Rename some indexes with conflicting names
        # (drop the old short name, then recreate under the namespaced name)
        ensure_index_removed(migrate_engine, 'dns_domains', 'project_id')
        ensure_index_exists(migrate_engine, 'dns_domains',
                            'dns_domains_project_id_idx', ['project_id'])
        ensure_index_removed(migrate_engine, 'virtual_interfaces',
                             'network_id')
        ensure_index_exists(migrate_engine, 'virtual_interfaces',
                            'virtual_interfaces_network_id_idx',
                            ['network_id'])
| apache-2.0 |
zchking/odoo | addons/calendar/__init__.py | 391 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Use explicit relative imports consistently: the bare ``import controllers``
# and ``import contacts`` forms rely on Python 2 implicit relative imports
# and break under Python 3 or ``from __future__ import absolute_import``.
# Import order is preserved because module import has registration side effects.
from . import calendar
from . import controllers
from . import contacts
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/aio/_compute_management_client.py | 1 | 4855 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ComputeManagementClientConfiguration
from .operations import DisksOperations
from .operations import SnapshotsOperations
from .operations import DiskEncryptionSetsOperations
from .operations import DiskAccessesOperations
from .. import models
class ComputeManagementClient(object):
    """Compute Client.
    :ivar disks: DisksOperations operations
    :vartype disks: azure.mgmt.compute.v2020_06_30.aio.operations.DisksOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.compute.v2020_06_30.aio.operations.SnapshotsOperations
    :ivar disk_encryption_sets: DiskEncryptionSetsOperations operations
    :vartype disk_encryption_sets: azure.mgmt.compute.v2020_06_30.aio.operations.DiskEncryptionSetsOperations
    :ivar disk_accesses: DiskAccessesOperations operations
    :vartype disk_accesses: azure.mgmt.compute.v2020_06_30.aio.operations.DiskAccessesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """
    # NOTE: this class is generated by AutoRest; manual edits are lost on
    # regeneration (see the file header).
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Build the msrest (de)serializers over every model type in this API version.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        self.disks = DisksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshots = SnapshotsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.disk_encryption_sets = DiskEncryptionSetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.disk_accesses = DiskAccessesOperations(
            self._client, self._config, self._serialize, self._deserialize)
    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.
        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the subscription into the request URL before sending.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response
    async def close(self) -> None:
        # Close the underlying ARM pipeline client and its transport.
        await self._client.close()
    async def __aenter__(self) -> "ComputeManagementClient":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| mit |
nextgis/quickmapservices_server | qms_server/qms_core/migrations/0001_initial.py | 1 | 3318 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-01 19:16
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
import qms_core.models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``NextgisUser`` model.

    Auto-generated by Django 1.9.4 — do not edit the field definitions by
    hand; later migrations depend on this exact schema state.
    """
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]
    operations = [
        migrations.CreateModel(
            name='NextgisUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('nextgis_guid', models.UUIDField(default=uuid.uuid4, editable=False, verbose_name='nextgis guid', unique=True)),
                ('locale', models.CharField(choices=[(b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'en', 'English')], default=b'en', max_length=30, null=True, verbose_name='user locale')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', qms_core.models.NextgisUserManager()),
            ],
        ),
    ]
| gpl-2.0 |
goodmami/xigt | xigt/scripts/xigt_sort.py | 2 | 3575 | #!/usr/bin/env python
from __future__ import print_function
import re
import argparse
import logging
from xigt.codecs import xigtxml
from xigt import XigtCorpus, Igt, xigtpath as xp
def run(args):
    """Load the corpus, apply the requested sorts, and write the result.

    Sort order of application: IGTs first, then tiers (by key, or by
    reference dependencies), then items within each tier.
    """
    xc = xigtxml.load(args.infile)
    if args.igt_key:
        logging.info('Sorting %s IGTs' % args.infile)
        xc.sort(key=make_sortkey(args.igt_key))
    if args.tier_key:
        logging.info('Sorting %s tiers by key' % args.infile)
        for igt in xc:
            igt.sort(key=make_sortkey(args.tier_key))
    elif args.tier_deps:
        logging.info('Sorting %s tiers by ref-dependencies' % args.infile)
        refattrs = [ra.strip() for ra in args.tier_deps.split(',')]
        for igt in xc:
            igt.sort_tiers(refattrs=refattrs)
    if args.item_key:
        logging.info('Sorting %s items by key' % args.infile)
        for igt in xc:
            for tier in igt:
                tier.sort(key=make_sortkey(args.item_key))
    if args.in_place:
        # Overwrite the input file instead of printing to stdout.
        xigtxml.dump(args.infile, xc)
    else:
        print(xigtxml.dumps(xc))
def make_sortkey(sortkeys):
    """Build a natural-ordering sort key from XigtPath queries.

    For each query in *sortkeys* the matched string is split on digit runs;
    digit runs compare as integers and the remaining fragments as strings,
    so e.g. ``t2`` sorts before ``t10``.
    """
    def _coerce(token):
        # return int values if possible (for int comparison), otherwise strings
        try:
            return int(token)
        except ValueError:
            return token
    def key(obj):
        parts = []
        for query in sortkeys:
            matched = xp.find(obj, query) or ''
            parts.extend(_coerce(tok) for tok in re.split(r'(\d+)', matched))
        return parts
    return key
def main(arglist=None):
    """Parse command-line arguments and run the corpus sort.

    :param arglist: optional argument list (defaults to ``sys.argv[1:]``),
        useful for testing or programmatic invocation.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Sort Igts, Tiers, or Items in Xigt corpora",
        epilog='examples:\n'
            '  xigt sort --igt-key=\'@doc-id\' --igt-key=\'@id\' in.xml > out.xml\n'
            '  xigt sort --tier-key=\'@type\' in.xml > out.xml\n'
            '  xigt sort --tier-deps="segmentation,alignment,content" in.xml > out.xml'
    )
    parser.add_argument('-v', '--verbose',
        action='count', dest='verbosity', default=2,
        help='increase the verbosity (can be repeated: -vvv)'
    )
    parser.add_argument('infile',
        help='the Xigt corpus file to sort'
    )
    parser.add_argument('--in-place',
        action='store_true',
        help='don\'t print to stdout; modify the input file in-place'
    )
    parser.add_argument('--igt-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for IGTs (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    # --tier-key and --tier-deps are mutually exclusive sorting strategies
    tiergroup = parser.add_mutually_exclusive_group()
    tiergroup.add_argument('--tier-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for Tiers (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    tiergroup.add_argument('--tier-deps',
        metavar='REFATTRS',
        help='sort tiers by reference dependencies; argument is a '
             'comma-separated prioritized list of considered reference '
             'attributes'
    )
    parser.add_argument('--item-key',
        metavar='XIGTPATH', action='append',
        help='the XigtPath query for Items (must result in a string, so '
             'it should end with an @attribute, text(), or value())'
    )
    # parser.add_argument('--item-deps',
    #     action='store_true',
    #     help='sort items by reference dependencies'
    # )
    args = parser.parse_args(arglist)
    # Map -v count to logging levels (2 -> WARNING-ish, more -v -> noisier).
    logging.basicConfig(level=50-(args.verbosity*10))
    run(args)
if __name__ == '__main__':
main()
| mit |
mbartling/Hadoop_playground | FrogSAT/reduceCNF.py | 1 | 3251 | #! /usr/bin/env python
import sys
import os
sys.path.append('.')
# Global accumulator state for the reducer key currently being processed.
literals = set()
clauseList = []
# Set to 1 to invoke the Windows (.exe) MiniSat build instead of the ELF one.
WINDOWSMODE = 0
verbosity = 0
# Pre-built "-verb=N" flag appended to every MiniSat command line.
MSVERBOSITY = " -verb={0}".format(verbosity)
def runMiniSat(filename, filenameout):
    """Invoke the MiniSat solver on the CNF file *filename*.

    Results are written by MiniSat to *filenameout*. The two platform
    branches in the original were copy-pasted duplicates differing only in
    the executable name, so the command line is built once here; WINDOWSMODE
    merely selects the ``.exe`` build.
    """
    binary = "./minisat_static.exe " if WINDOWSMODE == 1 else "./minisat_static "
    minisat = (binary + filename + " " + filenameout + MSVERBOSITY +
               " -no-luby -rinc=1.5 -phase-saving=0 -rnd-freq=0.02")
    os.system(minisat)
# Hadoop streaming reducer loop: stdin lines are "key\tclause" pairs, grouped
# by key (the streaming framework guarantees grouping). For each key, collect
# its clauses and literals, emit a DIMACS CNF file, and run MiniSat on it.
#prevKey = ''
# Prime the loop with the first record so prevKey/literals/clauseList are set.
line = sys.stdin.readline()
line = line.strip()
(key,clause) = line.split('\t')
lits = clause.split()
prevKey = key
for literal in lits:
    literal = int(literal)
    # Track both polarities so max(literals) reflects the largest variable id.
    literals.add(literal)
    literals.add(-literal)
clauseList.append(clause)
for line in sys.stdin:
    line = line.strip()
    (key,clause) = line.split('\t')
    if key != prevKey:
        # Key changed: flush the accumulated problem for the previous key.
        #numLits = len(literals)/2 # Should be even number
        # MiniSAT determines the number of literals and compares it to our problem statement. Note miniSat is lazy and looks for the max(abs) of the literals.
        numLits = max(literals)
        filenameTxt = prevKey.replace(' ', '-')
        filename = '{0}.cnf'.format(filenameTxt)
        filenameout = '{0}-MiniSat.out'.format(filenameTxt)
        # Open the file to work on
        f = open(filename, 'w')
        # write out problem statement
        # p cnf numliterals numClauses\n
        #problem = ('p cnf', numLits, len(clauseList), '\n')
        problem = 'p cnf {0} {1}\n'.format(numLits, len(clauseList))
        # MiniSAT doesnt need this, see www.msoos.org/minisat-faq/
        f.write(problem)
        # Write the clauses
        #f.writelines(clauseList)
        for entry in clauseList:
            f.write('{0}\n'.format(entry))
        f.close()
        # Run the command line minisat here
        print 'Running Minisat Dummy'
        #os.system("cat " + filename+ ">" + '{0}.out'.format(filenameTxt))
        runMiniSat(filename, filenameout)
        # Reset accumulators for the new key.
        literals.clear()
        clauseList = []
    # Working on same key
    lits = clause.split()
    prevKey = key
    for literal in lits:
        literal = int(literal)
        literals.add(literal)
        literals.add(-literal)
    clauseList.append(clause)
# Process the last key
# NOTE(review): this tail block duplicates the flush above — a candidate for
# extraction into a helper function.
#numLits = len(literals)/2 # Should be even number
numLits = max(literals)
filenameTxt = key.replace(' ', '-')
filename = '{0}.cnf'.format(filenameTxt)
filenameout = '{0}-MiniSat.out'.format(filenameTxt)
# Open the file to work on
f = open(filename, "w")
# write out problem statement
# p cnf numliterals numClauses\n
#problem = ('p cnf', numLits, len(clauseList), '\n')
problem = 'p cnf {0} {1}\n'.format(numLits, len(clauseList))
f.write(problem)
# Write the clauses
#f.writelines(clauseList)
for entry in clauseList:
    f.write('{0}\n'.format(entry))
f.close()
# Run the command line minisat here
print 'Running Minisat Dummy'
#os.system("cat " + filename+ ">" + '{0}.out'.format(filenameTxt))
runMiniSat(filename, filenameout)
literals.clear()
clauseList = []
| gpl-2.0 |
z4y4ts/flask-script | setup.py | 7 | 1762 | """
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
import sys
from setuptools import setup
version='2.0.6'
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
    import multiprocessing
except ImportError:
    pass
install_requires = ['Flask']
# Package metadata; long_description comes from this module's docstring above.
setup(
    name='Flask-Script',
    version=version,
    url='http://github.com/smurfix/flask-script',
    download_url = 'https://github.com/smurfix/flask-script/tarball/v'+version,
    license='BSD',
    author='Dan Jacob',
    author_email='danjac354@gmail.com',
    maintainer='Matthias Urlichs',
    maintainer_email='matthias@urlichs.de',
    description='Scripting support for Flask',
    long_description=__doc__,
    packages=[
        'flask_script'
    ],
    zip_safe=False,
    install_requires=install_requires,
    tests_require=[
        'pytest',
    ],
    platforms='any',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| bsd-3-clause |
fuziontech/sentry | tests/sentry/api/endpoints/test_project_key_details.py | 33 | 1384 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import ProjectKey
from sentry.testutils import APITestCase
class UpdateProjectKeyTest(APITestCase):
    """PUT on the project-key-details endpoint renames the key."""
    def test_simple(self):
        """Updating 'name' returns 200 and persists as the key's label."""
        project = self.create_project()
        key = ProjectKey.objects.get_or_create(project=project)[0]
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-project-key-details', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'key_id': key.public_key,
        })
        response = self.client.put(url, {'name': 'hello world'})
        assert response.status_code == 200
        # re-fetch to confirm the change was written to the database
        key = ProjectKey.objects.get(id=key.id)
        assert key.label == 'hello world'
class DeleteProjectKeyTest(APITestCase):
    """DELETE on the project-key-details endpoint removes the key.

    Renamed from ``DeleteProjectKeTest`` — the original class name was
    missing the 'y' in "Key". Nothing imports the class by name; the test
    runner discovers it as a TestCase subclass, so the rename is safe.
    """
    def test_simple(self):
        """Deleting an existing key returns 204 and removes the row."""
        project = self.create_project()
        self.login_as(user=self.user)
        key = ProjectKey.objects.get_or_create(project=project)[0]
        url = reverse('sentry-api-0-project-key-details', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'key_id': key.public_key,
        })
        resp = self.client.delete(url)
        assert resp.status_code == 204, resp.content
        assert not ProjectKey.objects.filter(id=key.id).exists()
| bsd-3-clause |
hyowon/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_parser.py | 451 | 3612 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import traceback
import warnings
import re
warnings.simplefilter("error")
from .support import get_data_files
from .support import TestData, convert, convertExpected, treeTypes
from html5lib import html5parser, constants
# Run the parse error checks
checkParseErrors = False
# XXX - There should just be one function here but for some reason the testcase
# format differs from the treedump format by a single space character
def convertTreeDump(data):
    """Convert a serialized tree dump, dropping its first line.

    The treedump format differs from the testcase format by a single
    leading line, hence the slice.
    """
    converted_lines = convert(3)(data).split("\n")
    return "\n".join(converted_lines[1:])
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
                  namespaceHTMLElements):
    """Parse *input* (as a fragment when *innerHTML* is set) and compare the
    serialized tree against *expected*; optionally check parse-error counts."""
    with warnings.catch_warnings(record=True) as caughtWarnings:
        warnings.simplefilter("always")
        p = html5parser.HTMLParser(tree=treeClass,
                                   namespaceHTMLElements=namespaceHTMLElements)
        try:
            if innerHTML:
                document = p.parseFragment(input, innerHTML)
            else:
                document = p.parse(input)
        except:
            # Any parser crash fails the test with the full traceback attached.
            errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
                                  "\nTraceback:", traceback.format_exc()])
            assert False, errorMsg
    # DataLossWarning is tolerated; any other warning fails the test.
    otherWarnings = [x for x in caughtWarnings
                     if not issubclass(x.category, constants.DataLossWarning)]
    assert len(otherWarnings) == 0, [(x.category, x.message) for x in otherWarnings]
    # If data-loss warnings occurred, the tree is lossy — skip the comparison.
    if len(caughtWarnings):
        return
    output = convertTreeDump(p.tree.testSerializer(document))
    expected = convertExpected(expected)
    if namespaceHTMLElements:
        # Expected output was written without namespaces; inject them.
        expected = namespaceExpected(r"\1<html \2>", expected)
    errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
                          "\nReceived:", output])
    assert expected == output, errorMsg
    errStr = []
    for (line, col), errorcode, datavars in p.errors:
        assert isinstance(datavars, dict), "%s, %s" % (errorcode, repr(datavars))
        errStr.append("Line: %i Col: %i %s" % (line, col,
                                               constants.E[errorcode] % datavars))
    errorMsg2 = "\n".join(["\n\nInput:", input,
                           "\nExpected errors (" + str(len(errors)) + "):\n" + "\n".join(errors),
                           "\nActual errors (" + str(len(p.errors)) + "):\n" + "\n".join(errStr)])
    # Error-count checking is opt-in via the module-level checkParseErrors flag.
    if checkParseErrors:
        assert len(p.errors) == len(errors), errorMsg2
def test_parser():
    """Yield one runParserTest case per (file, test, tree builder, namespacing)."""
    sys.stderr.write('Testing tree builders ' + " ".join(list(treeTypes.keys())) + "\n")
    for filename in get_data_files('tree-construction'):
        testName = os.path.basename(filename).replace(".dat", "")
        if testName in ("template",):
            continue
        for test in TestData(filename, "data"):
            input = test['data']
            errors = test['errors']
            innerHTML = test['document-fragment']
            expected = test['document']
            if errors:
                errors = errors.split("\n")
            for treeName, treeCls in treeTypes.items():
                for namespaceHTMLElements in (True, False):
                    yield (runParserTest, innerHTML, input, expected, errors,
                           treeCls, namespaceHTMLElements)
| mpl-2.0 |
wimberosa/samba | source4/scripting/python/samba/netcmd/drs.py | 1 | 20147 | #!/usr/bin/env python
#
# implement samba_tool drs commands
#
# Copyright Andrew Tridgell 2010
# Copyright Giampaolo Lauria 2011 <lauria2@yahoo.com>
#
# based on C implementation by Kamen Mazdrashki <kamen.mazdrashki@postpath.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import ldb
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
Option,
SuperCommand,
)
from samba.samdb import SamDB
from samba import drs_utils, nttime2string, dsdb
from samba.dcerpc import drsuapi, misc
import common
def drsuapi_connect(ctx):
    '''make a DRSUAPI connection to the server'''
    # Stashes the connection, bind handle and the extensions advertised by
    # the server on ctx (a Command instance) for use by later DRS calls.
    try:
        (ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drsuapi_connect(ctx.server, ctx.lp, ctx.creds)
    except Exception, e:
        raise CommandError("DRS connection to %s failed" % ctx.server, e)
def samdb_connect(ctx):
    '''make a ldap connection to the server'''
    # ctx.samdb is used by the drs subcommands for all remote LDAP lookups.
    try:
        ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
                          session_info=system_session(),
                          credentials=ctx.creds, lp=ctx.lp)
    except Exception, e:
        raise CommandError("LDAP connection to %s failed" % ctx.server, e)
def drs_errmsg(werr):
    '''return "was successful" or an error string'''
    ecode, estring = werr
    if ecode != 0:
        return "failed, result %u (%s)" % (ecode, estring)
    return "was successful"
def attr_default(msg, attrname, default):
    '''get an attribute from a ldap msg with a default'''
    return msg[attrname][0] if attrname in msg else default
def drs_parse_ntds_dn(ntds_dn):
    '''parse a NTDS DN returning a site and server'''
    components = ntds_dn.split(',')
    well_formed = (components[0] == "CN=NTDS Settings" and
                   components[2] == "CN=Servers" and
                   components[4] == 'CN=Sites')
    if not well_formed:
        raise RuntimeError("bad NTDS DN %s" % ntds_dn)
    server = components[1].split('=')[1]
    site = components[3].split('=')[1]
    return (site, server)
class cmd_drs_showrepl(Command):
    """show replication status"""
    synopsis = "%prog [<DC>] [options]"
    takes_args = ["DC?"]

    def print_neighbour(self, n):
        '''print one set of neighbour information'''
        self.message("%s" % n.naming_context_dn)
        try:
            (site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn)
            # NOTE(review): "\%s" relies on the invalid escape "\%" being kept
            # as a literal backslash; "%s\\%s" (as used in run()) is clearer.
            self.message("\t%s\%s via RPC" % (site, server))
        except RuntimeError:
            # Source DSA DN did not look like an NTDS Settings DN; print it raw.
            self.message("\tNTDS DN: %s" % n.source_dsa_obj_dn)
        self.message("\t\tDSA object GUID: %s" % n.source_dsa_obj_guid)
        self.message("\t\tLast attempt @ %s %s" % (nttime2string(n.last_attempt),
                                                   drs_errmsg(n.result_last_attempt)))
        self.message("\t\t%u consecutive failure(s)." % n.consecutive_sync_failures)
        self.message("\t\tLast success @ %s" % nttime2string(n.last_success))
        self.message("")

    def drsuapi_ReplicaInfo(ctx, info_type):
        '''call a DsReplicaInfo'''
        # NOTE(review): ordinary instance method whose first parameter is
        # named "ctx" instead of the conventional "self".
        req1 = drsuapi.DsReplicaGetInfoRequest1()
        req1.info_type = info_type
        try:
            (info_type, info) = ctx.drsuapi.DsReplicaGetInfo(ctx.drsuapi_handle, 1, req1)
        except Exception, e:
            raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e)
        return (info_type, info)

    def run(self, DC=None, sambaopts=None,
            credopts=None, versionopts=None, server=None):
        """Show DSA info, in/outbound neighbours and KCC connection objects."""
        self.lp = sambaopts.get_loadparm()
        if DC is None:
            DC = common.netcmd_dnsname(self.lp)
        self.server = DC
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        drsuapi_connect(self)
        samdb_connect(self)
        # show domain information
        ntds_dn = self.samdb.get_dsServiceName()
        server_dns = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])[0]['dnsHostName'][0]
        (site, server) = drs_parse_ntds_dn(ntds_dn)
        try:
            ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId'])
        except Exception, e:
            raise CommandError("Failed to search NTDS DN %s" % ntds_dn)
        conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)")
        self.message("%s\\%s" % (site, server))
        self.message("DSA Options: 0x%08x" % int(attr_default(ntds[0], "options", 0)))
        self.message("DSA object GUID: %s" % self.samdb.schema_format_value("objectGUID", ntds[0]["objectGUID"][0]))
        self.message("DSA invocationId: %s\n" % self.samdb.schema_format_value("objectGUID", ntds[0]["invocationId"][0]))
        self.message("==== INBOUND NEIGHBORS ====\n")
        (info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS)
        for n in info.array:
            self.print_neighbour(n)
        self.message("==== OUTBOUND NEIGHBORS ====\n")
        (info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO)
        for n in info.array:
            self.print_neighbour(n)
        # Names of dsdb constants; each is tested bit-wise against the
        # connection's mS-DS-ReplicatesNCReason value below.
        reasons = ['NTDSCONN_KCC_GC_TOPOLOGY',
                   'NTDSCONN_KCC_RING_TOPOLOGY',
                   'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY',
                   'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY',
                   'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY',
                   'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY',
                   'NTDSCONN_KCC_INTERSITE_TOPOLOGY',
                   'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY',
                   'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY',
                   'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY']
        self.message("==== KCC CONNECTION OBJECTS ====\n")
        for c in conn:
            self.message("Connection --")
            self.message("\tConnection name: %s" % c['name'][0])
            self.message("\tEnabled        : %s" % attr_default(c, 'enabledConnection', 'TRUE'))
            self.message("\tServer DNS name : %s" % server_dns)
            self.message("\tServer DN name  : %s" % c['fromServer'][0])
            self.message("\t\tTransportType: RPC")
            self.message("\t\toptions: 0x%08X" % int(attr_default(c, 'options', 0)))
            if not 'mS-DS-ReplicatesNCReason' in c:
                self.message("Warning: No NC replicated for Connection!")
                continue
            for r in c['mS-DS-ReplicatesNCReason']:
                # Attribute value format: "B:<len>:<reason-bits>:<NC DN>"
                a = str(r).split(':')
                self.message("\t\tReplicatesNC: %s" % a[3])
                self.message("\t\tReason: 0x%08x" % int(a[2]))
                for s in reasons:
                    if getattr(dsdb, s, 0) & int(a[2]):
                        self.message("\t\t\t%s" % s)
class cmd_drs_kcc(Command):
    """trigger knowledge consistency center run"""
    synopsis = "%prog [<DC>] [options]"
    takes_args = ["DC?"]

    def run(self, DC=None, sambaopts=None,
            credopts=None, versionopts=None, server=None):
        """Ask DC (default: this host's DC name) to run its KCC once."""
        self.lp = sambaopts.get_loadparm()
        if DC is None:
            DC = common.netcmd_dnsname(self.lp)
        self.server = DC
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        drsuapi_connect(self)
        # An empty DsExecuteKCC1 request triggers a standard KCC run.
        req1 = drsuapi.DsExecuteKCC1()
        try:
            self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1)
        except Exception, e:
            raise CommandError("DsExecuteKCC failed", e)
        self.message("Consistency check on %s successful." % DC)
def drs_local_replicate(self, SOURCE_DC, NC):
    '''replicate from a source DC to the local SAM'''
    # Module-level helper: `self` is the calling Command instance, passed
    # explicitly from cmd_drs_replicate.run when --local is given.
    self.server = SOURCE_DC
    drsuapi_connect(self)
    # url=None opens the local sam.ldb directly rather than over LDAP.
    self.local_samdb = SamDB(session_info=system_session(), url=None,
                             credentials=self.creds, lp=self.lp)
    self.samdb = SamDB(url="ldap://%s" % self.server,
                       session_info=system_session(),
                       credentials=self.creds, lp=self.lp)
    # work out the source and destination GUIDs
    res = self.local_samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
    self.ntds_dn = res[0]["dsServiceName"][0]
    res = self.local_samdb.search(base=self.ntds_dn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
    self.ntds_guid = misc.GUID(self.samdb.schema_format_value("objectGUID", res[0]["objectGUID"][0]))
    source_dsa_invocation_id = misc.GUID(self.samdb.get_invocation_id())
    destination_dsa_guid = self.ntds_guid
    # NOTE(review): the transaction is started on the remote LDAP connection
    # (self.samdb) while the writes go to self.local_samdb — looks like it
    # should wrap the local DB instead; verify before relying on atomicity.
    self.samdb.transaction_start()
    repl = drs_utils.drs_Replicate("ncacn_ip_tcp:%s[seal]" % self.server, self.lp,
                                   self.creds, self.local_samdb)
    try:
        repl.replicate(NC, source_dsa_invocation_id, destination_dsa_guid)
    except Exception, e:
        raise CommandError("Error replicating DN %s" % NC, e)
    self.samdb.transaction_commit()
class cmd_drs_replicate(Command):
"""replicate a naming context between two DCs"""
synopsis = "%prog <destinationDC> <sourceDC> <NC> [options]"
takes_args = ["DEST_DC", "SOURCE_DC", "NC"]
takes_options = [
Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"),
Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"),
Option("--sync-all", help="use SYNC_ALL to replicate from all DCs", action="store_true"),
Option("--full-sync", help="resync all objects", action="store_true"),
Option("--local", help="pull changes directly into the local database (destination DC is ignored)", action="store_true"),
]
def run(self, DEST_DC, SOURCE_DC, NC,
add_ref=False, sync_forced=False, sync_all=False, full_sync=False,
local=False, sambaopts=None, credopts=None, versionopts=None, server=None):
self.server = DEST_DC
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
if local:
drs_local_replicate(self, SOURCE_DC, NC)
return
drsuapi_connect(self)
samdb_connect(self)
# we need to find the NTDS GUID of the source DC
msg = self.samdb.search(base=self.samdb.get_config_basedn(),
expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % (
ldb.binary_encode(SOURCE_DC),
ldb.binary_encode(SOURCE_DC)),
attrs=[])
if len(msg) == 0:
raise CommandError("Failed to find source DC %s" % SOURCE_DC)
server_dn = msg[0]['dn']
msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL,
expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))",
attrs=['objectGUID', 'options'])
if len(msg) == 0:
raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC)
source_dsa_guid = msg[0]['objectGUID'][0]
dsa_options = int(attr_default(msg, 'options', 0))
req_options = 0
if not (dsa_options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL):
req_options |= drsuapi.DRSUAPI_DRS_WRIT_REP
if add_ref:
req_options |= drsuapi.DRSUAPI_DRS_ADD_REF
if sync_forced:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED
if sync_all:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_ALL
if full_sync:
req_options |= drsuapi.DRSUAPI_DRS_FULL_SYNC_NOW
try:
drs_utils.sendDsReplicaSync(self.drsuapi, self.drsuapi_handle, source_dsa_guid, NC, req_options)
except drs_utils.drsException, estr:
raise CommandError("DsReplicaSync failed", estr)
self.message("Replicate from %s to %s was successful." % (SOURCE_DC, DEST_DC))
class cmd_drs_bind(Command):
    """show DRS capabilities of a server"""
    synopsis = "%prog [<DC>] [options]"
    takes_args = ["DC?"]

    def run(self, DC=None, sambaopts=None,
            credopts=None, versionopts=None, server=None):
        """Bind to DC via DRSUAPI and print which extensions it supports."""
        self.lp = sambaopts.get_loadparm()
        if DC is None:
            DC = common.netcmd_dnsname(self.lp)
        self.server = DC
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        drsuapi_connect(self)
        samdb_connect(self)
        bind_info = drsuapi.DsBindInfoCtr()
        bind_info.length = 28
        bind_info.info = drsuapi.DsBindInfo28()
        (info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
        # (drsuapi constant name, Windows-style display name) pairs; the
        # constant is looked up leniently with getattr(..., 0) below.
        optmap = [
            ("DRSUAPI_SUPPORTED_EXTENSION_BASE", "DRS_EXT_BASE"),
            ("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION", "DRS_EXT_ASYNCREPL"),
            ("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI", "DRS_EXT_REMOVEAPI"),
            ("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2", "DRS_EXT_MOVEREQ_V2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS", "DRS_EXT_GETCHG_DEFLATE"),
            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1", "DRS_EXT_DCINFO_V1"),
            ("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION", "DRS_EXT_RESTORE_USN_OPTIMIZATION"),
            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY", "DRS_EXT_ADDENTRY"),
            ("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE", "DRS_EXT_KCC_EXECUTE"),
            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2", "DRS_EXT_ADDENTRY_V2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION", "DRS_EXT_LINKED_VALUE_REPLICATION"),
            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2", "DRS_EXT_DCINFO_V2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD","DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"),
            ("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND", "DRS_EXT_CRYPTO_BIND"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO", "DRS_EXT_GET_REPL_INFO"),
            ("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION", "DRS_EXT_STRONG_ENCRYPTION"),
            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01", "DRS_EXT_DCINFO_VFFFFFFFF"),
            ("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP", "DRS_EXT_TRANSITIVE_MEMBERSHIP"),
            ("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY", "DRS_EXT_ADD_SID_HISTORY"),
            ("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3", "DRS_EXT_POST_BETA3"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5", "DRS_EXT_GETCHGREQ_V5"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2", "DRS_EXT_GETMEMBERSHIPS2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6", "DRS_EXT_GETCHGREQ_V6"),
            ("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS", "DRS_EXT_NONDOMAIN_NCS"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8", "DRS_EXT_GETCHGREQ_V8"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5", "DRS_EXT_GETCHGREPLY_V5"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6", "DRS_EXT_GETCHGREPLY_V6"),
            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3", "DRS_EXT_WHISTLER_BETA3"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7", "DRS_EXT_WHISTLER_BETA3"),
            ("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT", "DRS_EXT_WHISTLER_BETA3"),
            ("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS", "DRS_EXT_W2K3_DEFLATE"),
            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10", "DRS_EXT_GETCHGREQ_V10"),
            ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3")
            ]
        optmap_ext = [
            ("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"),
            ("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", "DRS_EXT_LH_BETA2"),
            ("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")]
        self.message("Bind to %s succeeded." % DC)
        self.message("Extensions supported:")
        # Loop variable renamed from `str`, which shadowed the builtin.
        for (opt, ext_desc) in optmap:
            optval = getattr(drsuapi, opt, 0)
            if info.info.supported_extensions & optval:
                yesno = "Yes"
            else:
                yesno = "No "
            self.message("  %-60s: %s (%s)" % (opt, yesno, ext_desc))
        if isinstance(info.info, drsuapi.DsBindInfo48):
            self.message("\nExtended Extensions supported:")
            for (opt, ext_desc) in optmap_ext:
                optval = getattr(drsuapi, opt, 0)
                if info.info.supported_extensions_ext & optval:
                    yesno = "Yes"
                else:
                    yesno = "No "
                self.message("  %-60s: %s (%s)" % (opt, yesno, ext_desc))
        self.message("\nSite GUID: %s" % info.info.site_guid)
        self.message("Repl epoch: %u" % info.info.repl_epoch)
        if isinstance(info.info, drsuapi.DsBindInfo48):
            self.message("Forest GUID: %s" % info.info.config_dn_guid)
class cmd_drs_options(Command):
    """query or change 'options' for NTDS Settings object of a domain controller"""
    synopsis = "%prog [<DC>] [options]"
    takes_args = ["DC?"]
    takes_options = [
        Option("--dsa-option", help="DSA option to enable/disable", type="str",
               metavar="{+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL | {+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE" ),
        ]
    # Bit values of the NTDS Settings 'options' attribute.
    option_map = {"IS_GC": 0x00000001,
                  "DISABLE_INBOUND_REPL": 0x00000002,
                  "DISABLE_OUTBOUND_REPL": 0x00000004,
                  "DISABLE_NTDSCONN_XLATE": 0x00000008}

    def run(self, DC=None, dsa_option=None,
            sambaopts=None, credopts=None, versionopts=None):
        """Print the DC's DSA options; with --dsa-option, set/clear one flag."""
        self.lp = sambaopts.get_loadparm()
        if DC is None:
            DC = common.netcmd_dnsname(self.lp)
        self.server = DC
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        samdb_connect(self)
        ntds_dn = self.samdb.get_dsServiceName()
        res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"])
        dsa_opts = int(res[0]["options"][0])
        # print out current DSA options
        cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
        self.message("Current DSA options: " + ", ".join(cur_opts))
        # modify options
        if dsa_option:
            if dsa_option[:1] not in ("+", "-"):
                raise CommandError("Unknown option %s" % dsa_option)
            flag = dsa_option[1:]
            # membership test directly on the dict (was `.keys()`, redundant)
            if flag not in self.option_map:
                raise CommandError("Unknown option %s" % dsa_option)
            if dsa_option[:1] == "+":
                dsa_opts |= self.option_map[flag]
            else:
                dsa_opts &= ~self.option_map[flag]
            # save new options
            m = ldb.Message()
            m.dn = ldb.Dn(self.samdb, ntds_dn)
            m["options"] = ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options")
            self.samdb.modify(m)
        # print out new DSA options
        cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
        self.message("New DSA options: " + ", ".join(cur_opts))
class cmd_drs(SuperCommand):
    """Directory Replication Services (DRS) management"""
    # Subcommand registry: maps the CLI verb to its handler instance.
    subcommands = {
        "bind": cmd_drs_bind(),
        "kcc": cmd_drs_kcc(),
        "replicate": cmd_drs_replicate(),
        "showrepl": cmd_drs_showrepl(),
        "options": cmd_drs_options(),
        }
| gpl-3.0 |
tensorflow/probability | tensorflow_probability/python/math/psd_kernels/polynomial.py | 1 | 10635 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Polynomial and Linear kernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel
from tensorflow_probability.python.math.psd_kernels.internal import util
__all__ = [
'Linear',
'Polynomial',
]
def _maybe_shape_static(tensor):
if tensor is None:
return tf.TensorShape([])
return tensor.shape
def _maybe_shape_dynamic(tensor):
if tensor is None:
return []
return tf.shape(tensor)
@psd_kernel.auto_composite_tensor_psd_kernel
class Polynomial(psd_kernel.AutoCompositeTensorPsdKernel):
  """Polynomial Kernel.

  Is based on the dot product covariance function and can be obtained
  from polynomial regression. This kernel, when parameterizing a
  Gaussian Process, results in random polynomial functions.
  A linear kernel can be created from this by setting the exponent to 1
  or None.

  ```none
  k(x, y) = bias_variance**2 + slope_variance**2 *
            ((x - shift) dot (y - shift))**exponent
  ```

  #### References

  [1]: Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian
       Processes for Machine Learning. Section 4.4.2. 2006.
       http://www.gaussianprocess.org/gpml/chapters/RW4.pdf

  [2]: David Duvenaud. The Kernel Cookbook.
       https://www.cs.toronto.edu/~duvenaud/cookbook/
  """

  def __init__(self,
               bias_variance=None,
               slope_variance=None,
               shift=None,
               exponent=None,
               feature_ndims=1,
               validate_args=False,
               parameters=None,
               name='Polynomial'):
    """Construct a Polynomial kernel instance.

    Args:
      bias_variance: Positive floating point `Tensor` that controls the
        variance from the origin. If bias = 0, there is no variance and the
        fitted function goes through the origin.  Must be broadcastable with
        `slope_variance`, `shift`, `exponent`, and inputs to `apply` and
        `matrix` methods. A value of `None` is treated like 0.
        Default Value: `None`
      slope_variance: Positive floating point `Tensor` that controls the
        variance of the regression line slope that is the basis for the
        polynomial. Must be broadcastable with `bias_variance`, `shift`,
        `exponent`, and inputs to `apply` and `matrix` methods. A value of
        `None` is treated like 1.
        Default Value: `None`
      shift: Floating point `Tensor` that contols the intercept with the
        x-axis of the linear function to be exponentiated to get this
        polynomial. Must be broadcastable with `bias_variance`,
        `slope_variance`, `exponent` and inputs to `apply` and `matrix`
        methods. A value of `None` is treated like 0, which results in having
        the intercept at the origin.
        Default Value: `None`
      exponent: Positive floating point `Tensor` that controls the exponent
        (also known as the degree) of the polynomial function. Must be
        broadcastable with `bias_variance`, `slope_variance`, `shift`,
        and inputs to `apply` and `matrix` methods. A value of `None` is
        treated like 1, which results in a linear kernel.
        Default Value: `None`
      feature_ndims: Python `int` number of rightmost dims to include in
        kernel computation.
        Default Value: 1
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
        Default Value: `False`
      parameters: For subclasses, a dict of constructor arguments.
      name: Python `str` name prefixed to Ops created by this class.
        Default Value: `'Polynomial'`
    """
    parameters = dict(locals()) if parameters is None else parameters
    with tf.name_scope(name):
      # All provided parameters must share one common floating dtype;
      # `None` entries are simply skipped.
      dtype = util.maybe_get_common_dtype(
          [bias_variance, slope_variance, shift, exponent])
      self._bias_variance = tensor_util.convert_nonref_to_tensor(
          bias_variance, name='bias_variance', dtype=dtype)
      self._slope_variance = tensor_util.convert_nonref_to_tensor(
          slope_variance, name='slope_variance', dtype=dtype)
      self._shift = tensor_util.convert_nonref_to_tensor(
          shift, name='shift', dtype=dtype)
      self._exponent = tensor_util.convert_nonref_to_tensor(
          exponent, name='exponent', dtype=dtype)
      super(Polynomial, self).__init__(
          feature_ndims,
          dtype=dtype,
          name=name,
          validate_args=validate_args,
          parameters=parameters)

  @property
  def bias_variance(self):
    """Variance on bias parameter."""
    return self._bias_variance

  @property
  def slope_variance(self):
    """Variance on slope parameter."""
    return self._slope_variance

  @property
  def shift(self):
    """Shift of linear function that is exponentiated."""
    return self._shift

  @property
  def exponent(self):
    """Exponent of the polynomial term."""
    return self._exponent

  def _batch_shape(self):
    # Batch shape is the broadcast of every (possibly absent) parameter;
    # absent parameters contribute a scalar shape.
    return functools.reduce(
        tf.broadcast_static_shape,
        map(_maybe_shape_static, [self.slope_variance, self.bias_variance,
                                  self.shift, self.exponent]))

  def _batch_shape_tensor(self):
    return functools.reduce(
        tf.broadcast_dynamic_shape,
        map(_maybe_shape_dynamic, [self.slope_variance, self.bias_variance,
                                   self.shift, self.exponent]))

  def _apply(self, x1, x2, example_ndims=0):
    # A `None` parameter selects its identity value (shift=0, exponent=1,
    # slope_variance=1, bias_variance=0) and the corresponding op is skipped.
    if self.shift is None:
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          x1 * x2, ndims=self.feature_ndims)
    else:
      shift = tf.convert_to_tensor(self.shift)
      # Pad shift's shape so it broadcasts over example and feature dims.
      shift = util.pad_shape_with_ones(
          shift, example_ndims + self.feature_ndims)
      dot_prod = util.sum_rightmost_ndims_preserving_shape(
          (x1 - shift) * (x2 - shift),
          ndims=self.feature_ndims)
    if self.exponent is not None:
      exponent = tf.convert_to_tensor(self.exponent)
      exponent = util.pad_shape_with_ones(
          exponent, example_ndims)
      dot_prod **= exponent
    if self.slope_variance is not None:
      slope_variance = tf.convert_to_tensor(self.slope_variance)
      slope_variance = util.pad_shape_with_ones(
          slope_variance, example_ndims)
      dot_prod *= slope_variance ** 2.
    if self.bias_variance is not None:
      bias_variance = tf.convert_to_tensor(self.bias_variance)
      bias_variance = util.pad_shape_with_ones(
          bias_variance, example_ndims)
      dot_prod += bias_variance ** 2.
    return dot_prod

  def _parameter_control_dependencies(self, is_init):
    if not self.validate_args:
      return []
    assertions = []
    # `shift` is deliberately excluded here: it may be any real value,
    # while the three parameters below are constrained positive.
    for arg_name, arg in dict(bias_variance=self.bias_variance,
                              slope_variance=self.slope_variance,
                              exponent=self.exponent).items():
      if arg is not None and is_init != tensor_util.is_ref(arg):
        assertions.append(assert_util.assert_positive(
            arg,
            message='{} must be positive.'.format(arg_name)))
    return assertions
@psd_kernel.auto_composite_tensor_psd_kernel
class Linear(Polynomial):
  """Linear Kernel.

  Is based on the dot product covariance function and can be obtained
  from linear regression. This kernel, when parameterizing a
  Gaussian Process, results in random linear functions.
  The Linear kernel is based on the Polynomial kernel without the
  exponent.

  ```none
  k(x, y) = bias_variance**2 + slope_variance**2 *
            ((x - shift) dot (y - shift))
  ```
  """

  def __init__(self,
               bias_variance=None,
               slope_variance=None,
               shift=None,
               feature_ndims=1,
               validate_args=False,
               name='Linear'):
    """Construct a Linear kernel instance.

    Args:
      bias_variance: Positive floating point `Tensor` that controls the
        variance from the origin. If bias = 0, there is no variance and the
        fitted function goes through the origin (also known as the homogeneous
        linear kernel). Must be broadcastable with `slope_variance`,
        `shift` and inputs to `apply` and `matrix` methods. A value of
        `None` is treated like 0.
        Default Value: `None`
      slope_variance: Positive floating point `Tensor` that controls the
        variance of the regression line slope. Must be broadcastable with
        `bias_variance`, `shift`, and inputs to `apply` and `matrix`
        methods. A value of `None` is treated like 1.
        Default Value: `None`
      shift: Floating point `Tensor` that controls the intercept with the
        x-axis of the linear interpolation. Must be broadcastable with
        `bias_variance`, `slope_variance`, and inputs to `apply` and `matrix`
        methods. A value of `None` is treated like 0, which results in having
        the intercept at the origin.
      feature_ndims: Python `int` number of rightmost dims to include in
        kernel computation.
        Default Value: 1
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
        Default Value: `False`
      name: Python `str` name prefixed to Ops created by this class.
        Default Value: `'Linear'`
    """
    parameters = dict(locals())
    # exponent=None makes the parent Polynomial skip exponentiation,
    # yielding the degree-one (linear) kernel.
    super(Linear, self).__init__(
        bias_variance=bias_variance,
        slope_variance=slope_variance,
        shift=shift,
        exponent=None,
        feature_ndims=feature_ndims,
        validate_args=validate_args,
        parameters=parameters,
        name=name)
| apache-2.0 |
tectronics/pywo | tests/actions/moveresize_test.py | 6 | 9023 | #!/usr/bin/env python
import unittest
import sys
sys.path.insert(0, '../')
sys.path.insert(0, './')
from tests.common_test import MockedXlibTests
from tests.common_test import DESKTOP_WIDTH, DESKTOP_HEIGHT
from tests.common_test import WIN_WIDTH, WIN_HEIGHT
from pywo import actions, core
# Gravity/direction constants shared by all move/resize action tests;
# each names one of the nine positions on a 3x3 screen grid.
TOP_LEFT = core.Gravity.parse('NW')
TOP = core.Gravity.parse('N')
TOP_RIGHT = core.Gravity.parse('NE')
LEFT = core.Gravity.parse('W')
MIDDLE = core.Gravity.parse('MIDDLE')
RIGHT = core.Gravity.parse('E')
BOTTOM_LEFT = core.Gravity.parse('SW')
BOTTOM = core.Gravity.parse('S')
BOTTOM_RIGHT = core.Gravity.parse('SE')
class MoveresizeActionsTests(MockedXlibTests):
    # Shared base for move/resize action test cases.
    def get_geometry(self, x, y):
        """Return a Geometry at (x, y) with the standard test window size."""
        return core.Geometry(x, y, WIN_WIDTH, WIN_HEIGHT)
class PutActionTests(MoveresizeActionsTests):

    """Tests for the 'put' action: placement at each of the nine grid spots,
    optionally anchored by a gravity point on the window itself."""

    def setUp(self):
        MoveresizeActionsTests.setUp(self)
        self.action = actions.manager.get('put')

    def test_position_top_left(self):
        self.action(self.win, position=TOP_LEFT)
        geometry = self.get_geometry(0, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_top(self):
        self.action(self.win, position=TOP)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_top_right(self):
        self.action(self.win, position=TOP_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_left(self):
        self.action(self.win, position=LEFT)
        geometry = self.get_geometry(0, DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_middle(self):
        self.action(self.win, position=MIDDLE)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_right(self):
        self.action(self.win, position=RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_bottom_left(self):
        self.action(self.win, position=BOTTOM_LEFT)
        geometry = self.get_geometry(0, DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_bottom(self):
        self.action(self.win, position=BOTTOM)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_bottom_right(self):
        self.action(self.win, position=BOTTOM_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH,
                                     DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    # The gravity tests place the window at MIDDLE but anchor it by a
    # different point of the window, so the resulting origin shifts.
    def test_position_gravity_top_left(self):
        self.action(self.win, position=MIDDLE, gravity=TOP_LEFT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2,
                                     DESKTOP_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_top(self):
        self.action(self.win, position=MIDDLE, gravity=TOP)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_top_right(self):
        self.action(self.win, position=MIDDLE, gravity=TOP_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH,
                                     DESKTOP_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_left(self):
        self.action(self.win, position=MIDDLE, gravity=LEFT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_middle(self):
        self.action(self.win, position=MIDDLE, gravity=MIDDLE)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_right(self):
        self.action(self.win, position=MIDDLE, gravity=RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_bottom_left(self):
        self.action(self.win, position=MIDDLE, gravity=BOTTOM_LEFT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_bottom(self):
        self.action(self.win, position=MIDDLE, gravity=BOTTOM)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_position_gravity_bottom_right(self):
        self.action(self.win, position=MIDDLE, gravity=BOTTOM_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_same_position_twice(self):
        # 'put' must be idempotent: repeating it leaves the window in place.
        # (Removed a redundant re-fetch of the action; setUp already sets
        # self.action, exactly as in every other test of this class.)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.action(self.win, position=MIDDLE)
        self.assertEqual(self.win.geometry, geometry)
        self.action(self.win, position=MIDDLE)
        self.assertEqual(self.win.geometry, geometry)
class FloatActionTests(MoveresizeActionsTests):

    """Tests for the 'float' action on an otherwise empty desktop."""

    def setUp(self):
        MoveresizeActionsTests.setUp(self)
        self.action = actions.manager.get('float')
        # always start in the middle of the screen
        self.action(self.win, direction=MIDDLE)

    def test_empty_desktop_top_left(self):
        self.action(self.win, direction=TOP_LEFT)
        geometry = self.get_geometry(0, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_top(self):
        self.action(self.win, direction=TOP)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_top_right(self):
        self.action(self.win, direction=TOP_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH, 0)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_left(self):
        self.action(self.win, direction=LEFT)
        geometry = self.get_geometry(0, DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_middle(self):
        self.action(self.win, direction=MIDDLE)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_right(self):
        self.action(self.win, direction=RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH,
                                     DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_bottom_left(self):
        self.action(self.win, direction=BOTTOM_LEFT)
        geometry = self.get_geometry(0, DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_bottom(self):
        self.action(self.win, direction=BOTTOM)
        geometry = self.get_geometry(DESKTOP_WIDTH/2-WIN_WIDTH/2,
                                     DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_empty_desktop_bottom_right(self):
        self.action(self.win, direction=BOTTOM_RIGHT)
        geometry = self.get_geometry(DESKTOP_WIDTH-WIN_WIDTH,
                                     DESKTOP_HEIGHT-WIN_HEIGHT)
        self.assertEqual(self.win.geometry, geometry)

    def test_same_direction_twice(self):
        # BUG FIX: this test computed the expected geometry twice but never
        # asserted anything, so it always passed.  Floating in the same
        # direction twice must leave the window at the screen edge.
        self.action(self.win, direction=LEFT)
        geometry = self.get_geometry(0, DESKTOP_HEIGHT/2-WIN_HEIGHT/2)
        self.assertEqual(self.win.geometry, geometry)
        self.action(self.win, direction=LEFT)
        self.assertEqual(self.win.geometry, geometry)
if __name__ == '__main__':
    # Collect both action test cases into one suite and run it verbosely.
    main_suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in (PutActionTests, FloatActionTests):
        main_suite.addTest(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(main_suite)
| gpl-3.0 |
anaran/kuma | kuma/wiki/views/translate.py | 4 | 9949 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext_lazy as _
from jingo.helpers import urlparams
import kuma.wiki.content
from kuma.attachments.forms import AttachmentRevisionForm
from kuma.core.decorators import block_user_agents, login_required, never_cache
from kuma.core.i18n import get_language_mapping
from kuma.core.urlresolvers import reverse
from kuma.core.utils import get_object_or_none, smart_int
from ..decorators import (check_readonly, prevent_indexing,
process_document_path)
from ..forms import DocumentForm, RevisionForm
from ..models import Document, Revision
from .utils import document_form_initial, split_slug
@block_user_agents
@login_required
@process_document_path
def select_locale(request, document_slug, document_locale):
    """
    Select a locale to translate the document to.

    ``document_slug``/``document_locale`` are resolved by the
    ``process_document_path`` decorator; 404s if no matching Document.
    """
    doc = get_object_or_404(Document,
                            locale=document_locale,
                            slug=document_slug)
    return render(request, 'wiki/select_locale.html', {'document': doc})
@block_user_agents
@login_required
@process_document_path
@check_readonly
@prevent_indexing
@never_cache
def translate(request, document_slug, document_locale, revision_id=None):
    """
    Create a new translation of a wiki document.

    * document_slug is for the default locale
    * translation is to the request locale

    GET renders the translate form (document metadata form and/or
    revision form, depending on the user's permissions); POST saves
    either or both forms and redirects on success.
    """
    # TODO: Refactor this view into two views? (new, edit)
    # That might help reduce the headache-inducing branchiness.

    # The parent document to translate from
    parent_doc = get_object_or_404(Document,
                                   locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug=document_slug)

    if not revision_id:
        # HACK: Seems weird, but sticking the translate-to locale in a query
        # param is the best way to avoid the MindTouch-legacy locale
        # redirection logic.
        document_locale = request.GET.get('tolocale',
                                          document_locale)

    # Set a "Discard Changes" page
    discard_href = ''

    if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
        # Don't translate to the default language.
        return redirect(reverse(
            'wiki.edit', locale=settings.WIKI_DEFAULT_LANGUAGE,
            args=[parent_doc.slug]))

    if not parent_doc.is_localizable:
        message = _(u'You cannot translate this document.')
        context = {'message': message}
        return render(request, 'handlers/400.html', context, status=400)

    if revision_id:
        revision = get_object_or_404(Revision, pk=revision_id)
    else:
        revision = None

    based_on_rev = parent_doc.current_or_latest_revision()

    disclose_description = bool(request.GET.get('opendescription'))

    # Look for an existing translation in the target locale; if none,
    # we are creating a new one and derive the slug from the parent.
    try:
        doc = parent_doc.translations.get(locale=document_locale)
        slug_dict = split_slug(doc.slug)
    except Document.DoesNotExist:
        doc = None
        disclose_description = True
        slug_dict = split_slug(document_slug)

        # Find the "real" parent topic, which is its translation
        if parent_doc.parent_topic:
            try:
                parent_topic_translated_doc = (parent_doc.parent_topic
                                               .translations
                                               .get(locale=document_locale))
                slug_dict = split_slug(parent_topic_translated_doc.slug +
                                       '/' +
                                       slug_dict['specific'])
            except ObjectDoesNotExist:
                pass

    # When no translation exists yet (doc is None) both permissions are
    # granted; otherwise they are checked against the existing document.
    user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))
    user_has_rev_perm = (not doc) or (doc and doc.allows_revision_by(request.user))

    if not user_has_doc_perm and not user_has_rev_perm:
        # User has no perms, bye.
        raise PermissionDenied

    doc_form = rev_form = None

    if user_has_doc_perm:
        if doc:
            # If there's an existing doc, populate form from it.
            discard_href = doc.get_absolute_url()
            doc.slug = slug_dict['specific']
            doc_initial = document_form_initial(doc)
        else:
            # If no existing doc, bring over the original title and slug.
            discard_href = parent_doc.get_absolute_url()
            doc_initial = {'title': based_on_rev.title,
                           'slug': slug_dict['specific']}
        doc_form = DocumentForm(initial=doc_initial,
                                parent_slug=slug_dict['parent'])

    if user_has_rev_perm:
        initial = {
            'based_on': based_on_rev.id,
            'comment': '',
            'toc_depth': based_on_rev.toc_depth,
            'localization_tags': ['inprogress'],
        }
        content = None
        if revision is not None:
            content = revision.content
        elif not doc:
            content = based_on_rev.content
        if content:
            # Strip editor-unsafe markup from the seeded content.
            initial.update(content=kuma.wiki.content.parse(content)
                                                    .filterEditorSafety()
                                                    .serialize())
        instance = doc and doc.current_or_latest_revision()
        rev_form = RevisionForm(request=request,
                                instance=instance,
                                initial=initial,
                                parent_slug=slug_dict['parent'])

    if request.method == 'POST':
        which_form = request.POST.get('form', 'both')
        doc_form_invalid = False

        # Grab the posted slug value in case it's invalid
        posted_slug = request.POST.get('slug', slug_dict['specific'])

        if user_has_doc_perm and which_form in ['doc', 'both']:
            disclose_description = True
            post_data = request.POST.copy()
            post_data.update({'locale': document_locale})
            doc_form = DocumentForm(post_data, instance=doc,
                                    parent_slug=slug_dict['parent'])
            doc_form.instance.locale = document_locale
            doc_form.instance.parent = parent_doc

            if which_form == 'both':
                # Sending a new copy of post so the slug change above
                # doesn't cause problems during validation
                rev_form = RevisionForm(request=request,
                                        data=request.POST,
                                        parent_slug=slug_dict['parent'])

            # If we are submitting the whole form, we need to check that
            # the Revision is valid before saving the Document.
            if doc_form.is_valid() and (which_form == 'doc' or
                                        rev_form.is_valid()):
                doc = doc_form.save(parent=parent_doc)

                if which_form == 'doc':
                    url = urlparams(doc.get_edit_url(), opendescription=1)
                    return redirect(url)
            else:
                doc_form.data['slug'] = posted_slug
                doc_form_invalid = True

        if doc and user_has_rev_perm and which_form in ['rev', 'both']:
            post_data = request.POST.copy()
            if 'slug' not in post_data:
                post_data['slug'] = posted_slug

            # update the post data with the toc_depth of original
            post_data['toc_depth'] = based_on_rev.toc_depth

            rev_form = RevisionForm(request=request,
                                    data=post_data,
                                    parent_slug=slug_dict['parent'])
            rev_form.instance.document = doc  # for rev_form.clean()

            if rev_form.is_valid() and not doc_form_invalid:
                parent_id = request.POST.get('parent_id', '')

                # Attempt to set a parent
                if parent_id:
                    try:
                        parent_doc = get_object_or_404(Document, id=parent_id)
                        rev_form.instance.document.parent = parent_doc
                        doc.parent = parent_doc
                        rev_form.instance.based_on.document = doc.original
                    except Document.DoesNotExist:
                        pass

                rev_form.save(doc)
                return redirect(doc)

    # Optional revision range for the "translate from diff" view.
    if doc:
        from_id = smart_int(request.GET.get('from'), None)
        to_id = smart_int(request.GET.get('to'), None)

        revision_from = get_object_or_none(Revision,
                                           pk=from_id,
                                           document=doc.parent)
        revision_to = get_object_or_none(Revision,
                                         pk=to_id,
                                         document=doc.parent)
    else:
        revision_from = revision_to = None

    parent_split = split_slug(parent_doc.slug)

    language_mapping = get_language_mapping()
    language = language_mapping[document_locale.lower()]
    default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]

    context = {
        'parent': parent_doc,
        'document': doc,
        'document_form': doc_form,
        'revision_form': rev_form,
        'locale': document_locale,
        'default_locale': default_locale,
        'language': language,
        'based_on': based_on_rev,
        'disclose_description': disclose_description,
        'discard_href': discard_href,
        'attachment_form': AttachmentRevisionForm(),
        'specific_slug': parent_split['specific'],
        'parent_slug': parent_split['parent'],
        'revision_from': revision_from,
        'revision_to': revision_to,
    }
    return render(request, 'wiki/translate.html', context)
| mpl-2.0 |
modulexcite/chromium-dashboard | scripts/oauth2client/util.py | 6 | 5535 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
__author__ = ['rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
__all__ = [
'positional',
]
from gflags import gflags
import inspect
import logging
import types
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)

FLAGS = gflags.FLAGS

# Controls what happens when a @positional declaration is violated:
# raise, log a warning, or silently ignore.
gflags.DEFINE_enum('positional_parameters_enforcement', 'WARNING',
    ['EXCEPTION', 'WARNING', 'IGNORE'],
    'The action when an oauth2client.util.positional declaration is violated.')
def positional(max_positional_args):
    """A decorator to declare that only the first N arguments may be positional.

    This decorator makes it easy to support Python 3 style key-word only
    parameters. For example, in Python 3 it is possible to write:

      def fn(pos1, *, kwonly1=None, kwonly1=None):
        ...

    All named parameters after * must be a keyword:

      fn(10, 'kw1', 'kw2')  # Raises exception.
      fn(10, kwonly1='kw1')  # Ok.

    Example:
      To define a function like above, do:

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
          ...

      If no default value is provided to a keyword argument, it becomes a
      required keyword argument:

        @positional(0)
        def fn(required_kw):
          ...

      This must be called with the keyword parameter:

        fn()  # Raises exception.
        fn(10)  # Raises exception.
        fn(required_kw=10)  # Ok.

      When defining instance or class methods always remember to account for
      'self' and 'cls':

        class MyClass(object):

          @positional(2)
          def my_method(self, pos1, kwonly1=None):
            ...

          @classmethod
          @positional(2)
          def my_method(cls, pos1, kwonly1=None):
            ...

    The positional decorator behavior is controlled by the
    --positional_parameters_enforcement flag. The flag may be set to
    'EXCEPTION', 'WARNING' or 'IGNORE' to raise an exception, log a warning,
    or do nothing, respectively, if a declaration is violated.

    Args:
      max_positional_args: Maximum number of positional arguments. All
        parameters after this index must be keyword only. May also be the
        decorated function itself when used as a bare @positional, in which
        case the count is inferred from the signature.

    Returns:
      A decorator that prevents using arguments after max_positional_args from
      being used as positional parameters.

    Raises:
      TypeError if a key-word only argument is provided as a positional
      parameter, but only if the --positional_parameters_enforcement flag is
      set to 'EXCEPTION'.
    """
    def positional_decorator(wrapped):
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = ''
                if max_positional_args != 1:
                    plural_s = 's'
                message = '%s() takes at most %d positional argument%s (%d given)' % (
                    wrapped.__name__, max_positional_args, plural_s, len(args))
                # Enforcement mode is read at call time, so tests/apps can
                # change the flag after decoration.
                if FLAGS.positional_parameters_enforcement == 'EXCEPTION':
                    raise TypeError(message)
                elif FLAGS.positional_parameters_enforcement == 'WARNING':
                    logger.warning(message)
                else: # IGNORE
                    pass
            return wrapped(*args, **kwargs)
        return positional_wrapper

    # Called as @positional(n) => return the decorator; called bare as
    # @positional => infer n as the number of non-default parameters.
    if isinstance(max_positional_args, (int, long)):
        return positional_decorator
    else:
        args, _, _, defaults = inspect.getargspec(max_positional_args)
        return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
    """Return the given scopes as one space-delimited string.

    A string value is passed through untouched; any other iterable of
    scope strings is joined with single spaces.

    Args:
      scopes: string or iterable of strings, the scopes.

    Returns:
      The scopes formatted as a single string.
    """
    is_already_string = isinstance(scopes, types.StringTypes)
    return scopes if is_already_string else ' '.join(scopes)
def dict_to_tuple_key(dictionary):
    """Convert a dictionary into a hashable, order-independent tuple key.

    The (key, value) pairs are sorted first, so two logically equivalent
    dictionaries always map to the identical tuple.

    Args:
      dictionary: the dictionary to use as the key.

    Returns:
      A tuple of the dictionary's items in their natural sorted ordering.
    """
    sorted_pairs = sorted(dictionary.items())
    return tuple(sorted_pairs)
def _add_query_parameter(url, name, value):
    """Add (or replace) a single query parameter in a url.

    Args:
      url: string, url to add the query parameter to.
      name: string, query parameter name.
      value: string, query parameter value.

    Returns:
      The updated url, or the original url unchanged when value is None.
    """
    # A None value means "leave the url alone".
    if value is None:
        return url
    parts = list(urlparse.urlparse(url))
    query = dict(parse_qsl(parts[4]))
    query[name] = value
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)
| bsd-3-clause |
conejoninja/pelisalacarta | python/main-classic/lib/elementtree/HTMLTreeBuilder.py | 103 | 7826 | #
# ElementTree
# $Id: HTMLTreeBuilder.py 2325 2005-03-16 15:50:43Z fredrik $
#
# a simple tree builder, for HTML input
#
# history:
# 2002-04-06 fl created
# 2002-04-07 fl ignore IMG and HR end tags
# 2002-04-07 fl added support for 1.5.2 and later
# 2003-04-13 fl added HTMLTreeBuilder alias
# 2004-12-02 fl don't feed non-ASCII charrefs/entities as 8-bit strings
# 2004-12-05 fl don't feed non-ASCII CDATA as 8-bit strings
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from HTML files.
##
import htmlentitydefs
import re, string, sys
import mimetools, StringIO
import ElementTree
# Tags that are implicitly closed when a sibling of the same name opens.
AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body"
# Empty (void) tags: end tags for these are synthesized/ignored.
IGNOREEND = "img", "hr", "meta", "link", "br"

if sys.version[:3] == "1.5":
    is_not_ascii = re.compile(r"[\x80-\xff]").search # 1.5.2
else:
    # eval keeps the u"" literal out of the 1.5.2 parser, which would
    # otherwise choke on unicode literal syntax at compile time.
    is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search
# Prefer the stdlib HTMLParser (Python 2.2+); on older Pythons fall
# back to an sgmllib-based shim with the same handler interface.
try:
    from HTMLParser import HTMLParser
except ImportError:
    from sgmllib import SGMLParser
    # hack to use sgmllib's SGMLParser to emulate 2.2's HTMLParser
    class HTMLParser(SGMLParser):
        # the following only works as long as this class doesn't
        # provide any do, start, or end handlers
        def unknown_starttag(self, tag, attrs):
            self.handle_starttag(tag, attrs)
        def unknown_endtag(self, tag):
            self.handle_endtag(tag)
##
# ElementTree builder for HTML source code. This builder converts an
# HTML document or fragment to an ElementTree.
# <p>
# The parser is relatively picky, and requires balanced tags for most
# elements. However, elements belonging to the following group are
# automatically closed: P, LI, TR, TH, and TD. In addition, the
# parser automatically inserts end tags immediately after the start
# tag, and ignores any end tags for the following group: IMG, HR,
# META, and LINK.
#
# @keyparam builder Optional builder object. If omitted, the parser
# uses the standard <b>elementtree</b> builder.
# @keyparam encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1. Note that if your
# document uses a non-ASCII compatible encoding, you must decode
# the document before parsing.
#
# @see elementtree.ElementTree
class HTMLTreeBuilder(HTMLParser):

    # FIXME: shouldn't this class be named Parser, not Builder?

    def __init__(self, builder=None, encoding=None):
        # __stack tracks currently-open tags so AUTOCLOSE siblings can be
        # closed implicitly; __builder receives start/data/end events.
        self.__stack = []
        if builder is None:
            builder = ElementTree.TreeBuilder()
        self.__builder = builder
        self.encoding = encoding or "iso-8859-1"
        HTMLParser.__init__(self)

    ##
    # Flushes parser buffers, and return the root element.
    #
    # @return An Element instance.

    def close(self):
        HTMLParser.close(self)
        return self.__builder.close()

    ##
    # (Internal) Handles start tags.

    def handle_starttag(self, tag, attrs):
        if tag == "meta":
            # look for encoding directives
            http_equiv = content = None
            for k, v in attrs:
                if k == "http-equiv":
                    http_equiv = string.lower(v)
                elif k == "content":
                    content = v
            if http_equiv == "content-type" and content:
                # use mimetools to parse the http header
                header = mimetools.Message(
                    StringIO.StringIO("%s: %s\n\n" % (http_equiv, content))
                    )
                encoding = header.getparam("charset")
                if encoding:
                    self.encoding = encoding
        if tag in AUTOCLOSE:
            # e.g. a new <li> implicitly closes a preceding open <li>
            if self.__stack and self.__stack[-1] == tag:
                self.handle_endtag(tag)
        self.__stack.append(tag)
        attrib = {}
        if attrs:
            for k, v in attrs:
                attrib[string.lower(k)] = v
        self.__builder.start(tag, attrib)
        if tag in IGNOREEND:
            # void element: emit the end event immediately
            self.__stack.pop()
            self.__builder.end(tag)

    ##
    # (Internal) Handles end tags.

    def handle_endtag(self, tag):
        if tag in IGNOREEND:
            return
        lasttag = self.__stack.pop()
        if tag != lasttag and lasttag in AUTOCLOSE:
            # recursively close auto-closable tags left open above us
            self.handle_endtag(lasttag)
        self.__builder.end(tag)

    ##
    # (Internal) Handles character references (&#NNN; / &#xHH;).

    def handle_charref(self, char):
        if char[:1] == "x":
            char = int(char[1:], 16)
        else:
            char = int(char)
        if 0 <= char < 128:
            self.__builder.data(chr(char))
        else:
            self.__builder.data(unichr(char))

    ##
    # (Internal) Handles entity references.

    def handle_entityref(self, name):
        entity = htmlentitydefs.entitydefs.get(name)
        if entity:
            if len(entity) == 1:
                # single-character replacement text
                entity = ord(entity)
            else:
                # entitydefs stores the rest as "&#NNN;" strings
                entity = int(entity[2:-1])
            if 0 <= entity < 128:
                self.__builder.data(chr(entity))
            else:
                self.__builder.data(unichr(entity))
        else:
            self.unknown_entityref(name)

    ##
    # (Internal) Handles character data.

    def handle_data(self, data):
        if isinstance(data, type('')) and is_not_ascii(data):
            # convert to unicode, but only if necessary
            data = unicode(data, self.encoding, "ignore")
        self.__builder.data(data)

    ##
    # (Hook) Handles unknown entity references.  The default action
    # is to ignore unknown entities.

    def unknown_entityref(self, name):
        pass # ignore by default; override if necessary
##
# An alias for the <b>HTMLTreeBuilder</b> class.
TreeBuilder = HTMLTreeBuilder
##
# Parse an HTML document or document fragment.
#
# @param source A filename or file object containing HTML data.
# @param encoding Optional character encoding, if known.  If omitted,
#     the parser looks for META tags inside the document.  If no tags
#     are found, the parser defaults to ISO-8859-1.
# @return An ElementTree instance

def parse(source, encoding=None):
    html_parser = HTMLTreeBuilder(encoding=encoding)
    return ElementTree.parse(source, html_parser)
if __name__ == "__main__":
    import sys
    # Parse the file named on the command line and dump its tree.
    tree = parse(open(sys.argv[1]))
    ElementTree.dump(tree)
| gpl-3.0 |
ajfriend/cvxpy | examples/extensions/mixed_integer/noncvx_variable.py | 12 | 2144 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import abc
import cvxpy
import cvxpy.interface as intf
import cvxopt
class NonCvxVariable(cvxpy.Variable):
    """A cvxpy Variable subject to a non-convex constraint.

    Subclasses implement _round (projection onto the constraint set)
    and _fix (freezing the variable to a feasible value).  z is the
    replicant parameter and u the dual/offset parameter used by the
    ADMM-style heuristics in this package.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, *args, **kwargs):
        super(NonCvxVariable, self).__init__(*args, **kwargs)
        self.noncvx = True
        self.z = cvxpy.Parameter(*self.size)
        self.init_z()
        self.u = cvxpy.Parameter(*self.size)
        self.u.value = cvxopt.matrix(0, self.size, tc='d')

    # Initializes the value of the replicant variable.
    def init_z(self):
        self.z.value = cvxopt.matrix(0, self.size, tc='d')

    # Verify that the matrix has the same dimensions as the variable.
    def validate_matrix(self, matrix):
        if self.size != intf.size(matrix):
            raise Exception(("The argument's dimensions must match "
                             "the variable's dimensions."))

    # Wrapper to validate matrix.
    def round(self, matrix):
        self.validate_matrix(matrix)
        return self._round(matrix)

    # Project the matrix into the space defined by the non-convex constraint.
    # Returns the updated matrix.
    # BUG FIX: the abstract stub was declared without `self`, unlike _fix
    # and unlike how it is invoked (self._round(matrix)); the signature is
    # now consistent with its callers and overriders.
    @abc.abstractmethod
    def _round(self, matrix):
        return NotImplemented

    # Wrapper to validate matrix and update curvature.
    def fix(self, matrix):
        matrix = self.round(matrix)
        return self._fix(matrix)

    # Fix the variable so it obeys the non-convex constraint.
    @abc.abstractmethod
    def _fix(self, matrix):
        return NotImplemented
christianblunden/googmuze | resources/lib/oauth2client/file.py | 253 | 3160 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class CredentialsFileSymbolicLinkError(Exception):
    """Credentials files must not be symbolic links.

    Raised by Storage._validate_file before reading or writing —
    presumably to avoid following an attacker-planted symlink to a
    sensitive path (NOTE(review): rationale inferred, confirm upstream).
    """
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from a file."""

    def __init__(self, filename):
        # Path of the credentials file; guarded by a non-reentrant lock.
        self._filename = filename
        self._lock = threading.Lock()

    def _validate_file(self):
        # Refuse to touch symlinked credential files.
        if os.path.islink(self._filename):
            raise CredentialsFileSymbolicLinkError(
                'File: %s is a symbolic link.' % self._filename)

    def acquire_lock(self):
        """Acquires any lock necessary to access this Storage.

        This lock is not reentrant."""
        self._lock.acquire()

    def release_lock(self):
        """Release the Storage lock.

        Trying to release a lock that isn't held will result in a
        RuntimeError.
        """
        self._lock.release()

    def locked_get(self):
        """Retrieve Credential from file.

        Returns:
          oauth2client.client.Credentials, or None if the file is missing
          or its contents cannot be parsed as stored credentials.

        Raises:
          CredentialsFileSymbolicLinkError if the file is a symbolic link.
        """
        credentials = None
        self._validate_file()
        try:
            f = open(self._filename, 'rb')
            content = f.read()
            f.close()
        except IOError:
            # Missing/unreadable file: treat as "no stored credentials".
            return credentials

        try:
            credentials = Credentials.new_from_json(content)
            credentials.set_store(self)
        except ValueError:
            # Corrupt or non-JSON content: fall through and return None.
            pass

        return credentials

    def _create_file_if_needed(self):
        """Create an empty file if necessary.

        This method will not initialize the file. Instead it implements a
        simple version of "touch" to ensure the file has been created.
        """
        if not os.path.exists(self._filename):
            # umask 0177 => the new file is created mode 0600 (owner r/w
            # only), keeping the stored secret private.
            old_umask = os.umask(0177)
            try:
                open(self._filename, 'a+b').close()
            finally:
                os.umask(old_umask)

    def locked_put(self, credentials):
        """Write Credentials to file.

        Args:
          credentials: Credentials, the credentials to store.

        Raises:
          CredentialsFileSymbolicLinkError if the file is a symbolic link.
        """
        self._create_file_if_needed()
        self._validate_file()
        f = open(self._filename, 'wb')
        f.write(credentials.to_json())
        f.close()

    def locked_delete(self):
        """Delete the Credentials file from disk."""
        os.unlink(self._filename)
| gpl-3.0 |
mSenyor/sl4a | python-build/python-libs/gdata/tests/gdata_tests/youtube/service_test.py | 89 | 23513 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jhartmann@gmail.com (Jochen Hartmann)'
import getpass
import time
import StringIO
import random
import unittest
import atom
import gdata.youtube
import gdata.youtube.service
YOUTUBE_TEST_CLIENT_ID = 'ytapi-pythonclientlibrary_servicetest'
class YouTubeServiceTest(unittest.TestCase):
def setUp(self):
    # Build an authenticated YouTube client shared by every test.
    # `username`, `password` and `developer_key` are module-level names
    # set elsewhere in the file (not visible in this chunk) — presumably
    # via interactive prompts; confirm against the test harness.
    self.client = gdata.youtube.service.YouTubeService()
    self.client.email = username
    self.client.password = password
    self.client.source = YOUTUBE_TEST_CLIENT_ID
    self.client.developer_key = developer_key
    self.client.client_id = YOUTUBE_TEST_CLIENT_ID
    # Performs a live ClientLogin request.
    self.client.ProgrammaticLogin()
def testRetrieveVideoFeed(self):
    # Fetch a standard feed and sanity-check every entry has a title.
    feed_uri = ('http://gdata.youtube.com/feeds/api/standardfeeds/'
                'recently_featured')
    feed = self.client.GetYouTubeVideoFeed(feed_uri)
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
    self.assert_(len(feed.entry) > 0)
    for video_entry in feed.entry:
        self.assert_(video_entry.title.text != '')
def testRetrieveTopRatedVideoFeed(self):
feed = self.client.GetTopRatedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveMostViewedVideoFeed(self):
feed = self.client.GetMostViewedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveRecentlyFeaturedVideoFeed(self):
feed = self.client.GetRecentlyFeaturedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveWatchOnMobileVideoFeed(self):
feed = self.client.GetWatchOnMobileVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveTopFavoritesVideoFeed(self):
feed = self.client.GetTopFavoritesVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveMostRecentVideoFeed(self):
feed = self.client.GetMostRecentVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveMostDiscussedVideoFeed(self):
feed = self.client.GetMostDiscussedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveMostLinkedVideoFeed(self):
feed = self.client.GetMostLinkedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveMostRespondedVideoFeed(self):
feed = self.client.GetMostRespondedVideoFeed()
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 10)
def testRetrieveVideoEntryByUri(self):
entry = self.client.GetYouTubeVideoEntry(
'http://gdata.youtube.com/feeds/videos/Ncakifd_16k')
self.assert_(isinstance(entry, gdata.youtube.YouTubeVideoEntry))
self.assert_(entry.title.text != '')
def testRetrieveVideoEntryByVideoId(self):
entry = self.client.GetYouTubeVideoEntry(video_id='Ncakifd_16k')
self.assert_(isinstance(entry, gdata.youtube.YouTubeVideoEntry))
self.assert_(entry.title.text != '')
def testRetrieveUserVideosbyUri(self):
feed = self.client.GetYouTubeUserFeed(
'http://gdata.youtube.com/feeds/users/gdpython/uploads')
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 0)
def testRetrieveUserVideosbyUsername(self):
feed = self.client.GetYouTubeUserFeed(username='gdpython')
self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
self.assert_(len(feed.entry) > 0)
def testSearchWithVideoQuery(self):
    # A capped search query must return exactly max_results entries.
    query = gdata.youtube.service.YouTubeVideoQuery()
    query.vq = 'google'
    query.max_results = 8
    results = self.client.YouTubeQuery(query)
    self.assert_(isinstance(results, gdata.youtube.YouTubeVideoFeed))
    self.assertEquals(len(results.entry), 8)
def testDirectVideoUploadStatusUpdateAndDeletion(self):
self.assertEquals(self.client.developer_key, developer_key)
self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID)
self.assertEquals(self.client.additional_headers['X-GData-Key'],
'key=' + developer_key)
self.assertEquals(self.client.additional_headers['X-Gdata-Client'],
YOUTUBE_TEST_CLIENT_ID)
test_video_title = 'my cool video ' + str(random.randint(1000,5000))
test_video_description = 'description ' + str(random.randint(1000,5000))
my_media_group = gdata.media.Group(
title = gdata.media.Title(text=test_video_title),
description = gdata.media.Description(description_type='plain',
text=test_video_description),
keywords = gdata.media.Keywords(text='video, foo'),
category = gdata.media.Category(
text='Autos',
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label='Autos'),
player=None
)
self.assert_(isinstance(my_media_group, gdata.media.Group))
# Set Geo location to 37,-122 lat, long
where = gdata.geo.Where()
where.set_location((37.0,-122.0))
video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group,
geo=where)
self.assert_(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
new_entry = self.client.InsertVideoEntry(video_entry, video_file_location)
self.assert_(isinstance(new_entry, gdata.youtube.YouTubeVideoEntry))
self.assertEquals(new_entry.title.text, test_video_title)
self.assertEquals(new_entry.media.description.text, test_video_description)
self.assert_(new_entry.id.text)
# check upload status also
upload_status = self.client.CheckUploadStatus(new_entry)
self.assert_(upload_status[0] != '')
# test updating entry meta-data
new_video_description = 'description ' + str(random.randint(1000,5000))
new_entry.media.description.text = new_video_description
updated_entry = self.client.UpdateVideoEntry(new_entry)
self.assert_(isinstance(updated_entry, gdata.youtube.YouTubeVideoEntry))
self.assertEquals(updated_entry.media.description.text,
new_video_description)
# sleep for 10 seconds
time.sleep(10)
# test to delete the entry
value = self.client.DeleteVideoEntry(updated_entry)
if not value:
# sleep more and try again
time.sleep(20)
# test to delete the entry
value = self.client.DeleteVideoEntry(updated_entry)
self.assert_(value == True)
def testDirectVideoUploadWithDeveloperTags(self):
    """Upload a video with developer tags, verify the tags round-trip
    intact, then delete the uploaded entry (retrying once)."""
    # The client must carry the developer credentials in its headers.
    self.assertEquals(self.client.developer_key, developer_key)
    self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID)
    self.assertEquals(self.client.additional_headers['X-GData-Key'],
                      'key=' + developer_key)
    self.assertEquals(self.client.additional_headers['X-Gdata-Client'],
                      YOUTUBE_TEST_CLIENT_ID)
    # Randomised metadata so reruns do not collide with earlier uploads.
    test_video_title = 'my cool video ' + str(random.randint(1000,5000))
    test_video_description = 'description ' + str(random.randint(1000,5000))
    test_developer_tag_01 = 'tag' + str(random.randint(1000,5000))
    test_developer_tag_02 = 'tag' + str(random.randint(1000,5000))
    test_developer_tag_03 = 'tag' + str(random.randint(1000,5000))
    my_media_group = gdata.media.Group(
        title = gdata.media.Title(text=test_video_title),
        description = gdata.media.Description(description_type='plain',
                                              text=test_video_description),
        keywords = gdata.media.Keywords(text='video, foo'),
        category = [gdata.media.Category(
            text='Autos',
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
            label='Autos')],
        player=None
    )
    self.assert_(isinstance(my_media_group, gdata.media.Group))
    video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group)
    original_developer_tags = [test_developer_tag_01, test_developer_tag_02,
                               test_developer_tag_03]
    dev_tags = video_entry.AddDeveloperTags(original_developer_tags)
    for dev_tag in dev_tags:
        self.assert_(dev_tag.text in original_developer_tags)
    self.assert_(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
    new_entry = self.client.InsertVideoEntry(video_entry, video_file_location)
    self.assert_(isinstance(new_entry, gdata.youtube.YouTubeVideoEntry))
    self.assertEquals(new_entry.title.text, test_video_title)
    self.assertEquals(new_entry.media.description.text, test_video_description)
    self.assert_(new_entry.id.text)
    # Every tag we sent must come back, and nothing extra.
    developer_tags_from_new_entry = new_entry.GetDeveloperTags()
    for dev_tag in developer_tags_from_new_entry:
        self.assert_(dev_tag.text in original_developer_tags)
    self.assertEquals(len(developer_tags_from_new_entry),
                      len(original_developer_tags))
    # sleep for 10 seconds
    time.sleep(10)
    # test to delete the entry
    value = self.client.DeleteVideoEntry(new_entry)
    if not value:
        # sleep more and try again
        time.sleep(20)
        # test to delete the entry
        value = self.client.DeleteVideoEntry(new_entry)
    self.assert_(value == True)
def testBrowserBasedVideoUpload(self):
    """Request a browser-based upload token and sanity-check the
    returned POST URL and token."""
    self.assertEquals(self.client.developer_key, developer_key)
    self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID)
    self.assertEquals(self.client.additional_headers['X-GData-Key'],
                      'key=' + developer_key)
    self.assertEquals(self.client.additional_headers['X-Gdata-Client'],
                      YOUTUBE_TEST_CLIENT_ID)
    test_video_title = 'my cool video ' + str(random.randint(1000,5000))
    test_video_description = 'description ' + str(random.randint(1000,5000))
    my_media_group = gdata.media.Group(
        title = gdata.media.Title(text=test_video_title),
        description = gdata.media.Description(description_type='plain',
                                              text=test_video_description),
        keywords = gdata.media.Keywords(text='video, foo'),
        category = gdata.media.Category(
            text='Autos',
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
            label='Autos'),
        player=None
    )
    self.assert_(isinstance(my_media_group, gdata.media.Group))
    video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group)
    self.assert_(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
    # response is a (post_url, upload_token) pair.
    response = self.client.GetFormUploadToken(video_entry)
    self.assert_(response[0].startswith(
        'http://uploads.gdata.youtube.com/action/FormDataUpload/'))
    self.assert_(len(response[0]) > 55)
    self.assert_(len(response[1]) > 100)
def testRetrieveRelatedVideoFeedByUri(self):
    """Fetch the related-videos feed of a known video by full URI."""
    feed = self.client.GetYouTubeRelatedVideoFeed(
        'http://gdata.youtube.com/feeds/videos/Ncakifd_16k/related')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveRelatedVideoFeedById(self):
    """Fetch the related-videos feed of a known video by video id."""
    feed = self.client.GetYouTubeRelatedVideoFeed(video_id = 'Ncakifd_16k')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveResponseVideoFeedByUri(self):
    """Fetch the video-responses feed of a known video by full URI."""
    feed = self.client.GetYouTubeVideoResponseFeed(
        'http://gdata.youtube.com/feeds/videos/Ncakifd_16k/responses')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoResponseFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveResponseVideoFeedById(self):
    """Fetch the video-responses feed of a known video by video id."""
    feed = self.client.GetYouTubeVideoResponseFeed(video_id='Ncakifd_16k')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoResponseFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveVideoCommentFeedByUri(self):
    """Fetch the comment feed of a known video by full URI."""
    feed = self.client.GetYouTubeVideoCommentFeed(
        'http://gdata.youtube.com/feeds/api/videos/Ncakifd_16k/comments')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoCommentFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveVideoCommentFeedByVideoId(self):
    """Fetch the comment feed of a known video by video id."""
    feed = self.client.GetYouTubeVideoCommentFeed(video_id='Ncakifd_16k')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoCommentFeed))
    self.assert_(len(feed.entry) > 0)
def testAddComment(self):
    """Post a randomised comment on a video and verify it shows up in
    the video's comment feed."""
    video_id = '9g6buYJTt_g'
    video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id)
    random_comment_text = 'test_comment_' + str(random.randint(1000,50000))
    self.client.AddComment(comment_text=random_comment_text,
                           video_entry=video_entry)
    # Re-fetch the feed and look for the comment we just posted.
    comment_feed = self.client.GetYouTubeVideoCommentFeed(video_id=video_id)
    comment_found = False
    for item in comment_feed.entry:
        if (item.content.text == random_comment_text):
            comment_found = True
    self.assertEquals(comment_found, True)
def testAddRating(self):
    """Rate a known video and check the service acknowledges with an entry."""
    video_id_to_rate = 'Ncakifd_16k'
    video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id_to_rate)
    response = self.client.AddRating(3, video_entry)
    self.assert_(isinstance(response, gdata.GDataEntry))
def testRetrievePlaylistFeedByUri(self):
    """Fetch a user's playlists feed by full URI."""
    feed = self.client.GetYouTubePlaylistFeed(
        'http://gdata.youtube.com/feeds/users/gdpython/playlists')
    self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrievePlaylistListFeedByUsername(self):
    """Fetch a user's playlists feed by username."""
    feed = self.client.GetYouTubePlaylistFeed(username='gdpython')
    self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrievePlaylistVideoFeed(self):
    """Fetch the videos of a known playlist and check entry types."""
    feed = self.client.GetYouTubePlaylistVideoFeed(
        'http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505')
    self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistVideoFeed))
    self.assert_(len(feed.entry) > 0)
    self.assert_(isinstance(feed.entry[0],
                            gdata.youtube.YouTubePlaylistVideoEntry))
def testAddUpdateAndDeletePlaylist(self):
    """Create a playlist, rename it, confirm the rename took effect,
    then delete it."""
    test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000))
    test_playlist_description = 'test playlist '
    response = self.client.AddPlaylist(test_playlist_title,
                                       test_playlist_description)
    self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry))
    new_playlist_title = 'my updated playlist ' + str(random.randint(1000,4000))
    new_playlist_description = 'my updated playlist '
    # The playlist id is the last path segment of the entry's atom id.
    playlist_entry_id = response.id.text.split('/')[-1]
    updated_playlist = self.client.UpdatePlaylist(playlist_entry_id,
                                                  new_playlist_title,
                                                  new_playlist_description)
    playlist_feed = self.client.GetYouTubePlaylistFeed()
    update_successful = False
    for playlist_entry in playlist_feed.entry:
        if playlist_entry.title.text == new_playlist_title:
            update_successful = True
            break
    self.assertEquals(update_successful, True)
    # wait
    time.sleep(10)
    # delete it
    playlist_uri = updated_playlist.id.text
    response = self.client.DeletePlaylist(playlist_uri)
    self.assertEquals(response, True)
def testAddUpdateAndDeletePrivatePlaylist(self):
    """Create a private playlist, rename it keeping it private, confirm
    both title and privacy survived, then delete it."""
    test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000))
    test_playlist_description = 'test playlist '
    response = self.client.AddPlaylist(test_playlist_title,
                                       test_playlist_description,
                                       playlist_private=True)
    self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry))
    new_playlist_title = 'my updated playlist ' + str(random.randint(1000,4000))
    new_playlist_description = 'my updated playlist '
    playlist_entry_id = response.id.text.split('/')[-1]
    updated_playlist = self.client.UpdatePlaylist(playlist_entry_id,
                                                  new_playlist_title,
                                                  new_playlist_description,
                                                  playlist_private=True)
    playlist_feed = self.client.GetYouTubePlaylistFeed()
    update_successful = False
    playlist_still_private = False
    for playlist_entry in playlist_feed.entry:
        if playlist_entry.title.text == new_playlist_title:
            update_successful = True
            # A non-None private element means the playlist stayed private.
            if playlist_entry.private is not None:
                playlist_still_private = True
    self.assertEquals(update_successful, True)
    self.assertEquals(playlist_still_private, True)
    # wait
    time.sleep(10)
    # delete it
    playlist_uri = updated_playlist.id.text
    response = self.client.DeletePlaylist(playlist_uri)
    self.assertEquals(response, True)
def testAddEditAndDeleteVideoFromPlaylist(self):
    """Create a playlist, add a video with custom metadata, edit the
    playlist-entry metadata, remove the video, then delete the playlist."""
    test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000))
    test_playlist_description = 'test playlist '
    response = self.client.AddPlaylist(test_playlist_title,
                                       test_playlist_description)
    self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry))
    custom_video_title = 'my test video on my test playlist'
    custom_video_description = 'this is a test video on my test playlist'
    video_id = 'Ncakifd_16k'
    playlist_uri = response.feed_link[0].href
    time.sleep(10)
    response = self.client.AddPlaylistVideoEntryToPlaylist(
        playlist_uri, video_id, custom_video_title, custom_video_description)
    self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistVideoEntry))
    # Derive the playlist URI and the entry id from the returned atom id.
    playlist_entry_id = response.id.text.split('/')[-1]
    playlist_uri = response.id.text.split(playlist_entry_id)[0][:-1]
    new_video_title = 'video number ' + str(random.randint(1000,3000))
    new_video_description = 'test video'
    time.sleep(10)
    response = self.client.UpdatePlaylistVideoEntryMetaData(
        playlist_uri,
        playlist_entry_id,
        new_video_title,
        new_video_description,
        1)
    self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistVideoEntry))
    time.sleep(10)
    playlist_entry_id = response.id.text.split('/')[-1]
    # remove video from playlist
    response = self.client.DeletePlaylistVideoEntry(playlist_uri,
                                                    playlist_entry_id)
    self.assertEquals(response, True)
    time.sleep(10)
    # delete the playlist
    response = self.client.DeletePlaylist(playlist_uri)
    self.assertEquals(response, True)
def testRetrieveSubscriptionFeedByUri(self):
    """Fetch the fixture account's subscriptions by URI and check all
    three subscription types (channel, favorites, query) are present."""
    feed = self.client.GetYouTubeSubscriptionFeed(
        'http://gdata.youtube.com/feeds/users/gdpython/subscriptions')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeSubscriptionFeed))
    # The gdpython test account is expected to have exactly 3 subscriptions.
    self.assert_(len(feed.entry) == 3)
    subscription_to_channel_found = False
    subscription_to_favorites_found = False
    subscription_to_query_found = False
    all_types_found = False
    for entry in feed.entry:
        self.assert_(isinstance(entry, gdata.youtube.YouTubeSubscriptionEntry))
        subscription_type = entry.GetSubscriptionType()
        if subscription_type == 'channel':
            subscription_to_channel_found = True
        elif subscription_type == 'favorites':
            subscription_to_favorites_found = True
        elif subscription_type == 'query':
            subscription_to_query_found = True
    if (subscription_to_channel_found and subscription_to_favorites_found and
        subscription_to_query_found):
        all_types_found = True
    self.assertEquals(all_types_found, True)
def testRetrieveSubscriptionFeedByUsername(self):
    """Same as the URI variant, but fetching subscriptions by username."""
    feed = self.client.GetYouTubeSubscriptionFeed(username='gdpython')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeSubscriptionFeed))
    self.assert_(len(feed.entry) == 3)
    subscription_to_channel_found = False
    subscription_to_favorites_found = False
    subscription_to_query_found = False
    all_types_found = False
    for entry in feed.entry:
        self.assert_(isinstance(entry, gdata.youtube.YouTubeSubscriptionEntry))
        subscription_type = entry.GetSubscriptionType()
        if subscription_type == 'channel':
            subscription_to_channel_found = True
        elif subscription_type == 'favorites':
            subscription_to_favorites_found = True
        elif subscription_type == 'query':
            subscription_to_query_found = True
    if (subscription_to_channel_found and subscription_to_favorites_found and
        subscription_to_query_found):
        all_types_found = True
    self.assertEquals(all_types_found, True)
def testRetrieveUserProfileByUri(self):
    """Fetch the fixture account's profile by URI."""
    user = self.client.GetYouTubeUserEntry(
        'http://gdata.youtube.com/feeds/users/gdpython')
    self.assert_(isinstance(user, gdata.youtube.YouTubeUserEntry))
    self.assertEquals(user.location.text, 'US')
def testRetrieveUserProfileByUsername(self):
    """Fetch the fixture account's profile by username."""
    user = self.client.GetYouTubeUserEntry(username='gdpython')
    self.assert_(isinstance(user, gdata.youtube.YouTubeUserEntry))
    self.assertEquals(user.location.text, 'US')
def testRetrieveUserFavoritesFeed(self):
    """Fetch the favorites feed of a named user."""
    feed = self.client.GetUserFavoritesFeed(username='gdpython')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
    self.assert_(len(feed.entry) > 0)
def testRetrieveDefaultUserFavoritesFeed(self):
    """Fetch the favorites feed of the authenticated (default) user."""
    feed = self.client.GetUserFavoritesFeed()
    self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed))
    self.assert_(len(feed.entry) > 0)
def testAddAndDeleteVideoFromFavorites(self):
    """Favorite a known video, then (after a pause) unfavorite it."""
    video_id = 'Ncakifd_16k'
    video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id)
    response = self.client.AddVideoEntryToFavorites(video_entry)
    self.assert_(isinstance(response, gdata.GDataEntry))
    time.sleep(10)
    response = self.client.DeleteVideoEntryFromFavorites(video_id)
    self.assertEquals(response, True)
def testRetrieveContactFeedByUri(self):
    """Fetch the fixture account's contact feed by URI."""
    feed = self.client.GetYouTubeContactFeed(
        'http://gdata.youtube.com/feeds/users/gdpython/contacts')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeContactFeed))
    self.assertEquals(len(feed.entry), 1)
def testRetrieveContactFeedByUsername(self):
    """Fetch the fixture account's contact feed by username."""
    feed = self.client.GetYouTubeContactFeed(username='gdpython')
    self.assert_(isinstance(feed, gdata.youtube.YouTubeContactFeed))
    self.assertEquals(len(feed.entry), 1)
if __name__ == '__main__':
    print ('NOTE: Please run these tests only with a test account. '
           'The tests may delete or update your data.')
    # Credentials are collected interactively (Python 2 raw_input) and
    # stored in module globals read by the test methods above.
    username = raw_input('Please enter your username: ')
    password = getpass.getpass()
    developer_key = raw_input('Please enter your developer key: ')
    video_file_location = raw_input(
        'Please enter the absolute path to a video file: ')
    unittest.main()
| apache-2.0 |
Multimac/ansible-modules-extras | windows/win_iis_webapppool.py | 153 | 3531 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation. Fixes over the previous revision:
# "State of the binding" was a copy/paste error (this module manages
# application pools, not bindings), plus spelling/grammar corrections.
DOCUMENTATION = '''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configures an IIS Web Application Pool.
description:
     - Creates, removes and configures an IIS Web Application Pool
options:
  name:
    description:
      - Name of the application pool
    required: true
    default: null
    aliases: []
  state:
    description:
      - State of the application pool
    choices:
      - absent
      - stopped
      - started
      - restarted
    required: false
    default: null
    aliases: []
  attributes:
    description:
      - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2"
    required: false
    default: null
    aliases: []
author: Henrik Wallström
'''
# Usage examples. Fixes over the previous revision: spelling ("stoppes"),
# a duplicated restart example, and "state=restart" which is not one of
# the documented choices (absent/stopped/started/restarted).
EXAMPLES = '''
# This returns information about an existing application pool
$ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
host | success >> {
    "attributes": {},
    "changed": false,
    "info": {
        "attributes": {
            "CLRConfigFile": "",
            "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
            "autoStart": true,
            "enable32BitAppOnWin64": false,
            "enableConfigurationOverride": true,
            "managedPipelineMode": 0,
            "managedRuntimeLoader": "webengine4.dll",
            "managedRuntimeVersion": "v4.0",
            "name": "DefaultAppPool",
            "passAnonymousToken": true,
            "queueLength": 1000,
            "startMode": 0,
            "state": 1
        },
        "name": "DefaultAppPool",
        "state": "Started"
    }
}

# This creates a new application pool in 'Started' state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows

# This stops an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows

# This restarts an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows

# This changes application pool attributes without touching state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows

# This creates an application pool and sets attributes
$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows

# Playbook example
---

- name: App Pool with .NET 4.0
  win_iis_webapppool:
    name: 'AppPool'
    state: started
    attributes: managedRuntimeVersion:v4.0
  register: webapppool
'''
| gpl-3.0 |
opendreambox/python-coherence | coherence/backends/lastfm_storage.py | 5 | 14057 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
"""
INFO lastFM_user Dez 14 17:35:27 Got new sessionid: '1488f34a1cbed7c9f4232f8fd563c3bd' (coherence/backends/lastfm_storage.py:60)
DEBUG lastFM_stream Dez 14 17:35:53 render <GET /da525474-5357-4d1b-a894-76b1293224c9/1005 HTTP/1.1> (coherence/backends/lastfm_storage.py:148)
command GET
rest /user/e0362c757ef49169e9a0f0970cc2d367.mp3
headers {'icy-metadata': '1', 'host': 'kingpin5.last.fm', 'te': 'trailers', 'connection': 'TE', 'user-agent': 'gnome-vfs/2.12.0.19 neon/0.24.7'}
ProxyClient handleStatus HTTP/1.1 200 OK
ProxyClient handleHeader Content-Type audio/mpeg
ProxyClient handleHeader Content-Length 4050441
ProxyClient handleHeader Cache-Control no-cache, must-revalidate
DEBUG lastFM_stream Dez 14 17:35:53 render <GET /da525474-5357-4d1b-a894-76b1293224c9/1005 HTTP/1.1> (coherence/backends/lastfm_storage.py:148)
command GET
rest /user/e0362c757ef49169e9a0f0970cc2d367.mp3
headers {'icy-metadata': '1', 'host': 'kingpin5.last.fm', 'te': 'trailers', 'connection': 'TE', 'user-agent': 'gnome-vfs/2.12.0.19 neon/0.24.7'}
ProxyClient handleStatus HTTP/1.1 403 Invalid ticket
"""
# Copyright 2007, Frank Scholz <coherence@beebits.net>
# Copyright 2007, Moritz Struebe <morty@gmx.net>
from twisted.internet import defer
from coherence.upnp.core import utils
from coherence.upnp.core.DIDLLite import classChooser, Container, Resource, DIDLElement
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
from coherence.backend import BackendItem, BackendStore
from urlparse import urlsplit
try:
from hashlib import md5
except ImportError:
# hashlib is new in Python 2.5
from md5 import md5
import string
class LastFMUser(log.Loggable):
    """Handles the Last.fm radio handshake and XSPF playlist fetching.

    One instance per configured account. `parent` is the backend
    container item that fetched tracks are appended to.

    Fixes over the previous revision: `self.warn` calls unified to
    `self.warning` (the method used everywhere else in this class),
    the hand-rolled hexify() helper replaced by md5().hexdigest(),
    and bare debug `print` statements routed through the logger.
    """

    logCategory = 'lastFM_user'

    user = None
    passwd = None
    host = "ws.audioscrobbler.com"   # may be replaced by the handshake reply
    basepath = "/radio"              # may be replaced by the handshake reply
    sessionid = None
    parent = None
    getting_tracks = False           # guards against overlapping fetches
    tracks = []

    def __init__(self, user, passwd):
        if user is None:
            self.warning("No User",)
        if passwd is None:
            self.warning("No Passwd",)
        self.user = user
        self.passwd = passwd

    def login(self):
        """Perform the radio handshake; on success fetch a first batch
        of tracks via get_tracks()."""
        if self.sessionid is not None:
            self.warning("Session seems to be valid",)
            return

        def got_page(result):
            # The handshake reply is a series of key=value lines.
            for line in result[0].split("\n"):
                pair = line.rstrip().split("=", 1)
                if len(pair) != 2:
                    continue
                key, value = pair
                if key == "session":
                    self.sessionid = value
                    self.info("Got new sessionid: %r", self.sessionid)
                elif key == "base_url":
                    if self.host != value:
                        self.host = value
                        self.info("Got new host: %s", self.host)
                elif key == "base_path":
                    if self.basepath != value:
                        self.basepath = value
                        self.info("Got new path: %s", self.basepath)
            self.get_tracks()

        def got_error(error):
            self.warning("Login to LastFM Failed! %r", error)
            self.debug("%r", error.getTraceback())

        password = md5(self.passwd).hexdigest()
        req = (self.basepath + "/handshake.php/?version=1&platform=win"
               "&username=" + self.user + "&passwordmd5=" + password +
               "&language=en&player=coherence")
        utils.getPage("http://" + self.host + req).addCallbacks(
            got_page, got_error, None, None, None, None)

    def get_tracks(self):
        """Fetch the next XSPF playlist and append its tracks to the
        backend store."""
        if self.getting_tracks:
            return

        def got_page(result):
            result = utils.parse_xml(result, encoding='utf-8')
            self.getting_tracks = False
            self.info("got Tracks")
            for track in result.findall('trackList/track'):

                def get_data(name):
                    return track.find(name).text.encode('utf-8')

                # FIXME: this section needs some work -- 'image' is the
                # wrong tag for the cover-art metadata.
                data = {}
                data['mimetype'] = 'audio/mpeg'
                data['name'] = get_data('creator') + " - " + get_data('title')
                data['title'] = get_data('title')
                data['artist'] = get_data('creator')
                data['creator'] = get_data('creator')
                data['album'] = get_data('album')
                data['duration'] = get_data('duration')
                data['image'] = get_data('image')
                data['url'] = track.find('location').text.encode('utf-8')
                item = self.parent.store.append(data, self.parent)
                self.tracks.append(item)

        def got_error(error):
            self.warning("Problem getting Tracks! %r", error)
            self.debug("%r", error.getTraceback())
            self.getting_tracks = False

        self.getting_tracks = True
        req = (self.basepath + "/xspf.php?sk=" + self.sessionid +
               "&discovery=0&desktop=1.3.1.1")
        utils.getPage("http://" + self.host + req).addCallbacks(
            got_page, got_error, None, None, None, None)

    def update(self, item):
        """Called when `item` starts streaming: drop queue entries that
        precede it and fetch a fresh batch of tracks."""
        if 0 < self.tracks.count(item):
            while True:
                track = self.tracks[0]
                if track == item:
                    break
                self.tracks.remove(track)
                # Do not remove the items from the store, so the tracks
                # can still answer a later Browse request correctly.
                #track.store.remove(track)
                #del track
            #if len(self.tracks) < 5:
            self.get_tracks()
class LFMProxyStream(utils.ReverseProxyResource, log.Loggable):
    """Reverse-proxy resource for one Last.fm stream URL.

    Serving a request also notifies the backend's queue so that the
    playlist advances past this track.
    """

    logCategory = 'lastFM_stream'

    def __init__(self, uri, parent):
        self.uri = uri
        self.parent = parent
        parts = urlsplit(uri)
        host_port = parts[1]
        path = parts[2]
        # Split an optional ":port" suffix off the network location.
        if ':' in host_port:
            host, port = host_port.split(':')
            port = int(port)
        else:
            host = host_port
            port = 80
        if path == '':
            path = '/'
        utils.ReverseProxyResource.__init__(self, host, port, path)

    def render(self, request):
        self.debug("render %r", request)
        # Advance the backend queue before handing off to the proxy.
        self.parent.store.LFM.update(self.parent)
        self.parent.played = True
        return utils.ReverseProxyResource.render(self, request)
class LastFMItem(log.Loggable):
    """One DIDL-Lite item in the store: either the 'LastFM' directory
    container or a proxied Last.fm track.

    Fixes over the previous revision: get_children() treated
    `request_count` as an end index instead of a count, and the
    update_id bumps in add_child()/remove_child() could raise
    AttributeError on track items (only directories carry update_id).
    """

    logCategory = 'LastFM_item'

    def __init__(self, id, obj, parent, mimetype, urlbase, UPnPClass, update=False):
        self.id = id
        self.name = obj.get('name')
        self.title = obj.get('title')
        self.artist = obj.get('artist')
        self.creator = obj.get('creator')
        self.album = obj.get('album')
        self.duration = obj.get('duration')
        self.mimetype = mimetype
        self.parent = parent
        if parent:
            parent.add_child(self, update=update)
            parent_id = parent.get_id()
        else:
            parent_id = -1
        self.item = UPnPClass(id, parent_id, self.title, False, self.creator)
        if isinstance(self.item, Container):
            self.item.childCount = 0
        self.child_count = 0
        self.children = []
        if len(urlbase) and urlbase[-1] != '/':
            urlbase += '/'
        # Both directories and tracks expose the same backend URL.
        self.url = urlbase + str(self.id)
        if self.mimetype == 'directory':
            self.update_id = 0
        else:
            # Tracks additionally get a proxy resource streaming from
            # Last.fm and an mp3 http-get resource with DLNA flags.
            self.location = LFMProxyStream(obj.get('url'), self)
            res = Resource(self.url, 'http-get:*:%s:%s' % (obj.get('mimetype'),
                           ';'.join(('DLNA.ORG_PN=MP3',
                                     'DLNA.ORG_CI=0',
                                     'DLNA.ORG_OP=01',
                                     'DLNA.ORG_FLAGS=01700000000000000000000000000000'))))
            res.size = -1  # stream: size unknown
            self.item.res.append(res)

    def remove(self):
        if self.parent:
            self.parent.remove_child(self)
        del self.item

    def add_child(self, child, update=False):
        if self.children == None:
            self.children = []
        self.children.append(child)
        self.child_count += 1
        if isinstance(self.item, Container):
            self.item.childCount += 1
        # BUGFIX: only directory items carry update_id; guard the bump.
        if update and hasattr(self, 'update_id'):
            self.update_id += 1

    def remove_child(self, child):
        self.info("remove_from %d (%s) child %d (%s)" % (
            self.id, self.get_name(), child.id, child.get_name()))
        if child in self.children:
            self.child_count -= 1
            if isinstance(self.item, Container):
                self.item.childCount -= 1
            self.children.remove(child)
            # BUGFIX: guard as in add_child().
            if hasattr(self, 'update_id'):
                self.update_id += 1

    def get_children(self, start=0, request_count=0):
        """Return children from `start`; request_count == 0 means 'all
        remaining'."""
        if request_count == 0:
            return self.children[start:]
        # BUGFIX: request_count is a count, not an end index.
        return self.children[start:start + request_count]

    def get_child_count(self):
        if self.mimetype == 'directory':
            # Advertised count for the radio container; actual children
            # arrive lazily as playlists are fetched.
            return 100
        return self.child_count

    def get_id(self):
        return self.id

    def get_update_id(self):
        # Only directories track an update id; tracks report None.
        if hasattr(self, 'update_id'):
            return self.update_id
        else:
            return None

    def get_path(self):
        return self.url

    def get_name(self):
        return self.name

    def get_parent(self):
        return self.parent

    def get_item(self):
        return self.item

    def get_xml(self):
        return self.item.toString()

    def __repr__(self):
        return 'id: ' + str(self.id) + ' @ ' + self.url + ' ' + self.name
class LastFMStore(log.Loggable, Plugin):
    """Coherence MediaServer backend exposing a Last.fm radio stream.

    Fix over the previous revision: remove() deleted
    `self.store[int(id)]`, which referenced the *builtin* `id`
    function -- the del always raised and was swallowed by a bare
    except, so items were never actually dropped from the store.
    """

    logCategory = 'lastFM_store'
    implements = ['MediaServer']

    def __init__(self, server, **kwargs):
        # NOTE(review): this class does not inherit from BackendStore
        # yet calls its initialiser on itself -- confirm against the
        # coherence BackendStore implementation before changing bases.
        BackendStore.__init__(self, server, **kwargs)
        self.next_id = 1000
        self.config = kwargs
        self.name = kwargs.get('name', 'LastFMStore')
        self.update_id = 0
        self.store = {}
        self.wmc_mapping = {'4': 1000}
        louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)

    def __repr__(self):
        return str(self.__class__).split('.')[-1]

    def append(self, obj, parent):
        """Create a LastFMItem for `obj` (a metadata dict, or a string
        for a directory), register it and emit UPnP update events."""
        if isinstance(obj, basestring):
            mimetype = 'directory'
        else:
            mimetype = obj['mimetype']
        UPnPClass = classChooser(mimetype)
        id = self.getnextID()
        update = hasattr(self, 'update_id')
        self.store[id] = LastFMItem(id, obj, parent, mimetype, self.urlbase,
                                    UPnPClass, update=update)
        self.store[id].store = self
        if hasattr(self, 'update_id'):
            self.update_id += 1
            if self.server:
                self.server.content_directory_server.set_variable(
                    0, 'SystemUpdateID', self.update_id)
            if parent:
                value = (parent.get_id(), parent.get_update_id())
                if self.server:
                    self.server.content_directory_server.set_variable(
                        0, 'ContainerUpdateIDs', value)
        return self.store[id]

    def remove(self, item):
        """Unlink `item` from its parent, drop it from the store and
        emit the UPnP update events."""
        try:
            parent = item.get_parent()
            item.remove()
            # BUGFIX: delete by the item's own id, not the builtin id().
            del self.store[item.get_id()]
        except (AttributeError, KeyError):
            # Item was not (or no longer) registered; nothing to signal.
            return
        if hasattr(self, 'update_id'):
            self.update_id += 1
            if self.server:
                self.server.content_directory_server.set_variable(
                    0, 'SystemUpdateID', self.update_id)
            value = (parent.get_id(), parent.get_update_id())
            if self.server:
                self.server.content_directory_server.set_variable(
                    0, 'ContainerUpdateIDs', value)

    def len(self):
        return len(self.store)

    def get_by_id(self, id):
        """Look up an item by id; accepts ints or 'id' / 'id@...'
        strings. Id 0 maps to the root container. Returns None when
        the id is unknown."""
        if isinstance(id, basestring):
            id = id.split('@', 1)[0]
        id = int(id)
        if id == 0:
            id = 1000
        return self.store.get(id)

    def getnextID(self):
        ret = self.next_id
        self.next_id += 1
        return ret

    def upnp_init(self):
        """Create the root container, log in to Last.fm and advertise
        the mp3 source protocol."""
        self.current_connection_id = None
        parent = self.append({'name': 'LastFM', 'mimetype': 'directory'}, None)
        self.LFM = LastFMUser(self.config.get("login"),
                              self.config.get("password"))
        self.LFM.parent = parent
        self.LFM.login()
        if self.server:
            self.server.connection_manager_server.set_variable(
                0, 'SourceProtocolInfo',
                ['http-get:*:audio/mpeg:*'],
                default=True)
def main():
    # Ad-hoc manual test: build a store without a server and initialise it.
    f = LastFMStore(None)

    # NOTE(review): got_upnp_result is defined but never registered as a
    # callback -- upnp_init() is called directly below; confirm whether
    # this was meant to hang off a deferred.
    def got_upnp_result(result):
        print "upnp", result

    f.upnp_init()
if __name__ == '__main__':
    from twisted.internet import reactor
    # Run main() once the reactor is up, then enter the event loop.
    reactor.callWhenRunning(main)
    reactor.run()
| mit |
toontownfunserver/Panda3D-1.9.0 | Pmw/Pmw_1_3/lib/PmwComboBox.py | 6 | 11924 | # Based on iwidgets2.2.0/combobox.itk code.
import os
import string
import types
import Tkinter
import Pmw
class ComboBox(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
    """Create the combobox megawidget.

    Builds an EntryField plus either a dropdown arrow button with a
    popup listbox (dropdown option true, the default) or a permanently
    visible scrolled listbox below the entry (dropdown false).
    """

    # Define the megawidget options.
    INITOPT = Pmw.INITOPT
    optiondefs = (
        ('autoclear',          0,          INITOPT),
        ('buttonaspect',       1.0,        INITOPT),
        ('dropdown',           1,          INITOPT),
        ('fliparrow',          0,          INITOPT),
        ('history',            1,          INITOPT),
        ('labelmargin',        0,          INITOPT),
        ('labelpos',           None,       INITOPT),
        ('listheight',         200,        INITOPT),
        ('selectioncommand',   None,       None),
        ('sticky',             'ew',       INITOPT),
        ('unique',             1,          INITOPT),
    )
    self.defineoptions(kw, optiondefs)

    # Initialise the base class (after defining the options).
    Pmw.MegaWidget.__init__(self, parent)

    # Create the components.
    interior = self.interior()

    self._entryfield = self.createcomponent('entryfield',
            (('entry', 'entryfield_entry'),), None,
            Pmw.EntryField, (interior,))
    self._entryfield.grid(column=2, row=2, sticky=self['sticky'])
    interior.grid_columnconfigure(2, weight = 1)
    self._entryWidget = self._entryfield.component('entry')

    if self['dropdown']:
        self._isPosted = 0
        interior.grid_rowconfigure(2, weight = 1)

        # Create the arrow button.
        self._arrowBtn = self.createcomponent('arrowbutton',
                (), None,
                Tkinter.Canvas, (interior,), borderwidth = 2,
                relief = 'raised',
                width = 16, height = 16)
        # The arrow only follows the vertical components of the
        # requested stickiness.
        if 'n' in self['sticky']:
            sticky = 'n'
        else:
            sticky = ''
        if 's' in self['sticky']:
            sticky = sticky + 's'
        self._arrowBtn.grid(column=3, row=2, sticky = sticky)
        self._arrowRelief = self._arrowBtn.cget('relief')

        # Create the label.
        self.createlabel(interior, childCols=2)

        # Create the dropdown window (undecorated, initially hidden).
        self._popup = self.createcomponent('popup',
                (), None,
                Tkinter.Toplevel, (interior,))
        self._popup.withdraw()
        self._popup.overrideredirect(1)

        # Create the scrolled listbox inside the dropdown window.
        self._list = self.createcomponent('scrolledlist',
                (('listbox', 'scrolledlist_listbox'),), None,
                Pmw.ScrolledListBox, (self._popup,),
                hull_borderwidth = 2,
                hull_relief = 'raised',
                hull_height = self['listheight'],
                usehullsize = 1,
                listbox_exportselection = 0)
        self._list.pack(expand=1, fill='both')
        self.__listbox = self._list.component('listbox')

        # Bind events to the arrow button.
        self._arrowBtn.bind('<1>', self._postList)
        self._arrowBtn.bind('<Configure>', self._drawArrow)
        self._arrowBtn.bind('<3>', self._next)
        self._arrowBtn.bind('<Shift-3>', self._previous)
        self._arrowBtn.bind('<Down>', self._next)
        self._arrowBtn.bind('<Up>', self._previous)
        self._arrowBtn.bind('<Control-n>', self._next)
        self._arrowBtn.bind('<Control-p>', self._previous)
        self._arrowBtn.bind('<Shift-Down>', self._postList)
        self._arrowBtn.bind('<Shift-Up>', self._postList)
        self._arrowBtn.bind('<F34>', self._postList)
        self._arrowBtn.bind('<F28>', self._postList)
        self._arrowBtn.bind('<space>', self._postList)

        # Bind events to the dropdown window.
        self._popup.bind('<Escape>', self._unpostList)
        self._popup.bind('<space>', self._selectUnpost)
        self._popup.bind('<Return>', self._selectUnpost)
        self._popup.bind('<ButtonRelease-1>', self._dropdownBtnRelease)
        self._popup.bind('<ButtonPress-1>', self._unpostOnNextRelease)

        # Bind events to the Tk listbox.
        self.__listbox.bind('<Enter>', self._unpostOnNextRelease)

        # Bind events to the Tk entry widget.
        self._entryWidget.bind('<Configure>', self._resizeArrow)
        self._entryWidget.bind('<Shift-Down>', self._postList)
        self._entryWidget.bind('<Shift-Up>', self._postList)
        self._entryWidget.bind('<F34>', self._postList)
        self._entryWidget.bind('<F28>', self._postList)

        # Need to unpost the popup if the entryfield is unmapped (eg:
        # its toplevel window is withdrawn) while the popup list is
        # displayed.
        self._entryWidget.bind('<Unmap>', self._unpostList)
    else:
        # Create the scrolled listbox below the entry field.
        self._list = self.createcomponent('scrolledlist',
                (('listbox', 'scrolledlist_listbox'),), None,
                Pmw.ScrolledListBox, (interior,),
                selectioncommand = self._selectCmd)
        self._list.grid(column=2, row=3, sticky='nsew')
        self.__listbox = self._list.component('listbox')

        # The scrolled listbox should expand vertically.
        interior.grid_rowconfigure(3, weight = 1)

        # Create the label.
        self.createlabel(interior, childRows=2)

    # Keyboard navigation of the list, common to both modes.
    self._entryWidget.bind('<Down>', self._next)
    self._entryWidget.bind('<Up>', self._previous)
    self._entryWidget.bind('<Control-n>', self._next)
    self._entryWidget.bind('<Control-p>', self._previous)
    self.__listbox.bind('<Control-n>', self._next)
    self.__listbox.bind('<Control-p>', self._previous)

    if self['history']:
        self._entryfield.configure(command=self._addHistory)

    # Check keywords and initialise options.
    self.initialiseoptions()
    def destroy(self):
        """Tear down the megawidget, releasing the dropdown grab first.

        If the popup list is still posted, the application would
        otherwise be left holding a stale global pointer grab.
        """
        if self['dropdown'] and self._isPosted:
            Pmw.popgrab(self._popup)
        Pmw.MegaWidget.destroy(self)
#======================================================================
# Public methods
def get(self, first = None, last=None):
if first is None:
return self._entryWidget.get()
else:
return self._list.get(first, last)
    def invoke(self):
        """Programmatically activate the combobox.

        Dropdown combobox: post the list (returns None).  Simple
        combobox: execute the selection command and return its result.
        Note the asymmetry: only the simple variant returns a value.
        """
        if self['dropdown']:
            self._postList()
        else:
            return self._selectCmd()
    def selectitem(self, index, setentry=1):
        """Select the listbox item given by *index* (an integer position
        or the item's text) and, if *setentry* is true, copy its text
        into the entry field.

        Raises IndexError if a text index is not found in the list.
        """
        # NOTE: Python 2 idioms below (types.StringType, "raise E, msg").
        if type(index) == types.StringType:
            text = index
            items = self._list.get(0, 'end')
            if text in items:
                index = list(items).index(text)
            else:
                raise IndexError, 'index "%s" not found' % text
        elif setentry:
            text = self._list.get(0, 'end')[index]
        # Make the item the sole selection and scroll it into view.
        self._list.select_clear(0, 'end')
        self._list.select_set(index, index)
        self._list.activate(index)
        self.see(index)
        if setentry:
            self._entryfield.setentry(text)
# Need to explicitly forward this to override the stupid
# (grid_)size method inherited from Tkinter.Frame.Grid.
    def size(self):
        """Return the number of listbox items (overrides the grid_size
        alias inherited from Tkinter.Frame)."""
        return self._list.size()
# Need to explicitly forward this to override the stupid
# (grid_)bbox method inherited from Tkinter.Frame.Grid.
    def bbox(self, index):
        """Return the listbox bounding box of *index* (overrides the
        grid_bbox alias inherited from Tkinter.Frame)."""
        return self._list.bbox(index)
    def clear(self):
        """Clear both the entry field and the list contents."""
        self._entryfield.clear()
        self._list.clear()
#======================================================================
# Private methods for both dropdown and simple comboboxes.
def _addHistory(self):
input = self._entryWidget.get()
if input != '':
index = None
if self['unique']:
# If item is already in list, select it and return.
items = self._list.get(0, 'end')
if input in items:
index = list(items).index(input)
if index is None:
index = self._list.index('end')
self._list.insert('end', input)
self.selectitem(index)
if self['autoclear']:
self._entryWidget.delete(0, 'end')
# Execute the selectioncommand on the new entry.
self._selectCmd()
    def _next(self, event):
        """Select the next list item, wrapping to the first after the
        last (bound to <Down> and <Control-n>)."""
        size = self.size()
        if size <= 1:
            return
        cursels = self.curselection()
        if len(cursels) == 0:
            index = 0
        else:
            # curselection() yields strings; string.atoi is the Py2 int().
            index = string.atoi(cursels[0])
            if index == size - 1:
                index = 0
            else:
                index = index + 1
        self.selectitem(index)
    def _previous(self, event):
        """Select the previous list item, wrapping to the last before
        the first (bound to <Up> and <Control-p>)."""
        size = self.size()
        if size <= 1:
            return
        cursels = self.curselection()
        if len(cursels) == 0:
            index = size - 1
        else:
            # curselection() yields strings; string.atoi is the Py2 int().
            index = string.atoi(cursels[0])
            if index == 0:
                index = size - 1
            else:
                index = index - 1
        self.selectitem(index)
    def _selectCmd(self, event=None):
        """Copy the current list selection into the entry field and call
        the user's selectioncommand with the selected item (None when
        nothing is selected).

        When called without an event (i.e. via invoke()), the result of
        the selectioncommand is returned to the caller.
        """
        sels = self.getcurselection()
        if len(sels) == 0:
            item = None
        else:
            item = sels[0]
            self._entryfield.setentry(item)
        cmd = self['selectioncommand']
        if callable(cmd):
            if event is None:
                # Return result of selectioncommand for invoke() method.
                return cmd(item)
            else:
                cmd(item)
#======================================================================
# Private methods for dropdown combobox.
    def _drawArrow(self, event=None, sunken=0):
        """Redraw the arrow button, optionally sunken, flipping the
        arrow upwards while posted if 'fliparrow' is set."""
        arrow = self._arrowBtn
        if sunken:
            # Remember the current relief so it can be restored later.
            self._arrowRelief = arrow.cget('relief')
            arrow.configure(relief = 'sunken')
        else:
            arrow.configure(relief = self._arrowRelief)
        if self._isPosted and self['fliparrow']:
            direction = 'up'
        else:
            direction = 'down'
        Pmw.drawarrow(arrow, self['entry_foreground'], direction, 'arrow')
    def _postList(self, event = None):
        """Pop up the dropdown list below the entry field (or above it
        when there is not enough room on screen) and grab events."""
        self._isPosted = 1
        self._drawArrow(sunken=1)
        # Make sure that the arrow is displayed sunken.
        self.update_idletasks()
        x = self._entryfield.winfo_rootx()
        y = self._entryfield.winfo_rooty() + \
                self._entryfield.winfo_height()
        w = self._entryfield.winfo_width() + self._arrowBtn.winfo_width()
        h = self.__listbox.winfo_height()
        sh = self.winfo_screenheight()
        # Post above the entry if the list would run off the bottom of
        # the screen and the entry sits in the lower half.
        if y + h > sh and y > sh / 2:
            y = self._entryfield.winfo_rooty() - h
        self._list.configure(hull_width=w)
        Pmw.setgeometryanddeiconify(self._popup, '+%d+%d' % (x, y))
        # Grab the popup, so that all events are delivered to it, and
        # set focus to the listbox, to make keyboard navigation
        # easier.
        Pmw.pushgrab(self._popup, 1, self._unpostList)
        self.__listbox.focus_set()
        self._drawArrow()
        # Ignore the first release of the mouse button after posting the
        # dropdown list, unless the mouse enters the dropdown list.
        self._ignoreRelease = 1
    def _dropdownBtnRelease(self, event):
        """Handle <ButtonRelease-1> in the popup: unpost the list and,
        if the release happened over the listbox, run the selection
        command.  Releases over the scrollbars are ignored."""
        if (event.widget == self._list.component('vertscrollbar') or
                event.widget == self._list.component('horizscrollbar')):
            return
        # The first release after posting is ignored (see _postList).
        if self._ignoreRelease:
            self._unpostOnNextRelease()
            return
        self._unpostList()
        if (event.x >= 0 and event.x < self.__listbox.winfo_width() and
                event.y >= 0 and event.y < self.__listbox.winfo_height()):
            self._selectCmd()
    def _unpostOnNextRelease(self, event = None):
        """Arm the popup so the next button release unposts it."""
        self._ignoreRelease = 0
    def _resizeArrow(self, event):
        """Resize the arrow button to the entry field's height, keeping
        the 'buttonaspect' width/height ratio."""
        # string.atoi: Python 2 idiom for int() on Tk option strings.
        bw = (string.atoi(self._arrowBtn['borderwidth']) +
              string.atoi(self._arrowBtn['highlightthickness']))
        newHeight = self._entryfield.winfo_reqheight() - 2 * bw
        newWidth = int(newHeight * self['buttonaspect'])
        self._arrowBtn.configure(width=newWidth, height=newHeight)
        self._drawArrow()
    def _unpostList(self, event=None):
        """Withdraw the dropdown list, release the grab and restore the
        arrow button to its unposted appearance."""
        if not self._isPosted:
            # It is possible to get events on an unposted popup. For
            # example, by repeatedly pressing the space key to post
            # and unpost the popup. The <space> event may be
            # delivered to the popup window even though
            # Pmw.popgrab() has set the focus away from the
            # popup window. (Bug in Tk?)
            return
        # Restore the focus before withdrawing the window, since
        # otherwise the window manager may take the focus away so we
        # can't redirect it. Also, return the grab to the next active
        # window in the stack, if any.
        Pmw.popgrab(self._popup)
        self._popup.withdraw()
        self._isPosted = 0
        self._drawArrow()
    def _selectUnpost(self, event):
        """Unpost the dropdown and run the selection command (bound to
        <space> and <Return> in the popup)."""
        self._unpostList()
        self._selectCmd()
# Forward unknown ComboBox method calls to the internal scrolled
# listbox and entryfield components, respectively.
Pmw.forwardmethods(ComboBox, Pmw.ScrolledListBox, '_list')
Pmw.forwardmethods(ComboBox, Pmw.EntryField, '_entryfield')
| bsd-3-clause |
ali1234/synergy-old | tools/gtest-1.6.0/test/gtest_catch_exceptions_test.py | 414 | 9312 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output
# SEH (Windows structured exception handling) tests exist only in
# Windows builds of the test binaries.
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""
    def TestSehExceptions(self, test_output):
      # Verifies that an SEH exception thrown in every test phase
      # (fixture ctor/dtor, SetUpTestCase/TearDownTestCase,
      # SetUp/TearDown and the test body) is caught and reported.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)
    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)
    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.
  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
  expected manner.
  """
  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInFixtureDestructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s destructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)
  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # Fixed misspelled local name FITLER_OUT_SEH_TESTS_FLAG (local
    # variable only, so no external callers are affected).
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG]).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
  # Delegate to gtest's Python test runner (wraps unittest.main).
  gtest_test_utils.Main()
| gpl-2.0 |
koolkhel/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
# Optional single argument: a numeric pid or a comm (process name).
for_comm = None
for_pid = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]
# Nested autodict: syscalls[comm][pid][syscall_id][errno] -> count.
syscalls = autodict()
def trace_begin():
	# Called once by perf before event processing starts (Python 2).
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after all events have been processed.
	print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, ret):
	# Tracepoint handler: count failed (ret < 0) syscall exits,
	# keyed by comm -> pid -> syscall id -> errno, honouring the
	# optional comm/pid filter from the command line.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	if ret < 0:
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			# First failure for this key: the autodict leaf is not
			# an int yet, so initialise the count.
			syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# Fallback handler name used by older perf versions; forwards to
	# the raw handler.
	# NOTE(review): raw_syscalls__sys_exit also expects a
	# common_callchain argument that is not present in locals() here,
	# so this forward would raise TypeError if invoked -- confirm
	# against the perf version this script targets.
	raw_syscalls__sys_exit(**locals())
def print_error_totals():
	# Emit the per-comm/pid table of failed syscalls, each syscall's
	# error counts sorted by frequency (Python 2 print / iteritems).
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",
	print "%-30s  %10s\n" % ("comm [pid]", "count"),
	print "%-30s  %10s\n" % ("------------------------------", \
		"----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print "  syscall: %-16s\n" % syscall_name(id),
				ret_keys = syscalls[comm][pid][id].keys()
				# Sort (errno, count) pairs by count, descending
				# (Py2 tuple-unpacking lambda syntax).
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print "    err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
bopo/peewee | playhouse/tests/test_gfk.py | 16 | 3545 | from peewee import *
from playhouse.gfk import *
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
db = database_initializer.get_in_memory_database()
class BaseModel(Model):
    """Base class binding all test models to the in-memory database."""
    class Meta:
        database = db
    def add_tag(self, tag):
        # Create a Tag whose generic foreign key (object_type/object_id)
        # points back at this instance, and persist it.
        t = Tag(tag=tag)
        t.object = self
        t.save()
        return t
class Tag(BaseModel):
    """A label attached to any model row via a generic foreign key:
    object_type/object_id identify the target row, and the 'object'
    GFKField resolves them to a model instance."""
    tag = CharField()
    object_type = CharField(null=True)
    object_id = IntegerField(null=True)
    object = GFKField()
    class Meta:
        order_by = ('tag',)
class Appetizer(BaseModel):
    # 'tags' exposes all Tag rows whose GFK points at an Appetizer.
    name = CharField()
    tags = ReverseGFK(Tag)
class Entree(BaseModel):
    # 'tags' exposes all Tag rows whose GFK points at an Entree.
    name = CharField()
    tags = ReverseGFK(Tag)
class Dessert(BaseModel):
    # 'tags' exposes all Tag rows whose GFK points at a Dessert.
    name = CharField()
    tags = ReverseGFK(Tag)
class GFKTestCase(ModelTestCase):
    """Exercises playhouse.gfk generic-foreign-key fields using a small
    menu of foods tagged with flavour labels."""
    requires = [Tag, Appetizer, Entree, Dessert]
    # Fixture data: model -> ((food name, (tag, ...)), ...).
    data = {
        Appetizer: (
            ('wings', ('fried', 'spicy')),
            ('mozzarella sticks', ('fried', 'sweet')),
            ('potstickers', ('fried',)),
            ('edamame', ('salty',)),
        ),
        Entree: (
            ('phad thai', ('spicy',)),
            ('fried chicken', ('fried', 'salty')),
            ('tacos', ('fried', 'spicy')),
        ),
        Dessert: (
            ('sundae', ('sweet',)),
            ('churro', ('fried', 'sweet')),
        )
    }
    def create(self):
        # Populate the database from the fixture data above.
        for model, foods in self.data.items():
            for name, tags in foods:
                inst = model.create(name=name)
                for tag in tags:
                    inst.add_tag(tag)
    def test_creation(self):
        # A tag can even point at itself; object_type/object_id must be
        # populated on save and resolve back to the same row.
        t = Tag.create(tag='a tag')
        t.object = t
        t.save()
        t_db = Tag.get(Tag.id == t.id)
        self.assertEqual(t_db.object_id, t_db._get_pk_value())
        self.assertEqual(t_db.object_type, 'tag')
        self.assertEqual(t_db.object, t_db)
    def test_gfk_api(self):
        self.create()
        # test instance api
        for model, foods in self.data.items():
            for food, tags in foods:
                inst = model.get(model.name == food)
                self.assertEqual([t.tag for t in inst.tags], list(tags))
        # test class api and ``object`` api
        apps_tags = [(t.tag, t.object.name) for t in Appetizer.tags.order_by(Tag.id)]
        data_tags = []
        for food, tags in self.data[Appetizer]:
            for t in tags:
                data_tags.append((t, food))
        self.assertEqual(apps_tags, data_tags)
    def test_missing(self):
        # An unset GFK resolves to None; a dangling or unknown
        # object_type raises when dereferenced.
        t = Tag.create(tag='sour')
        self.assertEqual(t.object, None)
        t.object_type = 'appetizer'
        t.object_id = 1
        # accessing the descriptor will raise a DoesNotExist
        self.assertRaises(Appetizer.DoesNotExist, getattr, t, 'object')
        t.object_type = 'unknown'
        t.object_id = 1
        self.assertRaises(AttributeError, getattr, t, 'object')
    def test_set_reverse(self):
        # assign query
        e = Entree.create(name='phad thai')
        s = Tag.create(tag='spicy')
        p = Tag.create(tag='peanuts')
        t = Tag.create(tag='thai')
        b = Tag.create(tag='beverage')
        e.tags = Tag.select().where(Tag.tag != 'beverage')
        self.assertEqual([t.tag for t in e.tags], ['peanuts', 'spicy', 'thai'])
        e = Entree.create(name='panang curry')
        c = Tag.create(tag='coconut')
        e.tags = [p, t, c, s]
        self.assertEqual([t.tag for t in e.tags], ['coconut', 'peanuts', 'spicy', 'thai'])
| mit |
florianjacob/webassets | src/webassets/filter/pyscss.py | 12 | 5882 | import os
from webassets.filter import Filter
from webassets.utils import working_directory
__all__ = ('PyScss',)
class PyScss(Filter):
    """Converts `Scss <http://sass-lang.com/>`_ markup to real CSS.
    This uses `PyScss <https://github.com/Kronuz/pyScss>`_, a native
    Python implementation of the Scss language. The PyScss module needs
    to be installed. Its API has been changing; currently, version
    1.1.5 is known to be supported.
    This is an alternative to using the ``sass`` or ``scss`` filters,
    which are based on the original, external tools.
    .. note::
        The Sass syntax is not supported by PyScss. You need to use
        the ``sass`` filter based on the original Ruby implementation
        instead.
    *Supported configuration options:*
    PYSCSS_DEBUG_INFO (debug_info)
        Include debug information in the output for use with FireSass.
        If unset, the default value will depend on your
        :attr:`Environment.debug` setting.
    PYSCSS_LOAD_PATHS (load_paths)
        Additional load paths that PyScss should use.
        .. warning::
            The filter currently does not automatically use
            :attr:`Environment.load_path` for this.
    PYSCSS_STATIC_ROOT (static_root)
        The directory PyScss should look in when searching for include
        files that you have referenced. Will use
        :attr:`Environment.directory` by default.
    PYSCSS_STATIC_URL (static_url)
        The url PyScss should use when generating urls to files in
        ``PYSCSS_STATIC_ROOT``. Will use :attr:`Environment.url` by
        default.
    PYSCSS_ASSETS_ROOT (assets_root)
        The directory PyScss should look in when searching for things
        like images that you have referenced. Will use
        ``PYSCSS_STATIC_ROOT`` by default.
    PYSCSS_ASSETS_URL (assets_url)
        The url PyScss should use when generating urls to files in
        ``PYSCSS_ASSETS_ROOT``. Will use ``PYSCSS_STATIC_URL`` by
        default.
    PYSCSS_STYLE (style)
        The style of the output CSS. Can be one of ``nested`` (default),
        ``compact``, ``compressed``, or ``expanded``.
    """
    # TODO: PyScss now allows STATIC_ROOT to be a callable, though
    # none of the other pertinent values are allowed to be, so this
    # is probably not good enough for us.
    name = 'pyscss'
    options = {
        'debug_info': 'PYSCSS_DEBUG_INFO',
        'load_paths': 'PYSCSS_LOAD_PATHS',
        'static_root': 'PYSCSS_STATIC_ROOT',
        'static_url': 'PYSCSS_STATIC_URL',
        'assets_root': 'PYSCSS_ASSETS_ROOT',
        'assets_url': 'PYSCSS_ASSETS_URL',
        'style': 'PYSCSS_STYLE',
    }
    max_debug_level = None
    def setup(self):
        """Import pyScss lazily and push the filter options into its
        module-level ``scss.config`` settings."""
        super(PyScss, self).setup()
        import scss
        self.scss = scss
        if self.style:
            try:
                from packaging.version import Version
            except ImportError:
                from distutils.version import LooseVersion as Version
            assert Version(scss.__version__) >= Version('1.2.0'), \
                'PYSCSS_STYLE only supported in pyScss>=1.2.0'
        # Initialize various settings:
        # Why are these module-level, not instance-level ?!
        # TODO: It appears that in the current dev version, the
        # settings can finally passed to a constructor. We'll need
        # to support this.
        # Only the dev version appears to support a list
        if self.load_paths:
            scss.config.LOAD_PATHS = ','.join(self.load_paths)
        # These are needed for various helpers (working with images
        # etc.). Similar to the compass filter, we require the user
        # to specify such paths relative to the media directory.
        try:
            scss.config.STATIC_ROOT = self.static_root or self.ctx.directory
            scss.config.STATIC_URL = self.static_url or self.ctx.url
        except EnvironmentError:
            raise EnvironmentError('Because Environment.url and/or '
                'Environment.directory are not set, you need to '
                'provide values for the PYSCSS_STATIC_URL and/or '
                'PYSCSS_STATIC_ROOT settings.')
        # This directory PyScss will use when generating new files,
        # like a spritemap. Maybe we should REQUIRE this to be set.
        scss.config.ASSETS_ROOT = self.assets_root or scss.config.STATIC_ROOT
        scss.config.ASSETS_URL = self.assets_url or scss.config.STATIC_URL
    def input(self, _in, out, **kw):
        """Like the original sass filter, this also needs to work as
        an input filter, so that relative @imports can be properly
        resolved.
        """
        source_path = kw['source_path']
        # Because PyScss always puts the current working dir at first
        # place of the load path, this is what we need to use to make
        # relative references work.
        with working_directory(os.path.dirname(source_path)):
            scss_opts = {
                'debug_info': (
                    self.ctx.environment.debug if self.debug_info is None else self.debug_info),
            }
            if self.style:
                scss_opts['style'] = self.style
            else:
                scss_opts['compress'] = False
            scss = self.scss.Scss(
                scss_opts=scss_opts,
                # This is rather nice. We can pass along the filename,
                # but also give it already preprocessed content.
                scss_files={source_path: _in.read()})
            # Compile
            # Note: This will not throw an error when certain things
            # are wrong, like an include file missing. It merely outputs
            # to stdout, via logging. We might have to do something about
            # this, and evaluate such problems to an exception.
            out.write(scss.compile())
| bsd-2-clause |
zscproject/OWASP-ZSC | lib/encoder/osx_x86/sub_yourvalue.py | 3 | 2978 | #!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
def start(type, shellcode, job):
    """Encode an AT&T-syntax shellcode listing with the 'sub' scheme:
    immediates are replaced by code that rebuilds them at run time by
    subtracting a user-chosen constant, so the raw bytes never appear.

    Parameters:
        type:      encoder name of the form 'sub_0xVALUE'; the hex
                   VALUE is the constant added to each immediate here
                   and subtracted again at run time.
                   ('type' shadows the builtin, but the parameter name
                   is part of the public interface.)
        shellcode: assembly listing to transform.
        job:       'exec' rewrites the 'mov $0x3b,%al' syscall setup
                   plus subsequent push immediates; 'system' rewrites
                   every long 'push $0x...' line.

    Returns the transformed assembly listing.

    Fix vs. original: integer comparisons used 'is' (identity), which
    only works by accident of CPython small-int caching; replaced with
    '=='.
    """
    if job == "exec":
        value = str(type.rsplit('sub_')[1][2:])  # hex digits after '0x'
        eax = str('0x3b909090')
        eax_1 = value
        # Encoded immediate: original value plus the user constant.
        eax_2 = "%x" % (int(eax, 16) + int(eax_1, 16))
        A = 0
        if '-' in eax_2:
            # Negative encoded value: negate at run time before the sub.
            A = 1
            eax_2 = eax_2.replace('-', '')
            eax_sub = 'push $0x%s\npop %%eax\nneg %%eax\nsub $0x%s,%%eax\nshr $0x10,%%eax\nshr $0x08,%%eax\n_z3r0d4y_' % (
                eax_2, eax_1)
        if A == 0:
            eax_sub = 'push $0x%s\npop %%eax\nsub $0x%s,%%eax\nshr $0x10,%%eax\nshr $0x08,%%eax\n_z3r0d4y_' % (
                eax_2, eax_1)
        # '_z3r0d4y_' marks where the decoder stub ends; only push
        # lines after the marker are rewritten below.
        shellcode = shellcode.replace('mov $0x3b,%al', eax_sub)
        A = 0
        for line in shellcode.rsplit('\n'):
            if '_z3r0d4y_' in line:
                A = 1
            if 'push' in line and '$0x' in line and ',' not in line and len(
                    line) > 14 and A == 1:
                data = line.rsplit('push')[1].rsplit('$0x')[1]
                t = True
                # NOTE(review): if the chosen value or the computed sum
                # contains '00' (or is shorter than 7 hex digits), this
                # loop recomputes the same numbers forever and never
                # terminates -- confirm upstream intent before changing.
                while t:
                    ebx_1 = value
                    ebx_2 = "%x" % (int(data, 16) + int(ebx_1, 16))
                    if str('00') not in str(ebx_1) and str('00') not in str(
                            ebx_2) and len(ebx_2) >= 7 and len(
                                ebx_1) >= 7 and '-' not in ebx_1:
                        ebx_2 = ebx_2.replace('-', '')
                        command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nsub %%ebx,%%eax\npush %%eax\n' % (
                            str(ebx_1), str(ebx_2))
                        shellcode = shellcode.replace(line, command)
                        t = False
        shellcode = shellcode.replace('_z3r0d4y_', '')
    if job == "system":
        value = str(type.rsplit('sub_')[1][2:])
        for line in shellcode.rsplit('\n'):
            if 'push' in line and '$0x' in line and ',' not in line and len(
                    line) > 14:
                data = line.rsplit('push')[1].rsplit('$0x')[1]
                ebx_1 = value
                ebx_2 = "%x" % (int(data, 16) + int(ebx_1, 16))
                A = 0
                if str('-') in str(ebx_2):
                    # Negative encoded value: negate at run time.
                    ebx_2 = ebx_2.replace('-', '')
                    command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nneg %%eax\nsub %%ebx,%%eax\npush %%eax\n' % (
                        str(ebx_1), str(ebx_2))
                    A = 1
                if A == 0:
                    command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nsub %%ebx,%%eax\npush %%eax\n' % (
                        str(ebx_1), str(ebx_2))
                shellcode = shellcode.replace(line, command)
    return shellcode
density215/d215-miniblog | django/conf/locale/__init__.py | 157 | 9257 | LANG_INFO = {
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': u'\u0627\u0644\u0639\u0631\u0628\u064a\u0651\u0629',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': u'az\u0259rbaycan dili',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': u'\u09ac\u09be\u0982\u09b2\u09be',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': u'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': u'catal\xe0',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': u'\u010desky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': u'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': u'Dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': u'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': u'\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': u'English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': u'British English',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': u'espa\xf1ol',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': u'espa\xf1ol de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': u'espa\xf1ol de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': u'espa\xf1ol de Nicaragua',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': u'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': u'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': u'\u0641\u0627\u0631\u0633\u06cc',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': u'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': u'Fran\xe7ais',
},
'fy-nl': {
'bidi': False,
'code': 'fy-nl',
'name': 'Frisian',
'name_local': u'Frisian',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': u'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': u'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': u'\u05e2\u05d1\u05e8\u05d9\u05ea',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': u'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': u'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': u'Magyar',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': u'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': u'\xcdslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': u'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': u'\u65e5\u672c\u8a9e',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': u'\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': u'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': u'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': u'\ud55c\uad6d\uc5b4',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': u'Lithuanian',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': u'latvie\u0161u',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': u'\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': u'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': u'Mongolian',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': u'Norsk (bokm\xe5l)',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': u'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': u'Norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': u'Norsk',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': u'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': u'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': u'Portugu\xeas',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': u'Portugu\xeas Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': u'Rom\xe2n\u0103',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': u'\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': u'slovensk\xfd',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': u'Sloven\u0161\u010dina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': u'Albanian',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': u'\u0441\u0440\u043f\u0441\u043a\u0438',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': u'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': u'Svenska',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': u'\u0ba4\u0bae\u0bbf\u0bb4\u0bcd',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': u'\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': u'Thai',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': u'T\xfcrk\xe7e',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430',
},
'ur': {
'bidi': False,
'code': 'ur',
'name': 'Urdu',
'name_local': u'\u0627\u0631\u062f\u0648',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': u'Vietnamese',
},
'zh-cn': {
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': u'\u7b80\u4f53\u4e2d\u6587',
},
'zh-tw': {
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': u'\u7e41\u9ad4\u4e2d\u6587',
}
}
| bsd-3-clause |
vincepandolfo/django | tests/admin_views/test_adminsite.py | 23 | 2861 | from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from .models import Article
# A stand-alone AdminSite (distinct from django.contrib.admin.site) so these
# tests do not depend on the default site's registrations.
site = admin.AdminSite(name="test_adminsite")
site.register(User)
site.register(Article)

# Minimal URLconf; tests point ROOT_URLCONF at this module.
urlpatterns = [
    url(r'^test_admin/admin/', site.urls),
]
@override_settings(ROOT_URLCONF='admin_views.test_adminsite')
class SiteEachContextTest(TestCase):
    """
    Check each_context contains the documented variables and that available_apps context
    variable structure is the expected one.
    """

    @classmethod
    def setUpTestData(cls):
        # A superuser sees every registered app/model in available_apps.
        cls.u1 = User.objects.create_superuser(username='super', password='secret', email='super@example.com')

    def setUp(self):
        # Build the context once per test; tests below only read from it.
        factory = RequestFactory()
        request = factory.get(reverse('test_adminsite:index'))
        request.user = self.u1
        self.ctx = site.each_context(request)

    def test_each_context(self):
        # The documented, always-present context variables and their defaults.
        ctx = self.ctx
        self.assertEqual(ctx['site_header'], 'Django administration')
        self.assertEqual(ctx['site_title'], 'Django site admin')
        self.assertEqual(ctx['site_url'], '/')
        self.assertEqual(ctx['has_permission'], True)

    def test_each_context_site_url_with_script_name(self):
        # site_url must honor SCRIPT_NAME when the site is mounted at a sub-path.
        request = RequestFactory().get(reverse('test_adminsite:index'), SCRIPT_NAME='/my-script-name/')
        request.user = self.u1
        self.assertEqual(site.each_context(request)['site_url'], '/my-script-name/')

    def test_available_apps(self):
        ctx = self.ctx
        apps = ctx['available_apps']
        # we have registered two models from two different apps
        self.assertEqual(len(apps), 2)

        # admin_views.Article
        admin_views = apps[0]
        self.assertEqual(admin_views['app_label'], 'admin_views')
        self.assertEqual(len(admin_views['models']), 1)
        self.assertEqual(admin_views['models'][0]['object_name'], 'Article')

        # auth.User
        auth = apps[1]
        self.assertEqual(auth['app_label'], 'auth')
        self.assertEqual(len(auth['models']), 1)
        user = auth['models'][0]
        self.assertEqual(user['object_name'], 'User')

        self.assertEqual(auth['app_url'], '/test_admin/admin/auth/')
        self.assertEqual(auth['has_module_perms'], True)

        # Per-model permission flags and admin URLs for the superuser.
        self.assertIn('perms', user)
        self.assertEqual(user['perms']['add'], True)
        self.assertEqual(user['perms']['change'], True)
        self.assertEqual(user['perms']['delete'], True)
        self.assertEqual(user['admin_url'], '/test_admin/admin/auth/user/')
        self.assertEqual(user['add_url'], '/test_admin/admin/auth/user/add/')
        self.assertEqual(user['name'], 'Users')
| bsd-3-clause |
jsvine/csvkit | setup.py | 1 | 2208 | #!/usr/bin/env python
from setuptools import setup
# Packaging metadata for csvkit; `python setup.py install` entry point.
setup(
    name='csvkit',
    version='0.5.0',
    description='A library of utilities for working with CSV, the king of tabular file formats.',
    # The README doubles as the long description shown on PyPI.
    long_description=open('README').read(),
    author='Christopher Groskopf',
    author_email='staringmonkey@gmail.com',
    url='http://blog.apps.chicagotribune.com/',
    license='MIT',
    # Trove classifiers used by PyPI for categorization and search.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
    packages=[
        'csvkit',
        'csvkit.convert',
        'csvkit.utilities'
    ],
    # Each console script maps a command name to a module-level launcher.
    entry_points={
        'console_scripts': [
            'csvcut = csvkit.utilities.csvcut:launch_new_instance',
            'in2csv = csvkit.utilities.in2csv:launch_new_instance',
            'csvsql = csvkit.utilities.csvsql:launch_new_instance',
            'csvclean = csvkit.utilities.csvclean:launch_new_instance',
            'csvstat = csvkit.utilities.csvstat:launch_new_instance',
            'csvlook = csvkit.utilities.csvlook:launch_new_instance',
            'csvjoin = csvkit.utilities.csvjoin:launch_new_instance',
            'csvstack = csvkit.utilities.csvstack:launch_new_instance',
            'csvsort = csvkit.utilities.csvsort:launch_new_instance',
            'csvgrep = csvkit.utilities.csvgrep:launch_new_instance',
            'csvjson = csvkit.utilities.csvjson:launch_new_instance',
            'csvpy = csvkit.utilities.csvpy:launch_new_instance'
        ]
    },
    # Runtime dependencies (Excel, dBase, SQL and date parsing support).
    install_requires=[
        'argparse>=1.2.1',
        'xlrd>=0.7.1',
        'python-dateutil>=1.5',
        'sqlalchemy>=0.6.6',
        'openpyxl>=1.5.7',
        'dbf>=0.94.003']
)
| mit |
chriskmanx/qmole | QMOLEDEV64/nmap-4.76/zenmap/build/lib/radialnet/util/drawing.py | 3 | 1032 | # vim: set fileencoding=utf-8 :
# Copyright (C) 2007 Insecure.Com LLC.
#
# Author: João Paulo de Souza Medeiros <ignotus21@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import math
def cairo_to_gdk_color(color):
    """Convert a Cairo color to a GDK color.

    Parameters:
        color: sequence of float channel values in the range [0, 1].

    Returns:
        A list of int channel values scaled to the range [0, 65535].
    """
    # The original assigned into the result of range(), which only works on
    # Python 2 (where range() returns a list). A list comprehension is the
    # idiomatic, version-independent equivalent.
    return [int(channel * 65535) for channel in color]
| gpl-3.0 |
ltiao/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
          (-1.5, -1),
          (-1.4, -.9),
          (-1.3, -1.2),
          (-1.1, -.2),
          (-1.2, -.4),
          (-.5, 1.2),
          (-1.5, 2.1),
          (1, 1),
          # --
          (1.3, .8),
          (1.2, .5),
          (.2, -2),
          (.5, -2.4),
          (.2, -2.3),
          (0, -2.7),
          (1.3, 2.1)].T
# First 8 points belong to class 0, the remaining 8 to class 1.
Y = [0] * 8 + [1] * 8

# figure number
fignum = 1

# fit the model
for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()

    # Support vectors are drawn as larger hollow circles under the data points.
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -3
    x_max = 3
    y_min = -3
    y_max = 3

    # Evaluate the decision function on a 200x200 grid covering the plot area.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    # Solid line: decision boundary; dashed lines: +/-0.5 margin levels.
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1
plt.show()
| bsd-3-clause |
etzhou/edx-platform | common/lib/xmodule/xmodule/tests/test_annotator_mixin.py | 223 | 1932 | """
This test will run for annotator_mixin.py
"""
import unittest
from lxml import etree
from xmodule.annotator_mixin import get_instructions, get_extension, html_to_text
class HelperFunctionTest(unittest.TestCase):
    """
    Tests to ensure that the following helper functions work for the annotation tool
    """
    # Minimal <annotatable> snippet with an <instructions> child element.
    sample_xml = '''
        <annotatable>
            <instructions><p>Helper Test Instructions.</p></instructions>
        </annotatable>
    '''
    # Fixture URLs/markup shared by the tests below.
    sample_sourceurl = "http://video-js.zencoder.com/oceans-clip.mp4"
    sample_youtubeurl = "http://www.youtube.com/watch?v=yxLIu-scR9Y"
    sample_html = '<p><b>Testing here</b> and not bolded here</p>'

    def test_get_instructions(self):
        """
        Function takes in an input of a specific xml string with surrounding instructions
        tags and returns a valid html string.
        """
        xmltree = etree.fromstring(self.sample_xml)
        expected_xml = u"<div><p>Helper Test Instructions.</p></div>"
        actual_xml = get_instructions(xmltree)
        self.assertIsNotNone(actual_xml)
        self.assertEqual(expected_xml.strip(), actual_xml.strip())

        # Without an <instructions> element, get_instructions returns None.
        xmltree = etree.fromstring('<annotatable>foo</annotatable>')
        actual = get_instructions(xmltree)
        self.assertIsNone(actual)

    def test_get_extension(self):
        """
        Tests whether given a url if the video will return a youtube source or extension
        """
        expectedyoutube = 'video/youtube'
        expectednotyoutube = 'video/mp4'
        result1 = get_extension(self.sample_sourceurl)
        result2 = get_extension(self.sample_youtubeurl)
        self.assertEqual(expectedyoutube, result2)
        self.assertEqual(expectednotyoutube, result1)

    def test_html_to_text(self):
        # html_to_text should strip markup but keep the text content.
        expectedtext = "Testing here and not bolded here"
        result = html_to_text(self.sample_html)
        self.assertEqual(expectedtext, result)
| agpl-3.0 |
bdero/edx-platform | lms/djangoapps/instructor/hint_manager.py | 63 | 11158 | """
Views for hint management.
Get to these views through courseurl/hint_manager.
For example: https://courses.edx.org/courses/MITx/2.01x/2013_Spring/hint_manager
These views will only be visible if FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
"""
import json
import re
from django.http import HttpResponse, Http404
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response, render_to_string
from courseware.courses import get_course_with_access
from courseware.models import XModuleUserStateSummaryField
import courseware.module_render as module_render
import courseware.model_data as model_data
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
@ensure_csrf_cookie
def hint_manager(request, course_id):
    """
    The URL landing function for all calls to the hint manager, both POST and GET.

    GET renders the full management page (moderation queue by default).
    POST dispatches on request.POST['op'] and returns the re-rendered inner
    page as JSON: {'success': True, 'contents': <html>}.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Only course staff may use the hint manager; everyone else gets a refusal.
    try:
        get_course_with_access(request.user, 'staff', course_key, depth=None)
    except Http404:
        out = 'Sorry, but students are not allowed to access the hint manager!'
        return HttpResponse(out)
    if request.method == 'GET':
        out = get_hints(request, course_key, 'mod_queue')
        out.update({'error': ''})
        return render_to_response('instructor/hint_manager.html', out)
    # POST from this point on.
    field = request.POST['field']
    if not (field == 'mod_queue' or field == 'hints'):
        # Invalid field. (Don't let users continue - they may overwrite other db's)
        out = 'Error in hint manager - an invalid field was accessed.'
        return HttpResponse(out)
    # Map each supported operation name to its handler function.
    switch_dict = {
        'delete hints': delete_hints,
        'switch fields': lambda *args: None,  # Takes any number of arguments, returns None.
        'change votes': change_votes,
        'add hint': add_hint,
        'approve': approve,
    }
    # Do the operation requested, and collect any error messages.
    error_text = switch_dict[request.POST['op']](request, course_key, field)
    if error_text is None:
        error_text = ''
    # Re-render the hint tables so the client can refresh in place.
    render_dict = get_hints(request, course_key, field)
    render_dict.update({'error': error_text})
    rendered_html = render_to_string('instructor/hint_manager_inner.html', render_dict)
    return HttpResponse(json.dumps({'success': True, 'contents': rendered_html}))
def get_hints(request, course_id, field):
    """
    Load all of the hints submitted to the course.

    Args:
    `request` -- Django request object.
    `course_id` -- The course id, like 'Me/19.002/test_course'
    `field` -- Either 'hints' or 'mod_queue'; specifies which set of hints to load.

    Keys in returned dict:
        - 'field': Same as input
        - 'other_field': 'mod_queue' if `field` == 'hints'; and vice-versa.
        - 'field_label', 'other_field_label': English name for the above.
        - 'all_hints': A list of [answer, pk dict] pairs, representing all hints.
          Sorted by answer.
        - 'id_to_name': A dictionary mapping problem id to problem name.
    """
    # NOTE(review): `field` is assumed pre-validated by the caller
    # (hint_manager); any other value would leave other_field unbound.
    if field == 'mod_queue':
        other_field = 'hints'
        field_label = 'Hints Awaiting Moderation'
        other_field_label = 'Approved Hints'
    elif field == 'hints':
        other_field = 'mod_queue'
        field_label = 'Approved Hints'
        other_field_label = 'Hints Awaiting Moderation'
    # We want to use the course_id to find all matching usage_id's.
    # To do this, just take the school/number part - leave off the classname.
    # FIXME: we need to figure out how to do this with opaque keys
    all_hints = XModuleUserStateSummaryField.objects.filter(
        field_name=field,
        usage_id__regex=re.escape(u'{0.org}/{0.course}'.format(course_id)),
    )
    # big_out_dict[problem id] = [[answer, {pk: [hint, votes]}], sorted by answer]
    # big_out_dict maps a problem id to a list of [answer, hints] pairs, sorted in order of answer.
    big_out_dict = {}
    # id_to name maps a problem id to the name of the problem.
    # id_to_name[problem id] = Display name of problem
    id_to_name = {}

    for hints_by_problem in all_hints:
        hints_by_problem.usage_id = hints_by_problem.usage_id.map_into_course(course_id)
        name = location_to_problem_name(course_id, hints_by_problem.usage_id)
        if name is None:
            # The hinter module no longer exists in the course; skip its hints.
            continue
        id_to_name[hints_by_problem.usage_id] = name

        def answer_sorter(thing):
            """
            `thing` is a tuple, where `thing[0]` contains an answer, and `thing[1]` contains
            a dict of hints. This function returns an index based on `thing[0]`, which
            is used as a key to sort the list of things.
            """
            try:
                return float(thing[0])
            except ValueError:
                # Put all non-numerical answers first.
                return float('-inf')

        # Answer list contains [answer, dict_of_hints] pairs.
        answer_list = sorted(json.loads(hints_by_problem.value).items(), key=answer_sorter)
        big_out_dict[hints_by_problem.usage_id] = answer_list

    render_dict = {'field': field,
                   'other_field': other_field,
                   'field_label': field_label,
                   'other_field_label': other_field_label,
                   'all_hints': big_out_dict,
                   'id_to_name': id_to_name}
    return render_dict
def location_to_problem_name(course_id, loc):
    """
    Resolve the display name of the problem wrapped by the crowdsource_hinter
    module at location `loc`.

    Returns None when the hinter module no longer exists in the course (e.g.
    the problem was removed after hints were collected).
    """
    try:
        return modulestore().get_item(loc).get_children()[0].display_name
    except ItemNotFoundError:
        # The problem was deleted from the course; callers skip it.
        return None
def delete_hints(request, course_id, field):
    """
    Deletes the hints specified.

    `request.POST` contains some fields keyed by integers. Each such field contains a
    [problem_defn_id, answer, pk] tuple. These tuples specify the hints to be deleted.

    Example `request.POST`:
    {'op': 'delete_hints',
     'field': 'mod_queue',
     1: ['problem_whatever', '42.0', '3'],
     2: ['problem_whatever', '32.5', '12']}
    """
    for key in request.POST:
        # 'op' and 'field' are bookkeeping keys; every other key names a hint.
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        # Can be optimized - sort the delete list by problem_id, and load each problem
        # from the database only once.
        this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        # The stored value is a JSON dict: {answer: {pk: [hint_text, votes]}}.
        problem_dict = json.loads(this_problem.value)
        del problem_dict[answer][pk]
        this_problem.value = json.dumps(problem_dict)
        this_problem.save()
def change_votes(request, course_id, field):
    """
    Updates the number of votes.

    The numbered fields of `request.POST` contain [problem_id, answer, pk, new_votes] tuples.
    See `delete_hints`.

    Example `request.POST`:
    {'op': 'delete_hints',
     'field': 'mod_queue',
     1: ['problem_whatever', '42.0', '3', 42],
     2: ['problem_whatever', '32.5', '12', 9001]}
    """
    for key in request.POST:
        # Skip the bookkeeping keys; everything else names a vote update.
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk, new_votes = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        problem_dict = json.loads(this_problem.value)
        # problem_dict[answer][pk] points to a [hint_text, #votes] pair.
        problem_dict[answer][pk][1] = int(new_votes)
        this_problem.value = json.dumps(problem_dict)
        this_problem.save()
def add_hint(request, course_id, field):
    """
    Add a new hint. `request.POST`:
    op
    field
    problem - The problem id
    answer - The answer to which a hint will be added
    hint - The text of the hint

    Returns an error string when the answer fails validation, else None.
    """
    problem_id = request.POST['problem']
    problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
    answer = request.POST['answer']
    hint_text = request.POST['hint']

    # Validate the answer. This requires initializing the xmodules, which
    # is annoying.
    try:
        descriptor = modulestore().get_item(problem_key)
        descriptors = [descriptor]
    except ItemNotFoundError:
        descriptors = []
    field_data_cache = model_data.FieldDataCache(descriptors, course_id, request.user)
    hinter_module = module_render.get_module(request.user, request, problem_key, field_data_cache, course_id)
    if not hinter_module.validate_answer(answer):
        # Invalid answer. Don't add it to the database, or else the
        # hinter will crash when we encounter it.
        return 'Error - the answer you specified is not properly formatted: ' + str(answer)

    this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)

    # 'hint_pk' is a per-problem counter used to give each hint a unique key;
    # reserve the current value and increment it for the next hint.
    hint_pk_entry = XModuleUserStateSummaryField.objects.get(field_name='hint_pk', usage_id=problem_key)
    this_pk = int(hint_pk_entry.value)
    hint_pk_entry.value = this_pk + 1
    hint_pk_entry.save()

    problem_dict = json.loads(this_problem.value)
    if answer not in problem_dict:
        problem_dict[answer] = {}
    # New hints start with a single vote.
    problem_dict[answer][this_pk] = [hint_text, 1]
    this_problem.value = json.dumps(problem_dict)
    this_problem.save()
def approve(request, course_id, field):
    """
    Approve a list of hints, moving them from the mod_queue to the real
    hint list. POST:
    op, field
    (some number) -> [problem, answer, pk]

    The numbered fields are analogous to those in `delete_hints` and `change_votes`.
    """
    for key in request.POST:
        if key == 'op' or key == 'field':
            continue
        problem_id, answer, pk = request.POST.getlist(key)
        problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
        # Can be optimized - sort the delete list by problem_id, and load each problem
        # from the database only once.
        # Remove the hint from the moderation queue...
        problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
        problem_dict = json.loads(problem_in_mod.value)
        hint_to_move = problem_dict[answer][pk]
        del problem_dict[answer][pk]
        problem_in_mod.value = json.dumps(problem_dict)
        problem_in_mod.save()

        # ...and insert it into the approved 'hints' field under the same pk.
        problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)
        problem_dict = json.loads(problem_in_hints.value)
        if answer not in problem_dict:
            problem_dict[answer] = {}
        problem_dict[answer][pk] = hint_to_move
        problem_in_hints.value = json.dumps(problem_dict)
        problem_in_hints.save()
| agpl-3.0 |
pjg101/SickRage | lib/js2py/legecy_translators/utils.py | 96 | 2705 | import sys
import unicodedata
from collections import defaultdict
def is_lval(t):
    """Return True if t is a syntactically valid JS identifier name.

    Does not check whether t is restricted (a reserved word) or internal;
    see is_valid_lval for that.
    """
    if not t:
        return False
    i = iter(t)
    # next(i) works on both Python 2.6+ and Python 3; the original used the
    # Python-2-only i.next() method.
    if next(i) not in IDENTIFIER_START:
        return False
    return all(e in IDENTIFIER_PART for e in i)
def is_valid_lval(t):
    """Return True if t is a usable JS identifier name.

    Excludes reserved names (keywords like var, function, if, ...) and
    internal, translator-generated names.
    """
    return is_lval(t) and not is_internal(t) and t not in RESERVED_NAMES
def is_plval(t):
    """Return True if t is a translator-generated lvalue name."""
    return t[:8] == 'PyJsLval'
def is_marker(t):
    """Return True if t is a translator-generated marker or constant name."""
    return t.startswith(('PyJsMarker', 'PyJsConstant'))
def is_internal(t):
    """Return True if t is internal to the translator: a generated lvalue,
    a marker/constant, or the scope variable name 'var'."""
    if t == 'var':  # 'var' names the scope dictionary itself.
        return True
    return is_plval(t) or is_marker(t)
def is_property_accessor(t):
    """Return True if t contains a property access (dot or bracket form)."""
    return any(sep in t for sep in ('[', '.'))
def is_reserved(t):
    """Return True if t is a JS reserved name: a keyword, a future reserved
    word, or one of the literals null/false/true."""
    return t in RESERVED_NAMES
#http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category
# Character constants named after the ECMAScript lexical grammar.
BOM = u'\uFEFF'    # byte-order mark
ZWJ = u'\u200D'    # zero-width joiner
ZWNJ = u'\u200C'   # zero-width non-joiner
TAB = u'\u0009'
VT = u'\u000B'     # vertical tab
FF = u'\u000C'     # form feed
SP = u'\u0020'
NBSP = u'\u00A0'   # no-break space
LF = u'\u000A'
CR = u'\u000D'
LS = u'\u2028'     # line separator
PS = u'\u2029'     # paragraph separator
# Bucket every Unicode code point by its general category (e.g. 'Lu', 'Nd').
# NOTE(review): map(unichr, ...) is Python-2-only; this module targets Py2.
U_CATEGORIES = defaultdict(list) # Thank you Martijn Pieters!
for c in map(unichr, range(sys.maxunicode + 1)):
    U_CATEGORIES[unicodedata.category(c)].append(c)
UNICODE_LETTER = set(U_CATEGORIES['Lu']+U_CATEGORIES['Ll']+
                     U_CATEGORIES['Lt']+U_CATEGORIES['Lm']+
                     U_CATEGORIES['Lo']+U_CATEGORIES['Nl'])
UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn']+U_CATEGORIES['Mc'])
UNICODE_DIGIT = set(U_CATEGORIES['Nd'])
UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc'])
# IdentifierStart / IdentifierPart per the JS grammar. Unicode escape
# sequences inside identifiers are not handled here.
IDENTIFIER_START = UNICODE_LETTER.union({'$','_'})
IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT).union(UNICODE_CONNECTOR_PUNCTUATION).union({ZWJ, ZWNJ})
USP = U_CATEGORIES['Zs']  # Unicode space separators
KEYWORD = {'break', 'do', 'instanceof', 'typeof', 'case', 'else', 'new',
           'var', 'catch', 'finally', 'return', 'void', 'continue', 'for',
           'switch', 'while', 'debugger', 'function', 'this', 'with', 'default',
           'if', 'throw', 'delete', 'in', 'try'}
FUTURE_RESERVED_WORD = {'class', 'enum', 'extends', 'super', 'const', 'export', 'import'}
RESERVED_NAMES = KEYWORD.union(FUTURE_RESERVED_WORD).union({'null', 'false', 'true'})
WHITE = {TAB, VT, FF, SP, NBSP, BOM}.union(USP)
LINE_TERMINATOR = {LF, CR, LS, PS}
LLINE_TERMINATOR = list(LINE_TERMINATOR)
# NOTE(review): 'x' appears unused in this chunk; presumably leftover debug
# state — confirm before removing.
x = ''.join(WHITE)+''.join(LINE_TERMINATOR)
SPACE = WHITE.union(LINE_TERMINATOR)
LINE_TERMINATOR_SEQUENCE = LINE_TERMINATOR.union({CR+LF}) | gpl-3.0 |
endlessm/chromium-browser | build/android/pylib/symbols/apk_native_libs_unittest.py | 7 | 14098 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import unittest
from pylib.symbols import apk_native_libs
# Mock ELF-like data: starts with the 4-byte ELF magic ('\x7fELF') followed
# by filler, so MockApkZipInfo.IsElfFile() recognizes it.
MOCK_ELF_DATA = '\x7fELFFFFFFFFFFFFFFFF'
class MockApkZipInfo(object):
  """In-memory fake of an ApkZipInfo entry, as produced by MockApkReader."""

  def __init__(self, filename, file_size, compress_size, file_offset,
               file_data):
    self._file_data = file_data
    self.filename = filename
    self.file_offset = file_offset
    self.file_size = file_size
    self.compress_size = compress_size

  def __repr__(self):
    """Debug-friendly description of this entry."""
    return 'MockApkZipInfo["%s",size=%d,compressed=%d,offset=%d]' % (
        self.filename, self.file_size, self.compress_size, self.file_offset)

  def IsCompressed(self):
    """Returns True iff the entry is compressed."""
    return self.compress_size != self.file_size

  def IsElfFile(self):
    """Returns True iff the entry is an ELF file."""
    data = self._file_data
    if not data or len(data) < 4:
      return False
    return data[0:4] == '\x7fELF'
class MockApkReader(object):
  """In-memory stand-in for an ApkReader, used during unit-testing.

  Typical use:
    with MockApkReader() as reader:
      reader.AddTestEntry(file_path, file_size, compress_size, file_data)
      ...
      # Hand the reader to code expecting an apk_native_libs.ApkReader.
  """

  def __init__(self, apk_path='test.apk'):
    """Create an empty reader for the given (fake) archive path."""
    self._path = apk_path
    self._entries = []
    self._fake_offset = 0

  def __enter__(self):
    return self

  def __exit__(self, *_exc_info):
    self.Close()
    return

  @property
  def path(self):
    return self._path

  def AddTestEntry(self, filepath, file_size, compress_size, file_data):
    """Append a fake archive member for unit-tests.

    Args:
      filepath: archive file path.
      file_size: uncompressed file size in bytes.
      compress_size: compressed size in bytes.
      file_data: payload checked by MockApkZipInfo.IsElfFile(); may be None
        or shorter than compress_size in tests.

    Entry offsets are assigned sequentially, as if members were stored
    back-to-back in the archive.
    """
    entry = MockApkZipInfo(filepath, file_size, compress_size,
                           self._fake_offset, file_data)
    self._entries.append(entry)
    self._fake_offset += compress_size

  def Close(self):  # pylint: disable=no-self-use
    """No-op; a real reader would release its file handle here."""
    return

  def ListEntries(self):
    """Return all MockApkZipInfo entries added so far, in insertion order."""
    return self._entries

  def FindEntry(self, file_path):
    """Return the entry matching file_path, or raise KeyError."""
    for candidate in self._entries:
      if candidate.filename == file_path:
        return candidate
    raise KeyError('Could not find mock zip archive member for: ' + file_path)
class MockApkReaderTest(unittest.TestCase):
  """Sanity checks for the MockApkReader test helper itself."""

  def testEmpty(self):
    # A fresh reader has no entries, and lookups raise KeyError.
    with MockApkReader() as reader:
      entries = reader.ListEntries()
      self.assertTrue(len(entries) == 0)
      with self.assertRaises(KeyError):
        reader.FindEntry('non-existent-entry.txt')

  def testSingleEntry(self):
    with MockApkReader() as reader:
      reader.AddTestEntry('some-path/some-file', 20000, 12345, file_data=None)
      entries = reader.ListEntries()
      self.assertTrue(len(entries) == 1)
      entry = entries[0]
      self.assertEqual(entry.filename, 'some-path/some-file')
      self.assertEqual(entry.file_size, 20000)
      self.assertEqual(entry.compress_size, 12345)
      # file_size != compress_size, so the entry reports as compressed.
      self.assertTrue(entry.IsCompressed())

      entry2 = reader.FindEntry('some-path/some-file')
      self.assertEqual(entry, entry2)

  def testMultipleEntries(self):
    with MockApkReader() as reader:
      # Maps file path -> (file_size, compress_size, file_data).
      _ENTRIES = {
          'foo.txt': (1024, 1024, 'FooFooFoo'),
          # BUG FIX: this tuple previously had a spurious fourth element
          # (16000, 3240, 1024, ELF-data), which shifted the ELF payload out
          # of the file_data slot passed to AddTestEntry.
          'lib/bar/libcode.so': (16000, 3240, '\x7fELFFFFFFFFFFFF'),
      }
      # .items() replaces the Python-2-only .iteritems().
      for path, props in _ENTRIES.items():
        reader.AddTestEntry(path, props[0], props[1], props[2])

      entries = reader.ListEntries()
      self.assertEqual(len(entries), len(_ENTRIES))
      for path, props in _ENTRIES.items():
        entry = reader.FindEntry(path)
        self.assertEqual(entry.filename, path)
        self.assertEqual(entry.file_size, props[0])
        self.assertEqual(entry.compress_size, props[1])
class ApkNativeLibrariesTest(unittest.TestCase):
  """Tests for apk_native_libs.ApkNativeLibraries, fed by MockApkReader."""

  def setUp(self):
    # Raise the log level to hide expected warnings about ignored entries.
    logging.getLogger().setLevel(logging.ERROR)

  def testEmptyApk(self):
    with MockApkReader() as reader:
      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertTrue(libs_map.IsEmpty())
      self.assertEqual(len(libs_map.GetLibraries()), 0)
      # Lookups in an empty map return (None, 0).
      lib_path, lib_offset = libs_map.FindLibraryByOffset(0)
      self.assertIsNone(lib_path)
      self.assertEqual(lib_offset, 0)

  def testSimpleApk(self):
    with MockApkReader() as reader:
      # Each tuple: (path, file_size, compress_size, data, should-be-indexed).
      _MOCK_ENTRIES = [
          # Top-level library should be ignored.
          ('libfoo.so', 1000, 1000, MOCK_ELF_DATA, False),
          # Library not under lib/ should be ignored.
          ('badlib/test-abi/libfoo2.so', 1001, 1001, MOCK_ELF_DATA, False),
          # Library under lib/<abi>/ but without .so extension should be ignored.
          ('lib/test-abi/libfoo4.so.1', 1003, 1003, MOCK_ELF_DATA, False),
          # Library under lib/<abi>/ with .so suffix, but compressed -> ignored.
          ('lib/test-abi/libfoo5.so', 1004, 1003, MOCK_ELF_DATA, False),
          # First correct library
          ('lib/test-abi/libgood1.so', 1005, 1005, MOCK_ELF_DATA, True),
          # Second correct library: support sub-directories
          ('lib/test-abi/subdir/libgood2.so', 1006, 1006, MOCK_ELF_DATA, True),
          # Third correct library, no lib prefix required
          ('lib/test-abi/crazy.libgood3.so', 1007, 1007, MOCK_ELF_DATA, True),
      ]
      # Record each entry's archive offset so we can query by offset below.
      file_offsets = []
      prev_offset = 0
      for ent in _MOCK_ENTRIES:
        reader.AddTestEntry(ent[0], ent[1], ent[2], ent[3])
        file_offsets.append(prev_offset)
        prev_offset += ent[2]

      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertFalse(libs_map.IsEmpty())
      self.assertEqual(libs_map.GetLibraries(), [
          'lib/test-abi/crazy.libgood3.so',
          'lib/test-abi/libgood1.so',
          'lib/test-abi/subdir/libgood2.so',
      ])

      # An offset BIAS bytes inside an indexed library should map back to
      # that library with the same relative offset.
      BIAS = 10
      for mock_ent, file_offset in zip(_MOCK_ENTRIES, file_offsets):
        if mock_ent[4]:
          lib_path, lib_offset = libs_map.FindLibraryByOffset(
              file_offset + BIAS)
          self.assertEqual(lib_path, mock_ent[0])
          self.assertEqual(lib_offset, BIAS)

  def testMultiAbiApk(self):
    with MockApkReader() as reader:
      # Same library name under two ABI directories; both are indexed.
      _MOCK_ENTRIES = [
          ('lib/abi1/libfoo.so', 1000, 1000, MOCK_ELF_DATA),
          ('lib/abi2/libfoo.so', 1000, 1000, MOCK_ELF_DATA),
      ]
      for ent in _MOCK_ENTRIES:
        reader.AddTestEntry(ent[0], ent[1], ent[2], ent[3])

      libs_map = apk_native_libs.ApkNativeLibraries(reader)
      self.assertFalse(libs_map.IsEmpty())
      self.assertEqual(libs_map.GetLibraries(), [
          'lib/abi1/libfoo.so', 'lib/abi2/libfoo.so'])

      lib1_name, lib1_offset = libs_map.FindLibraryByOffset(10)
      self.assertEqual(lib1_name, 'lib/abi1/libfoo.so')
      self.assertEqual(lib1_offset, 10)

      lib2_name, lib2_offset = libs_map.FindLibraryByOffset(1000)
      self.assertEqual(lib2_name, 'lib/abi2/libfoo.so')
      self.assertEqual(lib2_offset, 0)
class MockApkNativeLibraries(apk_native_libs.ApkNativeLibraries):
  """ApkNativeLibraries double that skips the ApkReader-based constructor.

  Populate an instance with AddTestEntry()/AddTestEntries(), then use it
  anywhere a regular ApkNativeLibraries instance is expected (e.g. as input
  to ApkLibraryPathTranslator).
  """

  # pylint: disable=super-init-not-called
  def __init__(self):
    self._native_libs = []
  # pylint: enable=super-init-not-called

  def AddTestEntry(self, lib_path, file_offset, file_size):
    """Record one library as a (path, start offset, end offset) tuple.

    Args:
      lib_path: library path inside the APK
        (e.g. 'lib/armeabi-v8a/libfoo.so').
      file_offset: offset of the library within the APK file.
      file_size: size of the library in bytes.
    """
    end_offset = file_offset + file_size
    self._native_libs.append((lib_path, file_offset, end_offset))

  def AddTestEntries(self, entries):
    """Record several libraries at once; see AddTestEntry.

    Args:
      entries: iterable of (lib_path, file_offset, file_size) tuples.
    """
    for lib_path, file_offset, file_size in entries:
      self.AddTestEntry(lib_path, file_offset, file_size)
class MockApkNativeLibrariesTest(unittest.TestCase):
  """Sanity checks for the MockApkNativeLibraries helper itself."""

  def testEmptyInstance(self):
    mock = MockApkNativeLibraries()
    self.assertTrue(mock.IsEmpty())
    self.assertEqual(mock.GetLibraries(), [])
    self.assertEqual(mock.GetDumpList(), [])

  def testAddTestEntry(self):
    mock = MockApkNativeLibraries()
    # Entries added out of offset order on purpose.
    mock.AddTestEntry('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000)
    mock.AddTestEntry('lib/x86/libzoo.so', 0x10000, 0x10000)
    mock.AddTestEntry('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000)
    self.assertFalse(mock.IsEmpty())
    # GetLibraries() returns paths sorted alphabetically.
    self.assertEqual(mock.GetLibraries(), ['lib/armeabi-v7a/libbar.so',
                                           'lib/armeabi-v7a/libfoo.so',
                                           'lib/x86/libzoo.so'])
    # GetDumpList() returns (path, offset, size) tuples sorted by offset.
    self.assertEqual(mock.GetDumpList(), [
        ('lib/x86/libzoo.so', 0x10000, 0x10000),
        ('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000),
        ('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000),
    ])

  def testAddTestEntries(self):
    # Same expectations as above, via the bulk AddTestEntries() helper.
    mock = MockApkNativeLibraries()
    mock.AddTestEntries([
        ('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000),
        ('lib/x86/libzoo.so', 0x10000, 0x10000),
        ('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000),
    ])
    self.assertFalse(mock.IsEmpty())
    self.assertEqual(mock.GetLibraries(), ['lib/armeabi-v7a/libbar.so',
                                           'lib/armeabi-v7a/libfoo.so',
                                           'lib/x86/libzoo.so'])
    self.assertEqual(mock.GetDumpList(), [
        ('lib/x86/libzoo.so', 0x10000, 0x10000),
        ('lib/armeabi-v7a/libfoo.so', 0x20000, 0x4000),
        ('lib/armeabi-v7a/libbar.so', 0x24000, 0x8000),
    ])
class ApkLibraryPathTranslatorTest(unittest.TestCase):
def _CheckUntranslated(self, translator, path, offset):
"""Check that a given (path, offset) is not modified by translation."""
self.assertEqual(translator.TranslatePath(path, offset), (path, offset))
def _CheckTranslated(self, translator, path, offset, new_path, new_offset):
"""Check that (path, offset) is translated into (new_path, new_offset)."""
self.assertEqual(translator.TranslatePath(path, offset),
(new_path, new_offset))
def testEmptyInstance(self):
translator = apk_native_libs.ApkLibraryPathTranslator()
self._CheckUntranslated(
translator, '/data/data/com.example.app-1/base.apk', 0x123456)
def testSimpleApk(self):
mock_libs = MockApkNativeLibraries()
mock_libs.AddTestEntries([
('lib/test-abi/libfoo.so', 200, 2000),
('lib/test-abi/libbar.so', 3200, 3000),
('lib/test-abi/crazy.libzoo.so', 6200, 2000),
])
translator = apk_native_libs.ApkLibraryPathTranslator()
translator.AddHostApk('com.example.app', mock_libs)
# Offset is within the first uncompressed library
self._CheckTranslated(
translator,
'/data/data/com.example.app-9.apk', 757,
'/data/data/com.example.app-9.apk!lib/libfoo.so', 557)
# Offset is within the second compressed library.
self._CheckUntranslated(
translator,
'/data/data/com.example.app-9/base.apk', 2800)
# Offset is within the third uncompressed library.
self._CheckTranslated(
translator,
'/data/data/com.example.app-1/base.apk', 3628,
'/data/data/com.example.app-1/base.apk!lib/libbar.so', 428)
# Offset is within the fourth uncompressed library with crazy. prefix
self._CheckTranslated(
translator,
'/data/data/com.example.app-XX/base.apk', 6500,
'/data/data/com.example.app-XX/base.apk!lib/libzoo.so', 300)
# Out-of-bounds apk offset.
self._CheckUntranslated(
translator,
'/data/data/com.example.app-1/base.apk', 10000)
# Invalid package name.
self._CheckUntranslated(
translator, '/data/data/com.example2.app-1/base.apk', 757)
# Invalid apk name.
self._CheckUntranslated(
translator, '/data/data/com.example.app-2/not-base.apk', 100)
# Invalid file extensions.
self._CheckUntranslated(
translator, '/data/data/com.example.app-2/base', 100)
self._CheckUntranslated(
translator, '/data/data/com.example.app-2/base.apk.dex', 100)
def testBundleApks(self):
    """Two split APKs of the same package must translate independently."""
    mock_libs1 = MockApkNativeLibraries()
    mock_libs1.AddTestEntries([
        ('lib/test-abi/libfoo.so', 200, 2000),
        ('lib/test-abi/libbbar.so', 3200, 3000),
    ])
    mock_libs2 = MockApkNativeLibraries()
    mock_libs2.AddTestEntries([
        ('lib/test-abi/libzoo.so', 200, 2000),
        ('lib/test-abi/libtool.so', 3000, 4000),
    ])
    translator = apk_native_libs.ApkLibraryPathTranslator()
    # Same package registered twice, once per split APK file name.
    translator.AddHostApk('com.example.app', mock_libs1, 'base-master.apk')
    translator.AddHostApk('com.example.app', mock_libs2, 'feature-master.apk')
    self._CheckTranslated(
        translator,
        '/data/app/com.example.app-XUIYIUW/base-master.apk', 757,
        '/data/app/com.example.app-XUIYIUW/base-master.apk!lib/libfoo.so', 557)
    self._CheckTranslated(
        translator,
        '/data/app/com.example.app-XUIYIUW/feature-master.apk', 3200,
        '/data/app/com.example.app-XUIYIUW/feature-master.apk!lib/libtool.so',
        200)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ObsidianBlk/GemRB--Unofficial- | gemrb/GUIScripts/bg2/CharGen2.py | 11 | 1084 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003-2004 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# character generation - gender; next race (CharGen2)
import GemRB
import CharGenCommon
from ie_stats import IE_RACE
def OnLoad():
    """Gender step of character generation (stage 2): reset any previously
    selected race before displaying the overview, since race is chosen next."""
    slot = GemRB.GetVar ("Slot")
    # clear the race both in the GUI variable and on the player's stat block
    GemRB.SetVar ("Race", 0)
    GemRB.SetPlayerStat (slot, IE_RACE, 0)
    CharGenCommon.DisplayOverview (2)
    return
| gpl-2.0 |
harayz/raspberry_pwn | src/pentest/voiper/sulley/sulley/sessions.py | 8 | 44977 | import re
import sys
import zlib
import time
import socket
import cPickle
import threading
import BaseHTTPServer
import pedrpc
import pgraph
import sex
import primitives
########################################################################################################################
class target:
    '''
    Target descriptor container.

    Holds the host/port of the system under test plus optional PED-RPC
    monitor agents (netmon, procmon, vmcontrol) which the caller assigns
    manually after instantiation.
    '''

    def __init__ (self, host, port, **kwargs):
        '''
        @type  host: String
        @param host: Hostname or IP address of target system
        @type  port: Integer
        @param port: Port of target service
        '''
        self.host = host
        self.port = port

        # set these manually once target is instantiated.
        self.netmon = None
        self.procmon = None
        self.vmcontrol = None
        self.netmon_options = {}
        self.procmon_options = {}
        self.vmcontrol_options = {}
        # lowered by the session/GUI to abort the connect loop below.
        self.running_flag = True

    def _wait_and_configure (self, agent, options, err_msg):
        '''
        Block until the given PED-RPC agent answers alive(), then push every
        configured option to it through its set_<option>() methods.

        @type  agent:   PED-RPC proxy
        @param agent:   Monitor agent (procmon or netmon) to configure
        @type  options: Dictionary
        @param options: Option name -> value mapping to apply
        @type  err_msg: String
        @param err_msg: Message written to stderr while the agent is unreachable
        '''
        while self.running_flag:
            try:
                if agent.alive():
                    break
            # was a bare "except:"; Exception keeps Ctrl-C (KeyboardInterrupt)
            # able to break out of this wait loop.
            except Exception:
                sys.stderr.write(err_msg)
            time.sleep(1)

        # connection established, push the options across.
        if self.running_flag:
            for key in options.keys():
                # equivalent to the previous eval("agent.set_%s(...)") but
                # without building and evaluating source code at runtime.
                getattr(agent, "set_%s" % key)(options[key])

    def pedrpc_connect (self):
        '''
        Pass the specified target parameters to the PED-RPC server(s),
        waiting for each configured monitor to come alive first.
        '''
        if self.procmon:
            self._wait_and_configure(
                self.procmon, self.procmon_options,
                "Procmon exception in sessions.py : self.procmon.alive()\n")

        if self.netmon:
            self._wait_and_configure(
                self.netmon, self.netmon_options,
                "Netmon exception in sessions.py : self.netmon.alive()\n")
########################################################################################################################
class connection (pgraph.edge.edge):
    '''
    A pgraph edge extended with an optional per-transmission callback.

    The callback lets users implement challenge/response behaviour (or fill
    in values such as a length that depends on an earlier packet) in between
    node transmissions. The callback must follow this prototype::

        def callback(session, node, edge, sock)

    where node is the node about to be sent, edge is the last edge along the
    current fuzz path to "node", session is the session instance (useful for
    data such as session.last_recv) and sock is the live socket.
    '''

    def __init__ (self, src, dst, callback=None):
        '''
        @type  src:      Integer
        @param src:      Edge source ID
        @type  dst:      Integer
        @param dst:      Edge destination ID
        @type  callback: Function
        @param callback: (Optional, def=None) Callback function to pass received data to between node xmits
        '''
        # run the parent classes initialization routine first.
        pgraph.edge.edge.__init__(self, src, dst)
        self.callback = callback
########################################################################################################################
class session (pgraph.graph):
def __init__ (self, session_filename=None, audit_folder=None, skip=0, sleep_time=.2, log_level=2, proto="tcp", restart_interval=0, timeout=5.0, web_port=26001, crash_threshold=3, trans_in_q=None):
    '''
    Extends pgraph.graph and provides a container for architecting protocol dialogs.

    @type  session_filename: String
    @kwarg session_filename: (Optional, def=None) Filename to serialize persistent data to
    @type  audit_folder:     String
    @kwarg audit_folder:     (Optional, def=None) Directory crash logs are written to
    @type  skip:             Integer
    @kwarg skip:             (Optional, def=0) Number of test cases to skip
    @type  sleep_time:       Float
    @kwarg sleep_time:       (Optional, def=.2) Time to sleep in between tests
    @type  log_level:        Integer
    @kwarg log_level:        (Optional, def=2) Set the log level, higher number == more log messages
    @type  proto:            String
    @kwarg proto:            (Optional, def="tcp") Communication protocol, "tcp" or "udp"
    @type  restart_interval: Integer
    @kwarg restart_interval: (Optional, def=0) Restart the target after n test cases, disable by setting to 0
    @type  timeout:          Float
    @kwarg timeout:          (Optional, def=5.0) Seconds to wait for a send/recv prior to timing out
    @type  web_port:         Integer
    @kwarg web_port:         (Optional, def=26001) Port the web control interface listens on
    @type  crash_threshold:  Integer
    @kwarg crash_threshold:  (Optional, def=3) Maximum number of crashes allowed before a node is exhausted
    @type  trans_in_q:       Queue
    @kwarg trans_in_q:       (Optional, def=None) Queue sent data is mirrored to (transaction manager)
    '''
    # run the parent classes initialization routine first.
    pgraph.graph.__init__(self)

    self.session_filename = session_filename
    self.audit_folder = audit_folder
    self.skip = skip
    self.sleep_time = sleep_time
    self.log_level = log_level
    self.proto = proto
    self.restart_interval = restart_interval
    self.timeout = timeout
    self.web_port = web_port
    self.crash_threshold = crash_threshold
    self.trans_in_q = trans_in_q

    # runtime bookkeeping, partly overwritten by import_file() below.
    self.total_num_mutations = 0
    self.total_mutant_index = 0
    self.crashes_detected = 0
    self.fuzz_node = None
    self.targets = []
    self.netmon_results = {}
    self.procmon_results = {}
    self.pause_flag = False
    self.running_flag = True
    self.crashing_primitives = {}

    # map the protocol name onto the socket type constant; from here on
    # self.proto holds the socket constant, not the string.
    if self.proto == "tcp":
        self.proto = socket.SOCK_STREAM
    elif self.proto == "udp":
        self.proto = socket.SOCK_DGRAM
    else:
        raise sex.error("INVALID PROTOCOL SPECIFIED: %s" % self.proto)

    # import settings if they exist (resumes a previously saved session).
    self.import_file()

    # create a root node. we do this because we need to start fuzzing from a single point and the user may want
    # to specify a number of initial requests.
    self.root = pgraph.node()
    self.root.name = "__ROOT_NODE__"
    self.root.label = self.root.name
    self.last_recv = None

    self.add_node(self.root)
####################################################################################################################
def decrement_total_mutant_index(self, val):
    '''Subtract val from the running mutant counter, clamping at zero.'''
    self.total_mutant_index = max(self.total_mutant_index - val, 0)
def add_node (self, node):
    '''
    Add a pgraph node to the graph. We overload this routine to automatically generate and assign an ID
    (its position in the node table) whenever a node is added.

    @type  node: pGRAPH Node
    @param node: Node to add to session graph
    @rtype:  session
    @return: self, to allow chaining
    '''
    node.number = len(self.nodes)
    node.id = len(self.nodes)

    # dict.has_key() is Python-2-only; the "in" operator is equivalent and
    # also forward compatible.
    if node.id not in self.nodes:
        self.nodes[node.id] = node

    return self
####################################################################################################################
def add_target (self, target):
    '''
    Register a fuzzing target with this session. Multiple targets can be
    added for parallel fuzzing.

    @type  target: session.target
    @param target: Target to add to session
    '''
    # push the configured options out to the target's PED-RPC agents first...
    target.pedrpc_connect()
    # ...then start tracking it.
    self.targets.append(target)
####################################################################################################################
def connect (self, src, dst=None, callback=None):
    '''
    Create a connection between the two requests (nodes) and register an optional callback to process in
    between transmissions of the source and destination request (useful for challenge/response systems, or
    when a later packet depends on data from an earlier response). The callback prototype is::

        def callback(session, node, edge, sock)

    The session maintains a top level root node that all initial requests must be connected to. Called with a
    single argument, the supplied node is attached to that root node::

        sess.connect(s_get("HTTP"))   # identical to sess.connect(sess.root, s_get("HTTP"))

    @type  src:      String or Request (Node)
    @param src:      Source request name or request node
    @type  dst:      String or Request (Node)
    @param dst:      Destination request name or request node
    @type  callback: Function
    @param callback: (Optional, def=None) Callback function to pass received data to between node xmits

    @rtype:  pgraph.edge
    @return: The edge between the src and dst.
    '''
    # if only a source was provided, then make it the destination and set the source to the root node.
    if not dst:
        dst = src
        src = self.root

    # if source or destination is a name, resolve the actual node.
    if type(src) is str:
        src = self.find_node("name", src)

    if type(dst) is str:
        dst = self.find_node("name", dst)

    # if source or destination is not in the graph, add it.
    if src != self.root and not self.find_node("name", src.name):
        self.add_node(src)

    if not self.find_node("name", dst.name):
        self.add_node(dst)

    # create an edge between the two nodes and add it to the graph.
    edge = connection(src.id, dst.id, callback)
    self.add_edge(edge)

    return edge
####################################################################################################################
def export_file (self):
    '''
    Serialize the persistent session state (counters, results, flags) to
    self.session_filename as a zlib-compressed cPickle blob. Silently does
    nothing when no session filename was configured.

    @see: import_file()
    '''
    if not self.session_filename:
        return

    data = {
        "session_filename"    : self.session_filename,
        # stored so the next run resumes after the last completed test case.
        "skip"                : self.total_mutant_index,
        "sleep_time"          : self.sleep_time,
        "log_level"           : self.log_level,
        "proto"               : self.proto,
        "crashes_detected"    : self.crashes_detected,
        "restart_interval"    : self.restart_interval,
        "timeout"             : self.timeout,
        "web_port"            : self.web_port,
        "crash_threshold"     : self.crash_threshold,
        "total_num_mutations" : self.total_num_mutations,
        "total_mutant_index"  : self.total_mutant_index,
        "netmon_results"      : self.netmon_results,
        "procmon_results"     : self.procmon_results,
        "pause_flag"          : self.pause_flag,
    }

    # guarantee the handle is closed even if pickling/compression raises.
    fh = open(self.session_filename, "wb+")
    try:
        fh.write(zlib.compress(cPickle.dumps(data, protocol=2)))
    finally:
        fh.close()
####################################################################################################################
def waitForRegister(self):
    '''
    Hook called after a target restart. This method should be overwritten by
    any fuzzer that needs to wait for the client to register after the
    target has restarted. Default: no-op.
    '''
    pass
####################################################################################################################
def updateProgressBar(self, x, y):
    '''
    Progress hook (x of y test cases done). This method should be overridden
    by the GUI; default is a no-op.
    '''
    pass
####################################################################################################################
def fuzz (self, this_node=None, path=[]):
    '''
    Call this routine to get the ball rolling. No arguments are necessary as they are both utilized internally
    during the recursive traversal of the session graph.

    NOTE(review): `path` is a mutable default argument shared across top-level
    calls; it relies on the pop() at the end restoring it — confirm intentional.

    @type  this_node: request (node)
    @param this_node: (Optional, def=None) Current node that is being fuzzed.
    @type  path:      List
    @param path:      (Optional, def=[]) Nodes along the path to the current one being fuzzed.
    '''
    # if no node is specified, then we start from the root node and initialize the session.
    if not this_node:
        # we can't fuzz if we don't have at least one target and one request.
        if not self.targets:
            raise sex.error("NO TARGETS SPECIFIED IN SESSION")
        if not self.edges_from(self.root.id):
            raise sex.error("NO REQUESTS SPECIFIED IN SESSION")
        this_node = self.root
        # spin up the web interface; failure to bind aborts the run quietly.
        try: self.server_init()
        except: return

    # XXX - TODO - complete parallel fuzzing, will likely have to thread out each target
    target = self.targets[0]

    # step through every edge from the current node.
    for edge in self.edges_from(this_node.id):
        # the destination node is the one actually being fuzzed.
        self.fuzz_node = self.nodes[edge.dst]
        num_mutations = self.fuzz_node.num_mutations()

        # keep track of the path as we fuzz through it, don't count the root node.
        # we keep track of edges as opposed to nodes because if there is more than one path through a set of
        # given nodes we don't want any ambiguity.
        if edge.src != self.root.id:
            path.append(edge)

        current_path = " -> ".join([self.nodes[e.src].name for e in path])
        current_path += " -> %s" % self.fuzz_node.name

        self.log("current fuzz path: %s" % current_path, 2)
        self.log("fuzzed %d of %d total cases" % (self.total_mutant_index, self.total_num_mutations), 2)
        self.updateProgressBar(self.total_mutant_index, self.total_num_mutations)
        self.update_GUI_crashes(self.crashes_detected)

        done_with_fuzz_node = False
        crash_count = 0

        # loop through all possible mutations of the fuzz node.
        while not done_with_fuzz_node:
            # the GUI sets/unsets this flag when it wants the fuzzer to die;
            # command line users can just ctrl-c/z or ctrl-alt-delete
            if not self.running_flag:
                break

            # if we need to pause, do so.
            self.pause()

            # if we have exhausted the mutations of the fuzz node, break out of the while(1).
            # note: when mutate() returns False, the node has been reverted to the default (valid) state.
            if not self.fuzz_node.mutate():
                self.log("all possible mutations for current fuzz node exhausted", 2)
                done_with_fuzz_node = True
                continue

            # make a record in the session that a mutation was made.
            self.total_mutant_index += 1

            # if we don't need to skip the current test case.
            if self.total_mutant_index > self.skip:
                # if we've hit the restart interval, restart the target.
                if self.restart_interval and self.total_mutant_index % self.restart_interval == 0:
                    self.log("restart interval of %d reached" % self.restart_interval)
                    self.restart_target(target)
                    # call this method in case we should wait for the client app to register with us after a restart
                    self.waitForRegister()

                self.log("fuzzing %d of %d" % (self.fuzz_node.mutant_index, num_mutations), 2)

                # attempt to complete a fuzz transmission. keep trying until we are successful, whenever a failure
                # occurs, restart the target.
                while 1:
                    try:
                        # instruct the debugger/sniffer that we are about to send a new fuzz.
                        if target.procmon: target.procmon.pre_send(self.total_mutant_index)
                        if target.netmon: target.netmon.pre_send(self.total_mutant_index)

                        # establish a connection to the target.
                        self.host = target.host
                        self.port = target.port
                        sock = socket.socket(socket.AF_INET, self.proto)
                        sock.settimeout(self.timeout)

                        # if the user registered a pre-send function, pass it the sock and let it do the deed.
                        self.pre_send(sock)

                        # send out valid requests for each node in the current path up to the node we are fuzzing.
                        for e in path:
                            node = self.nodes[e.src]
                            self.transmit(sock, node, e, target)

                        # now send the current node we are fuzzing.
                        self.transmit(sock, self.fuzz_node, edge, target)
                        self.updateProgressBar(self.total_mutant_index, self.total_num_mutations)

                        # if we reach this point the send was successful, so break out of the while(1).
                        break
                    except sex.error, e:
                        sys.stderr.write("CAUGHT SULLEY EXCEPTION\n")
                        sys.stderr.write("\t" + e.__str__() + "\n")
                        sys.exit(1)
                        # NOTE(review): everything below is unreachable after sys.exit(1); it looks like the
                        # original retry-and-restart logic that was replaced with a hard exit — confirm intent.
                        # close the socket.
                        self.close_socket(sock)
                        self.log("failed connecting to %s:%d" % (target.host, target.port))
                        self.log("restarting target and trying again")
                        self.restart_target(target)

                # if the user registered a post-send function, pass it the sock and let it do the deed.
                # we do this outside the try/except loop because if our fuzz causes a crash then the post_send()
                # will likely fail and we don't want to sit in an endless loop.
                self.post_send(sock)

                # done with the socket.
                # The following is necessary because in the case of a
                # CANCEL being sent to an INVITE we need the socket to live
                # for a little longer
                # sock.close()
                self.close_socket(sock)

                # delay in between test cases.
                self.log("sleeping for %f seconds" % self.sleep_time, 5)
                time.sleep(self.sleep_time)

                # poll the PED-RPC endpoints (netmon, procmon etc...) for the target.
                self.poll_pedrpc(target)

                # serialize the current session state to disk.
                self.export_file()

        # recursively fuzz the remainder of the nodes in the session graph.
        if not self.running_flag:
            break
        self.fuzz(self.fuzz_node, path)

    # finished with the last node on the path, pop it off the path stack.
    if path:
        path.pop()
####################################################################################################################
def close_socket(self, sock):
    '''
    Close a given socket. Factored out so VoIPER can override it (e.g. to
    keep the socket alive when a CANCEL must follow an INVITE).

    @type  sock: Socket
    @param sock: The socket to be closed
    '''
    sock.close()
####################################################################################################################
def import_file (self):
    '''
    Load various object values from disk, resuming a previously saved
    session. Silently does nothing when the session file does not exist or
    cannot be read/decoded.

    @see: export_file()
    '''
    try:
        fh = open(self.session_filename, "rb")
        try:
            data = cPickle.loads(zlib.decompress(fh.read()))
        finally:
            # close the handle even when decompression/unpickling fails.
            fh.close()
    # was a bare "except:"; Exception still covers a None/missing/corrupt
    # session file without swallowing KeyboardInterrupt/SystemExit.
    except Exception:
        return

    # update the skip variable to pick up fuzzing from last test case.
    self.skip = data["total_mutant_index"]

    self.session_filename = data["session_filename"]
    self.sleep_time = data["sleep_time"]
    self.log_level = data["log_level"]
    self.proto = data["proto"]
    self.restart_interval = data["restart_interval"]
    self.timeout = data["timeout"]
    self.web_port = data["web_port"]
    self.crash_threshold = data["crash_threshold"]
    self.total_num_mutations = data["total_num_mutations"]
    self.total_mutant_index = data["total_mutant_index"]
    self.netmon_results = data["netmon_results"]
    self.procmon_results = data["procmon_results"]
    self.pause_flag = data["pause_flag"]
    self.crashes_detected = data["crashes_detected"]
####################################################################################################################
def log (self, msg, level=1):
    '''
    If the supplied message falls under the current log level, print the specified message to screen,
    prefixed with a timestamp.

    @type  msg:   String
    @param msg:   Message to log
    @type  level: Integer
    @param level: (Optional, def=1) Verbosity threshold; only shown when log_level >= level
    '''
    if self.log_level >= level:
        print "[%s] %s" % (time.strftime("%I:%M.%S"), msg)
####################################################################################################################
def num_mutations (self, this_node=None, path=[]):
    '''
    Number of total mutations in the graph. The logic of this routine is identical to that of fuzz(). See fuzz()
    for inline comments. The member variable self.total_num_mutations is updated appropriately by this routine.

    NOTE(review): `path` is a mutable default argument shared across top-level
    calls, mirroring fuzz(); the trailing pop() restores it on unwind.

    @type  this_node: request (node)
    @param this_node: (Optional, def=None) Current node that is being fuzzed.
    @type  path:      List
    @param path:      (Optional, def=[]) Nodes along the path to the current one being fuzzed.

    @rtype:  Integer
    @return: Total number of mutations in this session.
    '''
    if not this_node:
        this_node = self.root
        self.total_num_mutations = 0

    for edge in self.edges_from(this_node.id):
        next_node = self.nodes[edge.dst]
        self.total_num_mutations += next_node.num_mutations()

        if edge.src != self.root.id:
            path.append(edge)

        self.num_mutations(next_node, path)

    # finished with the last node on the path, pop it off the path stack.
    if path:
        path.pop()

    return self.total_num_mutations
####################################################################################################################
def pause (self):
    '''
    Block for as long as the pause flag is raised, polling once a second.
    '''
    while self.pause_flag:
        time.sleep(1)
####################################################################################################################
def poll_pedrpc (self, target):
    '''
    Poll the PED-RPC endpoints (netmon, procmon etc...) for the target and record their results for the
    current test case. When the process monitor reports a crash: log it, write the sent data to the audit
    folder and, once the per-primitive crash threshold is reached, exhaust that primitive's remaining
    mutations. Finally restart the target.

    @type  target: session.target
    @param target: Session target whose PED-RPC services we are polling
    '''
    # kill the pcap thread and see how many bytes the sniffer recorded.
    if target.netmon:
        bytes = target.netmon.post_send()
        self.log("netmon captured %d bytes for test case #%d" % (bytes, self.total_mutant_index), 2)
        self.netmon_results[self.total_mutant_index] = bytes

    # check if our fuzz crashed the target. procmon.post_send() returns False if the target access violated.
    if target.procmon:
        # an error in the connection can result in nothing being returned, so retry until we get a real
        # (alive, crash_type) answer.
        ret_val = None
        while not ret_val:
            ret_val = target.procmon.post_send()
        alive = ret_val[0]
        crash_type = ret_val[1]

        if not alive:
            self.log("procmon detected %s on test case #%d" % (crash_type, self.total_mutant_index))
            self.crashes_detected += 1
            self.update_GUI_crashes(self.crashes_detected)

            # retrieve the primitive that caused the crash and increment its individual crash count.
            self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant, 0) + 1

            # notify with as much information as possible.
            if not self.fuzz_node.mutant.name: msg = "primitive lacks a name, "
            else: msg = "primitive name: %s, " % self.fuzz_node.mutant.name

            msg += "type: %s, default value: %s" % (self.fuzz_node.mutant.s_type, self.fuzz_node.mutant.original_value)
            self.log(msg)

            # print crash synopsis if access violation
            if crash_type == "access violation":
                self.procmon_results[self.total_mutant_index] = target.procmon.get_crash_synopsis()
                self.log(self.procmon_results[self.total_mutant_index].split("\n")[0], 2)

            # log the sent data to disk
            if self.audit_folder != None:
                crash_log_name = self.audit_folder + '/' + \
                                 str(self.fuzz_node.id) + '_' + \
                                 str(self.total_mutant_index) + '.crashlog'
                crash_log = open(crash_log_name, 'w')
                try:
                    crash_log.write(self.fuzz_node.sent_data)
                finally:
                    # make sure the crash log handle is released even if the write fails.
                    crash_log.close()
                self.log('Fuzz request logged to ' + crash_log_name, 2)

            # if the user-supplied crash threshold is reached, exhaust this node.
            if self.crashing_primitives[self.fuzz_node.mutant] >= self.crash_threshold:
                # as long as the mutant is not a group primitive. BUGFIX: the previous code type-checked
                # the crash *counter* (an int) instead of the mutant itself, so this guard never fired.
                if not isinstance(self.fuzz_node.mutant, primitives.group):
                    skipped = self.fuzz_node.mutant.exhaust()
                    self.log("crash threshold reached for this primitive, exhausting %d mutants." % skipped)
                    self.total_mutant_index += skipped

            # start the target back up.
            self.restart_target(target, stop_first=False)
####################################################################################################################
def update_GUI_crashes(self, num_crashes):
    '''
    Hook for a GUI that wants updates of the number of crashes detected.
    Default is a no-op.

    @type  num_crashes: Integer
    @param num_crashes: Running total of crashes detected this session
    '''
    pass
####################################################################################################################
def post_send (self, sock):
    '''
    Overload or replace this routine to specify actions to run after each fuzz request. The order of events is
    as follows::

        pre_send() - req - callback ... req - callback - post_send()

    When fuzzing RPC for example, register this method to tear down the RPC request.

    @see: pre_send()

    @type  sock: Socket
    @param sock: Connected socket to target
    '''
    # default to doing nothing.
    pass
####################################################################################################################
def pre_send (self, sock):
    '''
    Overload or replace this routine to specify actions to run prior to each fuzz request. The order of events
    is as follows::

        pre_send() - req - callback ... req - callback - post_send()

    When fuzzing RPC for example, register this method to establish the RPC bind.

    @see: post_send()

    @type  sock: Socket
    @param sock: Connected socket to target
    '''
    # default to doing nothing.
    pass
####################################################################################################################
def restart_target (self, target, stop_first=True):
    '''
    Restart the fuzz target. If a VMControl is available revert the snapshot, if a process monitor is available
    restart the target process. Otherwise, do nothing but sleep and hope the target recovers on its own.

    @type  target:     session.target
    @param target:     Target we are restarting
    @type  stop_first: Boolean
    @param stop_first: (Optional, def=True) Stop the target process before starting it again (procmon path only)
    '''
    # vm restarting is the preferred method so try that first.
    if target.vmcontrol:
        self.log("restarting target virtual machine")
        target.vmcontrol.restart_target()

    # if we have a connected process monitor, restart the target process.
    elif target.procmon:
        self.log("restarting target process")
        if stop_first:
            target.procmon.stop_target()
        target.procmon.start_target()

        # give the process a few seconds to settle in.
        time.sleep(3)

    # otherwise all we can do is wait a while for the target to recover on its own.
    else:
        self.log("no vmcontrol or procmon channel available ... sleeping for 5 minutes")
        time.sleep(300)

    # pass specified target parameters to the PED-RPC server to re-establish connections.
    target.pedrpc_connect()
####################################################################################################################
def server_init (self):
    '''
    Called by fuzz() on first run (not on recursive re-entry) to initialize variables and spawn the web
    interface thread.
    '''
    self.total_mutant_index = 0
    self.total_num_mutations = self.num_mutations()

    # spawn the web interface.
    t = web_interface_thread(self)
    t.start()
####################################################################################################################
def transmit (self, sock, node, edge, target):
    '''
    Render and transmit a node, process callbacks accordingly. The rendered (possibly truncated) data is
    stored on node.sent_data and any TCP response in self.last_recv.

    @type  sock:   Socket
    @param sock:   Socket to transmit node on
    @type  node:   Request (Node)
    @param node:   Request/Node to transmit
    @type  edge:   Connection (pgraph.edge)
    @param edge:   Edge along the current fuzz path from "node" to next node.
    @type  target: session.target
    @param target: Target we are transmitting to
    '''
    data = None

    self.log("xmitting: [%d.%d]" % (node.id, self.total_mutant_index), level=2)

    # if the edge has a callback, process it. the callback has the option to render the node, modify it and return.
    if edge.callback:
        data = edge.callback(self, node, edge, sock)

    # if no data was returned by the callback, render the node here.
    if not data:
        data = node.render()

    # if data length is > 65507 and proto is UDP, truncate it.
    # XXX - this logic does not prevent duplicate test cases, need to address this in the future.
    if self.proto == socket.SOCK_DGRAM:
        # max UDP packet size.
        if len(data) > 65507:
            #self.log("Too much data for UDP, truncating to 65507 bytes")
            data = data[:65507]

    # pass the data off to the transaction manager to be added to a transaction
    if self.trans_in_q:
        self.trans_in_q.put((True, data, 2, (self.host, self.port), 1.5, sock))

    try:
        # sendto() works for both UDP and connected TCP sockets here.
        sock.sendto(data, (self.host, self.port))
        node.sent_data = data
    except Exception, inst:
        # NOTE(review): inst[1] assumes a socket.error-style indexable exception (Python 2) — other
        # exception types would raise here; confirm only socket errors are expected.
        self.log("Socket error, send: %s" % inst[1])

    if self.proto == socket.SOCK_STREAM:
        # XXX - might have a need to increase this at some point. (possibly make it a class parameter)
        try:
            self.last_recv = sock.recv(10000)
        except Exception, e:
            self.log("Nothing received on socket.", 5)
            self.last_recv = ""
    else:
        self.last_recv = ""

    if len(self.last_recv) > 0:
        self.log("received: [%d] %s" % (len(self.last_recv), self.last_recv), level=10)
########################################################################################################################
class web_interface_handler (BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
    # NOTE(review): the base __init__ handles the request synchronously, so this assignment runs after the
    # request was already served; the session reference actually used is the *class* attribute installed by
    # web_interface_server — confirm this line is intentional.
    self.session = None
def commify (self, number):
    '''Return number as a string with comma thousands separators, e.g. 1234567 -> "1,234,567".'''
    text = str(number)
    # repeatedly insert a comma before the last unseparated group of three digits
    grouper = re.compile(r"^(-?\d+)(\d{3})")
    while True:
        text, substitutions = grouper.subn(r"\1,\2", text)
        if not substitutions:
            break
    return text
def do_GET (self):
    # all HTTP verbs share a single handler.
    self.do_everything()
def do_HEAD (self):
    # all HTTP verbs share a single handler.
    self.do_everything()
def do_POST (self):
    # all HTTP verbs share a single handler.
    self.do_everything()
def do_everything (self):
    '''
    Single dispatch point for all requests: toggles the session pause flag
    based on substrings of the URL, then renders the matching view.
    '''
    # substring matching on the raw path doubles as the "router".
    if "pause" in self.path:
        self.session.pause_flag = True
    if "resume" in self.path:
        self.session.pause_flag = False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    if "view_crash" in self.path:
        response = self.view_crash(self.path)
    elif "view_pcap" in self.path:
        response = self.view_pcap(self.path)
    else:
        response = self.view_index()

    self.wfile.write(response)
def log_error (self, *args, **kwargs):
    # silence the default per-request error logging of BaseHTTPRequestHandler.
    pass
def log_message (self, *args, **kwargs):
    # silence the default per-request access logging of BaseHTTPRequestHandler.
    pass
def version_string (self):
    # reported in the HTTP "Server" response header.
    return "Sulley Fuzz Session"
def view_crash (self, path):
    '''Render the stored crash synopsis for the test case number at the end of the URL path.'''
    test_number = int(path.split("/")[-1])
    return "<html><pre>%s</pre></html>" % self.session.procmon_results[test_number]
def view_pcap (self, path):
    # placeholder: pcap viewing is not implemented, just echo the path back.
    return path
def view_index (self):
    '''
    Render the main status page: overall/current progress bars, pause/resume
    controls and the table of procmon crash results. The HTML template uses
    %%-escapes for literal percent signs and %(name)s slots filled in below.
    '''
    response = """
    <html>
    <head>
    <title>Sulley Fuzz Control</title>
    <style>
    a:link {color: #FF8200; text-decoration: none;}
    a:visited {color: #FF8200; text-decoration: none;}
    a:hover {color: #C5C5C5; text-decoration: none;}
    body
    {
    background-color: #000000;
    font-family: Arial, Helvetica, sans-serif;
    font-size: 12px;
    color: #FFFFFF;
    }
    td
    {
    font-family: Arial, Helvetica, sans-serif;
    font-size: 12px;
    color: #A0B0B0;
    }
    .fixed
    {
    font-family: Courier New;
    font-size: 12px;
    color: #A0B0B0;
    }
    .input
    {
    font-family: Arial, Helvetica, sans-serif;
    font-size: 11px;
    color: #FFFFFF;
    background-color: #333333;
    border: thin none;
    height: 20px;
    }
    </style>
    </head>
    <body>
    <center>
    <table border=0 cellpadding=5 cellspacing=0 width=750><tr><td>
    <!-- begin bounding table -->
    <table border=0 cellpadding=5 cellspacing=0 width="100%%">
    <tr bgcolor="#333333">
    <td><div style="font-size: 20px;">Sulley Fuzz Control</div></td>
    <td align=right><div style="font-weight: bold; font-size: 20px;">%(status)s</div></td>
    </tr>
    <tr bgcolor="#111111">
    <td colspan=2 align="center">
    <table border=0 cellpadding=0 cellspacing=5>
    <tr bgcolor="#111111">
    <td><b>Total:</b></td>
    <td>%(total_mutant_index)s</td>
    <td>of</td>
    <td>%(total_num_mutations)s</td>
    <td class="fixed">%(progress_total_bar)s</td>
    <td>%(progress_total)s</td>
    </tr>
    <tr bgcolor="#111111">
    <td><b>%(current_name)s:</b></td>
    <td>%(current_mutant_index)s</td>
    <td>of</td>
    <td>%(current_num_mutations)s</td>
    <td class="fixed">%(progress_current_bar)s</td>
    <td>%(progress_current)s</td>
    </tr>
    </table>
    </td>
    </tr>
    <tr>
    <td>
    <form method=get action="/pause">
    <input class="input" type="submit" value="Pause">
    </form>
    </td>
    <td align=right>
    <form method=get action="/resume">
    <input class="input" type="submit" value="Resume">
    </form>
    </td>
    </tr>
    </table>
    <!-- begin procmon results -->
    <table border=0 cellpadding=5 cellspacing=0 width="100%%">
    <tr bgcolor="#333333">
    <td nowrap>Test Case #</td>
    <td>Crash Synopsis</td>
    <td nowrap>Captured Bytes</td>
    </tr>
    """

    # one table row per recorded crash, sorted by test case number.
    keys = self.session.procmon_results.keys()
    keys.sort()
    for key in keys:
        val = self.session.procmon_results[key]
        bytes = " "

        if self.session.netmon_results.has_key(key):
            bytes = self.commify(self.session.netmon_results[key])

        response += '<tr><td class="fixed"><a href="/view_crash/%d">%06d</a></td><td>%s</td><td align=right>%s</td></tr>' % (key, key, val.split("\n")[0], bytes)

    response += """
    <!-- end procmon results -->
    </table>
    <!-- end bounding table -->
    </td></tr></table>
    </center>
    </body>
    </html>
    """

    # what is the fuzzing status.
    if self.session.pause_flag:
        status = "<font color=red>PAUSED</font>"
    else:
        status = "<font color=green>RUNNING</font>"

    # if there is a current fuzz node.
    if self.session.fuzz_node:
        # which node (request) are we currently fuzzing.
        if self.session.fuzz_node.name:
            current_name = self.session.fuzz_node.name
        else:
            current_name = "[N/A]"

        # render sweet progress bars (50-character wide ASCII gauges).
        progress_current = float(self.session.fuzz_node.mutant_index) / float(self.session.fuzz_node.num_mutations())
        num_bars = int(progress_current * 50)
        progress_current_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
        progress_current = "%.3f%%" % (progress_current * 100)

        progress_total = float(self.session.total_mutant_index) / float(self.session.total_num_mutations)
        num_bars = int(progress_total * 50)
        progress_total_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
        progress_total = "%.3f%%" % (progress_total * 100)

        response %= \
        {
            "current_mutant_index" : self.commify(self.session.fuzz_node.mutant_index),
            "current_name" : current_name,
            "current_num_mutations" : self.commify(self.session.fuzz_node.num_mutations()),
            "progress_current" : progress_current,
            "progress_current_bar" : progress_current_bar,
            "progress_total" : progress_total,
            "progress_total_bar" : progress_total_bar,
            "status" : status,
            "total_mutant_index" : self.commify(self.session.total_mutant_index),
            "total_num_mutations" : self.commify(self.session.total_num_mutations),
        }
    else:
        # no fuzz node yet (session not started): render an empty dashboard.
        response %= \
        {
            "current_mutant_index" : "",
            "current_name" : "",
            "current_num_mutations" : "",
            "progress_current" : "",
            "progress_current_bar" : "",
            "progress_total" : "",
            "progress_total_bar" : "",
            "status" : "<font color=yellow>UNAVAILABLE</font>",
            "total_mutant_index" : "",
            "total_num_mutations" : "",
        }

    return response
########################################################################################################################
class web_interface_server (BaseHTTPServer.HTTPServer):
    '''
    HTTP server for the fuzzer web interface.

    http://docs.python.org/lib/module-BaseHTTPServer.html
    '''
    def __init__(self, server_address, RequestHandlerClass, session):
        BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass)
        # BaseHTTPServer creates a fresh handler instance per request, so the
        # fuzz session is shared with the handlers via a *class* attribute.
        self.RequestHandlerClass.session = session
########################################################################################################################
class web_interface_thread (threading.Thread):
    # Background thread that runs the web interface HTTP server, so that
    # serve_forever() does not block the fuzzing session.
    def __init__ (self, session):
        threading.Thread.__init__(self)
        # session supplies web_port and the state rendered by the handler.
        self.session = session
        # created lazily in run(); stays None until the thread is started
        self.server = None
    def run (self):
        # bind on all interfaces at the session's configured port
        self.server = web_interface_server(('', self.session.web_port), web_interface_handler, self.session)
        self.server.serve_forever()
| gpl-3.0 |
jh23453/privacyidea | privacyidea/lib/tokenclass.py | 2 | 54866 | # -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2017-04-27 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Change dateformat
# 2016-06-21 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add method to set the next_pin_change and next_password_change.
# 2016-04-29 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add get_default_settings to change the parameters before
# the token is created
# 2016-04-08 Cornelius Kölbel <cornelius@privacyidea.org>
# Avoid consecutive if statements
# Remove unreachable code
# 2015-12-18 Cornelius Kölbel <cornelius@privacyidea.org>
# Add get_setting_type
# 2015-10-12 Cornelius Kölbel <cornelius@privacyidea.org>
# Add testconfig classmethod
# 2015-09-07 Cornelius Kölbel <cornelius@privacyidea.org>
# Add challenge response decorator
# 2015-08-27 Cornelius Kölbel <cornelius@privacyidea.org>
# Add revocation of token
# * Nov 27, 2014 Cornelius Kölbel <cornelius@privacyidea.org>
# Migration to flask
# Rewrite of methods
# 100% test code coverage
# * Oct 03, 2014 Cornelius Kölbel <cornelius@privacyidea.org>
# Move the QR stuff in getInitDetail into the token classes
# * Sep 17, 2014 Cornelius Kölbel, cornelius@privacyidea.org
# Improve the return value of the InitDetail
# * May 08, 2014 Cornelius Kölbel
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is the Token Base class, which is inherited by all token types.
It depends on lib.user and lib.config.
The token object also contains a database token object as self.token.
The token object runs the self.update() method during the initialization
process in the API /token/init.
The update method takes a dictionary. Some of the following parameters:
otpkey -> the token gets created with this OTPKey
genkey -> genkey=1 : privacyIDEA generates an OTPKey, creates the token
and sends it to the client.
2stepinit -> Will do a two step rollout.
privacyIDEA creates the first part of the OTPKey, sends it
to the client and the clients needs to send back the second part.
In case of 2stepinit the key is generated from the server_component and the
client_component using the TokenClass method generate_symmetric_key.
This method is supposed to be overwritten by the corresponding token classes.
"""
import logging
import hashlib
import datetime
from .error import (TokenAdminError,
ParameterError)
from ..api.lib.utils import getParam
from .utils import generate_otpkey
from .log import log_with
from .config import (get_from_config, get_prepend_pin)
from .utils import create_img
from .user import (User,
get_username)
from ..models import (TokenRealm, Challenge, cleanup_challenges)
from .challenge import get_challenges
from .crypto import encryptPassword
from .crypto import decryptPassword
from .policydecorators import libpolicy, auth_otppin, challenge_response_allowed
from .decorators import check_token_locked
from .utils import parse_timedelta, parse_legacy_time
from policy import ACTION
from dateutil.parser import parse as parse_date_string
from dateutil.tz import tzlocal, tzutc
#DATE_FORMAT = "%d/%m/%y %H:%M"
DATE_FORMAT = '%Y-%m-%dT%H:%M%z'
# LASTAUTH is utcnow()
AUTH_DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f%z"
optional = True
required = False
log = logging.getLogger(__name__)
class TokenClass(object):
# Class properties
using_pin = True
hKeyRequired = False
mode = ['authenticate', 'challenge']
    @log_with(log)
    def __init__(self, db_token):
        """
        Create a new token object.

        :param db_token: A database token object
        :type db_token: Token
        :return: A TokenClass object
        """
        self.token = db_token
        self.type = db_token.tokentype
        # the init_details is a generic container, to store token specific
        # processing init_details e.g. for the initialization process
        # which could be retrieved in the controllers
        # this is not to be confused with the tokeninfo!
        # Both dicts below are volatile: they are NOT persisted to the DB.
        self.init_details = {}
        # These are temporary details to store during authentication
        # like the "matched_otp_counter".
        self.auth_details = {}
def set_type(self, tokentype):
"""
Set the tokentype in this object and
also in the underlying database-Token-object.
:param tokentype: The type of the token like HOTP or TOTP
:type tokentype: string
"""
tokentype = u'' + tokentype
self.type = tokentype
self.token.tokentype = tokentype
    @staticmethod
    def get_class_type():
        # The base class has no concrete type; token classes override this.
        return None
    @staticmethod
    def get_class_info(key=None, ret='all'):
        # The base class exposes no class info; token classes override this.
        return {}
    @staticmethod
    def get_class_prefix():
        # Serial number prefix; "UNK" marks the unknown base-class token.
        return "UNK"
    def get_type(self):
        # Return the tokentype string stored on the DB token.
        return self.token.tokentype
    @check_token_locked
    def set_user(self, user, report=None):
        """
        Set the user attributes (uid, resolvername, resolvertype) of a token.

        :param user: a User() object, consisting of loginname and realm
        :param report: tbdf.
        :return: None
        """
        (uid, resolvertype, resolvername) = user.get_user_identifiers()
        self.token.resolver = resolvername
        self.token.resolver_type = resolvertype
        self.token.user_id = uid
        # set the tokenrealm
        # NOTE: this *replaces* the token's realm list with the user's realm.
        self.set_realms([user.realm])
@property
def user(self):
"""
return the user (owner) of a token
If the token has no owner assigned, we return None
:return: The owner of the token
:rtype: User object
"""
user_object = None
realmname = ""
if self.token.user_id and self.token.resolver:
username = get_username(self.token.user_id, self.token.resolver)
rlist = self.token.realm_list
# FIXME: What if the token has more than one realm assigned?
if len(rlist) == 1:
realmname = rlist[0].realm.name
if username and realmname:
user_object = User(login=username,
resolver=self.token.resolver,
realm=realmname)
return user_object
def is_orphaned(self):
"""
Return True is the token is orphaned.
An orphaned token means, that it has a user assigned, but the user
does not exist in the user store (anymore)
:return: True / False
"""
orphaned = False
if self.token.user_id:
try:
if not self.user or not self.user.login:
# The token is assigned, but the username does not resolve
orphaned = True
except Exception:
# If any other resolving error occurs, we also assume the
# token to be orphaned
orphaned = True
return orphaned
    def get_user_displayname(self):
        """
        Returns a tuple of a user identifier like user@realm and the
        displayname of "givenname surname".

        NOTE(review): assumes the token has an owner; if self.user is None
        this raises AttributeError -- callers should check first.

        :return: tuple
        """
        user_object = self.user
        user_info = user_object.info
        user_identifier = "{0!s}_{1!s}".format(user_object.login, user_object.realm)
        # fall back to "." for missing name components
        user_displayname = "{0!s} {1!s}".format(user_info.get("givenname", "."),
                                                user_info.get("surname", "."))
        return user_identifier, user_displayname
    @check_token_locked
    def set_user_identifiers(self, uid, resolvername, resolvertype):
        """
        (was setUid)
        Set the user attributes of a token

        :param uid: The user id in the user source
        :param resolvername: The name of the resolver
        :param resolvertype: The type of the resolver
        :return: None
        """
        # unlike set_user() this does NOT touch the token realms
        self.token.resolver = resolvername
        self.token.resolver_type = resolvertype
        self.token.user_id = uid
@check_token_locked
def reset(self):
"""
Reset the failcounter
"""
if self.token.failcount:
# reset the failcounter and write to database
self.token.failcount = 0
self.token.save()
    @check_token_locked
    def add_init_details(self, key, value):
        """
        (was addInfo)
        Adds information to a volatile internal dict

        :return: the updated init_details dict
        """
        self.init_details[key] = value
        return self.init_details
@check_token_locked
def set_init_details(self, details):
if type(details) not in [dict]:
raise Exception("Details setting: wrong data type - must be dict")
self.init_details = details
return self.init_details
    @log_with(log)
    def get_init_details(self):
        """
        return the status of the token rollout

        :return: return the status dict.
        :rtype: dict
        """
        return self.init_details
    @check_token_locked
    def set_tokeninfo(self, info):
        """
        Set the tokeninfo field in the DB. Old values will be deleted.

        :param info: dictionary with key and value
        :type info: dict
        :return:
        """
        self.token.del_info()
        # NOTE: entries may be added to ``info`` while iterating; this is
        # safe under Python 2 only, where items() returns a list snapshot.
        for k, v in info.items():
            # check if type is a password
            if k.endswith(".type") and v == "password":
                # of type password, so we need to encrypt the value of
                # the original key (without type)
                orig_key = ".".join(k.split(".")[:-1])
                info[orig_key] = encryptPassword(info.get(orig_key, ""))
        self.token.set_info(info)
@check_token_locked
def add_tokeninfo(self, key, value, value_type=None):
"""
Add a key and a value to the DB tokeninfo
:param key:
:param value:
:return:
"""
add_info = {key: value}
if value_type:
add_info[key + ".type"] = value_type
if value_type == "password":
# encrypt the value
add_info[key] = encryptPassword(value)
self.token.set_info(add_info)
    @check_token_locked
    def check_otp(self, otpval, counter=None, window=None, options=None):
        """
        This checks the OTP value, AFTER the upper level did
        the checkPIN

        In the base class we do not know, how to calculate the
        OTP value. So we return -1.
        In case of success, we should return >=0, the counter

        :param otpval: the OTP value
        :param counter: The counter for counter based otp values
        :type counter: int
        :param window: a counter window
        :type window: int
        :param options: additional token specific options
        :type options: dict
        :return: counter of the matching OTP value.
        :rtype: int
        """
        # the defaults are resolved here so subclasses can reuse them,
        # but the base class itself always signals "no match"
        if not counter:
            counter = self.token.count
        if not window:
            window = self.token.count_window
        return -1
    def get_otp(self, current_time=""):
        """
        The default token does not support getting the otp value
        will return a tuple of four values
        a negative value is a failure.

        :return: something like: (1, pin, otpval, combined)
        """
        # -2 signals "not supported" for the base class
        return -2, 0, 0, 0
    def get_multi_otp(self, count=0, epoch_start=0, epoch_end=0,
                      curTime=None, timestamp=None):
        """
        This returns a dictionary of multiple future OTP values of a token.
        Not implemented in the base class; token classes override this.

        :param count: how many otp values should be returned
        :param epoch_start: time based tokens: start when
        :param epoch_end: time based tokens: stop when
        :param curTime: current time for TOTP token (for selftest)
        :type curTime: datetime object
        :param timestamp: unix time, current time for TOTP token (for selftest)
        :type timestamp: int
        :return: True/False, error text, OTP dictionary
        :rtype: Tuple
        """
        return False, "get_multi_otp not implemented for this tokentype", {}
    @libpolicy(auth_otppin)
    @check_token_locked
    def check_pin(self, pin, user=None, options=None):
        """
        Check the PIN of the given Password.
        Usually this is only dependent on the token itself,
        but the user object can cause certain policies.

        Each token could implement its own PIN checking behaviour.

        :param pin: the PIN (static password component), that is to be checked.
        :type pin: string
        :param user: for certain PIN policies (e.g. checking against the
                     user store) this is the user, whose
                     password would be checked. But at the moment we are
                     checking against the userstore in the decorator
                     "auth_otppin".
        :type user: User object
        :param options: the optional request parameters
        :return: If the PIN is correct, return True
        :rtype: bool
        """
        # check PIN against the token database
        # (userstore checking is handled by the auth_otppin decorator)
        res = self.token.check_pin(pin)
        return res
    @check_token_locked
    def authenticate(self, passw, user=None, options=None):
        """
        High level interface which covers the check_pin and check_otp
        This is the method that verifies single shot authentication like
        they are done with push button tokens.

        It is a high level interface to support other tokens as well, which
        do not have a pin and otp separation - they could overwrite
        this method

        If the authentication succeeds an OTP counter needs to be increased,
        i.e. the OTP value that was used for this authentication is invalidated!

        :param passw: the password which could be pin+otp value
        :type passw: string
        :param user: The authenticating user
        :type user: User object
        :param options: dictionary of additional request parameters
        :type options: dict
        :return: returns tuple of
                 1. true or false for the pin match,
                 2. the otpcounter (int) and the
                 3. reply (dict) that will be added as
                    additional information in the JSON response
                    of ``/validate/check``.
        :rtype: tuple
        """
        pin_match = False
        otp_counter = -1
        reply = None
        (res, pin, otpval) = self.split_pin_pass(passw, user=user,
                                                 options=options)
        if res != -1:
            pin_match = self.check_pin(pin, user=user, options=options)
            if pin_match is True:
                # OTP check only happens after a successful PIN match
                otp_counter = self.check_otp(otpval, options=options)
                #self.set_otp_count(otp_counter)
        return pin_match, otp_counter, reply
    def update(self, param, reset_failcount=True):
        """
        Update the token object during enrollment (API /token/init).

        Handles description, key size, OTP key generation (``genkey``),
        two-step enrollment (``2stepinit``), PIN and OTP length, and copies
        tokentype-prefixed parameters into the tokeninfo.

        :param param: a dictionary with different params like keysize,
                      description, genkey, otpkey, pin
        :type param: dict
        :param reset_failcount: whether setting the otpkey resets the
            fail counter
        """
        tdesc = getParam(param, "description", optional)
        if tdesc is not None:
            self.token.set_description(tdesc)
        # key_size as parameter overrules a previously set
        # value e.g. in hashlib in the upper classes
        key_size = getParam(param, "keysize", optional)
        if key_size is None:
            key_size = 20
        #
        # process the otpkey:
        #   if otpkey given - take this
        #   if not given
        #       if genkey == 1 : create one
        #   if required and otpkey == None:
        #      raise param Exception, that we require an otpkey
        #
        otpKey = getParam(param, "otpkey", optional)
        genkey = int(getParam(param, "genkey", optional) or 0)
        two_step_init = int(getParam(param, "2stepinit", optional) or 0)
        if two_step_init:
            if self.token.rollout_state == "clientwait":
                # We do not do 2stepinit in the second step
                raise ParameterError("2stepinit is only to be used in the "
                                     "first initialization step.")
            # In a 2-step enrollment, the server always generates a key
            genkey = 1
            # The token is disabled
            self.token.active = False
        if genkey not in [0, 1]:
            raise ParameterError("TokenClass supports only genkey in range ["
                                 "0,1] : %r" % genkey)
        if genkey == 1 and otpKey is not None:
            raise ParameterError('[ParameterError] You may either specify '
                                 'genkey or otpkey, but not both!', id=344)
        if otpKey is None and genkey == 1:
            otpKey = self._genOtpKey_(key_size)
        # otpKey still None?? - raise the exception
        if otpKey is None and self.hKeyRequired is True:
            # getParam with "required" raises if the key is missing
            otpKey = getParam(param, "otpkey", required)
        if otpKey is not None:
            if self.token.rollout_state == "clientwait":
                # If we have otpkey and the token is in the enrollment-state
                # generate the new key from server and client component
                server_component = self.token.get_otpkey().getKey()
                client_component = otpKey
                otpKey = self.generate_symmetric_key(server_component,
                                                     client_component)
                self.token.rollout_state = ""
                self.token.active = True
            self.add_init_details('otpkey', otpKey)
            self.token.set_otpkey(otpKey, reset_failcount=reset_failcount)
        if two_step_init:
            # After the key is generated, we set "waiting for the client".
            self.token.rollout_state = "clientwait"
        pin = getParam(param, "pin", optional)
        if pin is not None:
            storeHashed = True
            enc = getParam(param, "encryptpin", optional)
            if enc is not None and (enc is True or enc.lower() == "true"):
                storeHashed = False
            self.token.set_pin(pin, storeHashed)
        otplen = getParam(param, 'otplen', optional)
        if otplen is not None:
            self.set_otplen(otplen)
        # Add parameters starting with the tokentype-name to the tokeninfo:
        for p in param.keys():
            if p.startswith(self.type + "."):
                self.add_tokeninfo(p, getParam(param, p))
        return
def _genOtpKey_(self, otpkeylen=None):
'''
private method, to create an otpkey
'''
if otpkeylen is None:
if hasattr(self, 'otpkeylen'):
otpkeylen = getattr(self, 'otpkeylen')
else:
otpkeylen = 20
return generate_otpkey(otpkeylen)
    @check_token_locked
    def set_description(self, description):
        """
        Set the description on the database level

        :param description: description of the token
        :type description: string
        """
        # u'' + forces unicode (python 2)
        self.token.set_description(u'' + description)
        return
    def set_defaults(self):
        """
        Set the default values on the database level
        (taken from the system config, with hard-coded fallbacks)
        """
        self.token.otplen = int(get_from_config("DefaultOtpLen") or 6)
        self.token.count_window = int(get_from_config("DefaultCountWindow")
                                      or 10)
        self.token.maxfail = int(get_from_config("DefaultMaxFailCount") or 10)
        self.token.sync_window = int(get_from_config("DefaultSyncWindow")
                                     or 1000)
        self.token.tokentype = u'' + self.type
        return
    def delete_token(self):
        """
        delete the database token
        """
        self.token.delete()
    def save(self):
        """
        Save the database token
        """
        self.token.save()
    def resync(self, otp1, otp2, options=None):
        # No-op in the base class; counter-based tokens override this.
        pass
    def get_otp_count_window(self):
        # Return the DB token's counter look-ahead window.
        return self.token.count_window
    def get_otp_count(self):
        # Return the DB token's OTP counter.
        return self.token.count
    def is_active(self):
        # True if the token is enabled.
        return self.token.active
    def get_failcount(self):
        # Return the current fail counter.
        return self.token.failcount
    def set_failcount(self, failcount):
        """
        Set the failcounter in the database
        (not saved here; caller persists via save())
        """
        self.token.failcount = failcount
    def get_max_failcount(self):
        # Return the maximum allowed fail counter.
        return self.token.maxfail
    def get_user_id(self):
        # Return the user id in the user source (resolver).
        return self.token.user_id
    def set_realms(self, realms, add=False):
        """
        Set the list of the realms of a token.

        :param realms: realms the token should be assigned to
        :type realms: list
        :param add: if the realms should be added and not replaced
        :type add: boolean
        """
        self.token.set_realms(realms, add=add)
    def get_realms(self):
        """
        Return a list of realms the token is assigned to

        :return: realms
        :rtype: list
        """
        return self.token.get_realms()
    def get_serial(self):
        # Return the token serial number.
        return self.token.serial
    def get_tokentype(self):
        # Same as get_type(): the tokentype from the DB token.
        return self.token.tokentype
    @check_token_locked
    def set_so_pin(self, soPin):
        # Store the security officer PIN on the DB token.
        self.token.set_so_pin(soPin)
    @check_token_locked
    def set_user_pin(self, userPin):
        # Store the user PIN on the DB token.
        self.token.set_user_pin(userPin)
    @check_token_locked
    def set_otpkey(self, otpKey):
        # Store the OTP seed on the DB token.
        self.token.set_otpkey(otpKey)
    @check_token_locked
    def set_otplen(self, otplen):
        # The database field is an integer; coerce explicitly.
        self.token.otplen = int(otplen)
    @check_token_locked
    def get_otplen(self):
        # Return the configured OTP value length.
        return self.token.otplen
    @check_token_locked
    def set_otp_count(self, otpCount):
        # Set the OTP counter and persist immediately.
        self.token.count = int(otpCount)
        self.token.save()
@check_token_locked
def set_pin(self, pin, encrypt=False):
"""
set the PIN of a token.
Usually the pin is stored in a hashed way.
:param pin: the pin to be set for the token
:type pin: basestring
:param encrypt: If set to True, the pin is stored encrypted and
can be retrieved from the database again
:type encrypt: bool
"""
storeHashed = not encrypt
self.token.set_pin(pin, storeHashed)
    def get_pin_hash_seed(self):
        # Return the stored PIN hash and its seed as a tuple.
        return self.token.pin_hash, self.token.pin_seed
    @check_token_locked
    def set_pin_hash_seed(self, pinhash, seed):
        # Set a pre-computed PIN hash and seed directly (e.g. on import).
        self.token.pin_hash = pinhash
        self.token.pin_seed = seed
    @check_token_locked
    def enable(self, enable=True):
        # enable(False) disables the token.
        self.token.active = enable
    def revoke(self):
        """
        This revokes the token.
        By default it
        1. sets the revoked-field
        2. set the locked field
        3. disables the token.

        Some token types may revoke a token without locking it.
        """
        self.token.revoked = True
        self.token.locked = True
        self.token.active = False
    def is_revoked(self):
        """
        Check if the token is in the revoked state

        :return: True, if the token is revoked
        """
        return self.token.revoked
    def is_locked(self):
        """
        Check if the token is in a locked state
        A locked token can not be modified

        :return: True, if the token is locked.
        """
        return self.token.locked
    @check_token_locked
    def set_maxfail(self, maxFail):
        # Set the maximum allowed fail counter.
        self.token.maxfail = maxFail
    @check_token_locked
    def set_hashlib(self, hashlib):
        # Stored in tokeninfo; resolved later via get_hashlib().
        self.add_tokeninfo("hashlib", hashlib)
    @check_token_locked
    def inc_failcount(self):
        """
        Increase the fail counter (capped at maxfail) and persist it.

        :return: the new fail counter
        :rtype: int
        """
        if self.token.failcount < self.token.maxfail:
            self.token.failcount = (self.token.failcount + 1)
        try:
            self.token.save()
        except:  # pragma: no cover
            log.error('update failed')
            raise TokenAdminError("Token Fail Counter update failed", id=1106)
        return self.token.failcount
    @check_token_locked
    def set_count_window(self, countWindow):
        # Counter look-ahead window; DB field is an integer.
        self.token.count_window = int(countWindow)
    def get_count_window(self):
        # Return the counter look-ahead window.
        return self.token.count_window
    @check_token_locked
    def set_sync_window(self, syncWindow):
        # Resync window; DB field is an integer.
        self.token.sync_window = int(syncWindow)
    def get_sync_window(self):
        # Return the resync window.
        return self.token.sync_window
# hashlib algorithms:
# http://www.doughellmann.com/PyMOTW/hashlib/index.html#module-hashlib
@staticmethod
def get_hashlib(hLibStr):
"""
Returns a hashlib function for a given string
:param hLibStr: the hashlib
:type hLibStr: string
:return: the hashlib
:rtype: function
"""
if hLibStr is None:
return hashlib.sha1
hashlibStr = hLibStr.lower()
if hashlibStr == "md5":
return hashlib.md5
elif hashlibStr == "sha1":
return hashlib.sha1
elif hashlibStr == "sha224":
return hashlib.sha224
elif hashlibStr == "sha256":
return hashlib.sha256
elif hashlibStr == "sha384":
return hashlib.sha384
elif hashlibStr == "sha512":
return hashlib.sha512
else:
return hashlib.sha1
    def get_tokeninfo(self, key=None, default=None):
        """
        return the complete token info or a single key of the tokeninfo.
        When returning the complete token info dictionary encrypted entries
        are not decrypted.
        If you want to receive a decrypted value, you need to call it
        directly with the key.

        :param key: the key to return
        :type key: string
        :param default: the default value, if the key does not exist
        :type default: string
        :return: the value for the key
        :rtype: int or string
        """
        tokeninfo = self.token.get_info()
        if key:
            ret = tokeninfo.get(key, default)
            if tokeninfo.get(key + ".type") == "password":
                # we need to decrypt the return value
                ret = decryptPassword(ret)
        else:
            ret = tokeninfo
        return ret
    def del_tokeninfo(self, key=None):
        # key=None deletes all tokeninfo entries.
        self.token.del_info(key)
    @check_token_locked
    def set_count_auth_success_max(self, count):
        """
        Sets the counter for the maximum allowed successful logins
        as key "count_auth_success_max" in token info

        :param count: a number
        :type count: int
        """
        self.add_tokeninfo("count_auth_success_max", int(count))
    @check_token_locked
    def set_count_auth_success(self, count):
        """
        Sets the counter for the occurred successful logins
        as key "count_auth_success" in token info

        :param count: a number
        :type count: int
        """
        self.add_tokeninfo("count_auth_success", int(count))
    @check_token_locked
    def set_count_auth_max(self, count):
        """
        Sets the counter for the maximum allowed login attempts
        as key "count_auth_max" in token info

        :param count: a number
        :type count: int
        """
        self.add_tokeninfo("count_auth_max", int(count))
    @check_token_locked
    def set_count_auth(self, count):
        """
        Sets the counter for the occurred login attempts
        as key "count_auth" in token info

        :param count: a number
        :type count: int
        """
        self.add_tokeninfo("count_auth", int(count))
    def get_count_auth_success_max(self):
        """
        Return the maximum allowed successful authentications
        (0 if not configured)
        """
        ret = int(self.get_tokeninfo("count_auth_success_max", 0))
        return ret
    def get_count_auth_success(self):
        """
        Return the number of successful authentications
        (0 if not tracked yet)
        """
        ret = int(self.get_tokeninfo("count_auth_success", 0))
        return ret
    def get_count_auth_max(self):
        """
        Return the number of maximum allowed authentications
        (0 if not configured)
        """
        ret = int(self.get_tokeninfo("count_auth_max", 0))
        return ret
    def get_count_auth(self):
        """
        Return the number of all authentication tries
        (0 if not tracked yet)
        """
        ret = int(self.get_tokeninfo("count_auth", 0))
        return ret
    def get_validity_period_end(self):
        """
        returns the end of validity period (if set)
        if not set, "" is returned.

        :return: the end of the validity period
        :rtype: string
        """
        end = self.get_tokeninfo("validity_period_end", "")
        if end:
            # normalizes values stored in the old date format
            end = parse_legacy_time(end)
        return end
    @check_token_locked
    def set_validity_period_end(self, end_date):
        """
        sets the end date of the validity period for a token

        :param end_date: the end date in the format YYYY-MM-DDTHH:MM+OOOO
                         if the format is wrong, the method will
                         throw an exception
        :type end_date: string
        """
        # upper layer will catch. we just try to verify the date format
        d = parse_date_string(end_date)
        self.add_tokeninfo("validity_period_end", d.strftime(DATE_FORMAT))
    def get_validity_period_start(self):
        """
        returns the start of validity period (if set)
        if not set, "" is returned.

        :return: the start of the validity period
        :rtype: string
        """
        start = self.get_tokeninfo("validity_period_start", "")
        if start:
            # normalizes values stored in the old date format
            start = parse_legacy_time(start)
        return start
    @check_token_locked
    def set_validity_period_start(self, start_date):
        """
        sets the start date of the validity period for a token

        :param start_date: the start date in the format YYYY-MM-DDTHH:MM+OOOO
                           if the format is wrong, the method will
                           throw an exception
        :type start_date: string
        """
        d = parse_date_string(start_date)
        self.add_tokeninfo("validity_period_start", d.strftime(DATE_FORMAT))
    def set_next_pin_change(self, diff=None, password=False):
        """
        Sets the timestamp for the next_pin_change. Provide a
        difference like 90d (90 days).

        :param diff: The time delta, e.g. "90d"
        :type diff: basestring
        :param password: Do no set next_pin_change but next_password_change
        :return: None
        """
        # NOTE(review): strip("d") removes "d" from both ends; assumes the
        # value is digits followed by "d". diff=None raises AttributeError.
        days = int(diff.lower().strip("d"))
        key = "next_pin_change"
        if password:
            key = "next_password_change"
        new_date = datetime.datetime.now(tzlocal()) + datetime.timedelta(days=days)
        self.add_tokeninfo(key, new_date.strftime(DATE_FORMAT))
    def is_pin_change(self, password=False):
        """
        Returns true if the pin of the token needs to be changed.

        NOTE(review): if the tokeninfo key is not set, sdate is None and
        the parsing below raises -- callers should ensure the key exists.

        :param password: Whether the password needs to be changed.
        :type password: bool

        :return: True or False
        """
        key = "next_pin_change"
        if password:
            key = "next_password_change"
        sdate = self.get_tokeninfo(key)
        #date_change = datetime.datetime.strptime(sdate, DATE_FORMAT)
        date_change = parse_date_string(parse_legacy_time(sdate))
        return datetime.datetime.now(tzlocal()) > date_change
@check_token_locked
def inc_count_auth_success(self):
"""
Increase the counter, that counts successful authentications
Also increase the auth counter
"""
succcess_counter = self.get_count_auth_success()
succcess_counter += 1
auth_counter = self.get_count_auth()
auth_counter += 1
self.token.set_info({"count_auth_success": int(succcess_counter),
"count_auth": int(auth_counter)})
return succcess_counter
@check_token_locked
def inc_count_auth(self):
"""
Increase the counter, that counts authentications - successful and
unsuccessful
"""
count = self.get_count_auth()
count += 1
self.set_count_auth(count)
return count
    def check_failcount(self):
        """
        Checks if the failcounter is exceeded. It returns True, if the
        failcounter is less than maxfail

        :return: True or False
        """
        return self.token.failcount < self.token.maxfail
def check_auth_counter(self):
"""
This function checks the count_auth and the count_auth_success.
If the count_auth is less than count_auth_max
and count_auth_success is less than count_auth_success_max
it returns True. Otherwise False.
:return: success if the counter is less than max
:rtype: bool
"""
if self.get_count_auth_max() != 0 and self.get_count_auth() >= \
self.get_count_auth_max():
return False
if self.get_count_auth_success_max() != 0 and \
self.get_count_auth_success() >= \
self.get_count_auth_success_max():
return False
return True
    def check_validity_period(self):
        """
        This checks if the datetime.datetime.now() is within the validity
        period of the token.

        Missing start or end means that side is unbounded.

        :return: success
        :rtype: bool
        """
        start = self.get_validity_period_start()
        end = self.get_validity_period_end()
        if start:
            #dt_start = datetime.datetime.strptime(start, DATE_FORMAT)
            dt_start = parse_date_string(start)
            if dt_start > datetime.datetime.now(tzlocal()):
                return False
        if end:
            #dt_end = datetime.datetime.strptime(end, DATE_FORMAT)
            dt_end = parse_date_string(end)
            if dt_end < datetime.datetime.now(tzlocal()):
                return False
        return True
    def check_all(self, message_list):
        """
        Perform all checks on the token. Returns False if the token is either:
        * auth counter exceeded
        * not active
        * fail counter exceeded
        * validity period exceeded

        This is used in the function token.check_token_list

        :param message_list: A list of messages (appended to in place)
        :return: False, if any of the checks fail
        """
        r = False
        # Check if the max auth is succeeded
        if not self.check_auth_counter():
            message_list.append("Authentication counter exceeded")
        # Check if the token is disabled
        elif not self.is_active():
            message_list.append("Token is disabled")
        elif not self.check_failcount():
            message_list.append("Failcounter exceeded")
        elif not self.check_validity_period():
            message_list.append("Outside validity period")
        else:
            r = True
        if not r:
            log.info("{0} {1}".format(message_list, self.get_serial()))
        return r
    @log_with(log)
    @check_token_locked
    def inc_otp_counter(self, counter=None, reset=True):
        """
        Increase the otp counter and store the token in the database

        :param counter: the new counter value. If counter is given, than
                        the counter is increased by (counter+1)
                        If the counter is not given, the counter is increased
                        by +1
        :type counter: int
        :param reset: reset the failcounter if set to True
        :type reset: bool
        :return: the new counter value
        """
        reset_counter = False
        if counter:
            self.token.count = counter + 1
        else:
            self.token.count += 1
        # the failcounter reset additionally requires the system config flag
        if reset is True and get_from_config("DefaultResetFailCount") == "True":
            reset_counter = True
        if (reset_counter and self.token.active and self.token.failcount <
            self.token.maxfail):
            self.token.failcount = 0
        # make DB persistent immediately, to avoid the re-usage of the counter
        self.token.save()
        return self.token.count
    def check_otp_exist(self, otp, window=None):
        """
        checks if the given OTP value is/are values of this very token.
        This is used to autoassign and to determine the serial number of
        a token.

        The base class always returns -1 ("no match"); OTP token classes
        override this.

        :param otp: the OTP value
        :param window: The look ahead window
        :type window: int
        :return: True or a value > 0 in case of success
        """
        return -1
    def is_previous_otp(self, otp, window=10):
        """
        checks if a given OTP value is a previous OTP value, that lies in the
        past or has a lower counter.

        This is used in case of a failed authentication to return the
        information, that this OTP values was used previously and is invalid.

        The base class always returns False; token classes override this.

        :param otp: The OTP value.
        :type otp: basestring
        :param window: A counter window, how far we should look into the past.
        :type window: int
        :return: bool
        """
        return False
    def split_pin_pass(self, passw, user=None, options=None):
        """
        Split the password into the token PIN and the OTP value

        take the given password and split it into the PIN and the
        OTP value. The splitting can be dependent of certain policies.
        The policies may depend on the user.

        Each token type may define its own way to slit the PIN and
        the OTP value.

        :param passw: the password to split
        :param user: The user/owner of the token
        :type user: User object
        :param options: can be used be the token types.
        :type options: dict
        :return: tuple of (split status, pin, otp value); the base class
                 always reports the split status as True
        :rtype: tuple
        """
        # The database field is always an integer
        otplen = self.token.otplen
        if get_prepend_pin():
            # PIN comes first: everything before the trailing OTP digits
            pin = passw[0:-otplen]
            otpval = passw[-otplen:]
        else:
            # OTP comes first: the leading otplen characters
            pin = passw[otplen:]
            otpval = passw[0:otplen]
        return True, pin, otpval
    def status_validation_fail(self):
        """
        callback to enable a status change, if auth failed
        (no-op in the base class)
        """
        return
    def status_validation_success(self):
        """
        callback to enable a status change, if auth succeeds
        (no-op in the base class)
        """
        return
def __repr__(self):
"""
return the token state as text
:return: token state as string representation
:rtype: string
"""
ldict = {}
for attr in self.__dict__:
key = "{0!r}".format(attr)
val = "{0!r}".format(getattr(self, attr))
ldict[key] = val
res = "<{0!r} {1!r}>".format(self.__class__, ldict)
return res
def get_init_detail(self, params=None, user=None):
    """
    Build the token-specific part of the enrollment response.

    This method is called from api/token after the token is enrolled.
    It returns additional information after an admin/init, like the QR
    code of an HOTP/TOTP token, and can be anything else.

    :param params: The request params during token creation token/init
    :type params: dict
    :param user: the user, token owner
    :type user: User object
    :return: additional descriptions
    :rtype: dict
    """
    init_details = self.get_init_details()
    # Start from a copy of the init details and add the serial number.
    response_detail = dict(init_details)
    response_detail['serial'] = self.get_serial()
    otpkey = init_details.get('otpkey')
    if otpkey is not None:
        # Expose the seed both as a seed:// URL and as a QR image.
        response_detail["otpkey"] = {
            "description": "OTP seed",
            "value": "seed://{0!s}".format(otpkey),
            "img": create_img(otpkey, width=200),
        }
    return response_detail
def get_QRimage_data(self, response_detail):
    """
    Extract the Google-Authenticator enrollment URL from the response
    detail and return it together with image parameters.

    FIXME: Do we really use this?

    :param response_detail: dict possibly containing a 'googleurl' key
    :return: tuple of (url or None, dict of image parameters)
    """
    url, hparam = None, {}
    if response_detail is not None and 'googleurl' in response_detail:
        url = response_detail.get('googleurl')
        hparam['alt'] = url
    return url, hparam
# The challenge interfaces start here
@challenge_response_allowed
def is_challenge_request(self, passw, user=None, options=None):
    """
    This method checks, if this is a request, that triggers a challenge.

    The default behaviour to trigger a challenge is,
    if the ``passw`` parameter only contains the correct token pin *and*
    the request contains a ``data`` or a ``challenge`` key i.e. if the
    ``options`` parameter contains a key ``data`` or ``challenge``.

    Each token type can decide on its own under which condition a challenge
    is triggered by overwriting this method.

    **please note**: in case of pin policy == 2 (no pin is required)
    the ``check_pin`` would always return true! Thus each request
    containing a ``data`` or ``challenge`` would trigger a challenge!

    The Challenge workflow is like this:
    When an authentication request is issued, first it is checked if this is
    a request which will create a new challenge (is_challenge_request) or if
    this is a response to an existing challenge (is_challenge_response).
    In these two cases during request processing the following functions are
    called::

        is_challenge_request or is_challenge_response
                 |                       |
                 V                       V
        create_challenge        check_challenge
                 |                       |
                 V                       V
        challenge_janitor       challenge_janitor

    :param passw: password, which might be pin or pin+otp
    :type passw: string
    :param user: The user from the authentication request
    :type user: User object
    :param options: dictionary of additional request parameters
    :type options: dict
    :return: true or false
    :rtype: bool
    """
    options = options or {}
    pin_match = self.check_pin(passw, user=user, options=options)
    # BUGFIX: the original condition read
    #     pin_match is True and "data" in options or "challenge" in options
    # Since "and" binds tighter than "or", any request carrying a
    # "challenge" key triggered a challenge even with a WRONG pin,
    # contradicting the documented contract. Parenthesize so the PIN
    # check always applies.
    return pin_match is True and ("data" in options or "challenge" in options)
def is_challenge_response(self, passw, user=None, options=None):
    """
    Check whether this request is the response to a previously sent
    challenge.

    The default check is simply whether the request parameters
    (``options``) contain a ``state`` or ``transaction_id`` key.
    This method does not try to verify the response itself! It only
    determines if this is a response for a challenge or not; the
    response is verified in check_challenge_response.

    :param passw: password, which might be pin or pin+otp
    :type passw: string
    :param user: the requesting user
    :type user: User object
    :param options: dictionary of additional request parameters
    :type options: dict
    :return: true or false
    :rtype: bool
    """
    options = options or {}
    return "state" in options or "transaction_id" in options
@check_token_locked
def check_challenge_response(self, user=None, passw=None, options=None):
    """
    Verify whether there is a matching challenge for the given passw
    and whether the response is correct.

    On success the returned otp_counter is >= 0; the matched challenge
    is deleted. On failure -1 is returned and the received-count of the
    inspected challenges is increased.

    :param user: the requesting user
    :type user: User object
    :param passw: the password (pin+otp)
    :type passw: string
    :param options: additional arguments from the request, which could
                    be token specific. Usually "transactionid"
    :type options: dict
    :return: return otp_counter. If -1, challenge does not match
    :rtype: int
    """
    options = options or {}
    matching_counter = -1
    # The transaction may be referenced as "transaction_id" or "state".
    transaction_id = options.get('transaction_id')
    if transaction_id is None:
        transaction_id = options.get('state')
    if transaction_id is not None:
        for challenge in get_challenges(serial=self.token.serial,
                                        transaction_id=transaction_id):
            if not challenge.is_valid():
                # expired or otherwise invalid challenge -> skip
                continue
            # Pass the challenge data along for check_otp.
            options["challenge"] = challenge.challenge
            matching_counter = self.check_otp(passw, options=options)
            if matching_counter >= 0:
                # Matching challenge found: delete it and stop looking.
                challenge.delete()
                break
            # Wrong OTP for this challenge: bump its received_count.
            challenge.set_otp_status()
    self.challenge_janitor()
    return matching_counter
@staticmethod
def challenge_janitor():
    """
    Remove all challenges whose validity period has expired.

    :return: None
    """
    cleanup_challenges()
def create_challenge(self, transactionid=None, options=None):
    """
    Create a challenge, which is submitted to the user, and preserve it
    in the challenge database.

    If no transaction id is given, the system creates one and returns
    it, so that the response can refer to this transaction.

    :param transactionid: the id of this challenge
    :param options: the request context parameters / data
    :type options: dict
    :return: tuple of (bool, message, transactionid, attributes)
    :rtype: tuple

    The return tuple builds up like this:
    ``bool`` if submit was successful;
    ``message`` which is displayed in the JSON response;
    additional ``attributes``, which are displayed in the JSON response.
    """
    options = options or {}
    message = 'please enter otp: '
    attributes = None
    # Global default validity, possibly overridden by a token-type
    # specific setting (e.g. "HotpChallengeValidityTime").
    validity = int(get_from_config('DefaultChallengeValidityTime', 120))
    type_specific_key = self.get_tokentype().lower().capitalize() + 'ChallengeValidityTime'
    validity = int(get_from_config(type_specific_key, validity))
    # Persist the challenge in the database.
    db_challenge = Challenge(self.token.serial,
                             transaction_id=transactionid,
                             challenge=options.get("challenge"),
                             data=None,
                             session=options.get("session"),
                             validitytime=validity)
    db_challenge.save()
    self.challenge_janitor()
    return True, message, db_challenge.transaction_id, attributes
def get_as_dict(self):
    """
    Return the token database record as a dictionary.

    It is used to display the token list at /token/list.

    :return: The token data as dict
    :rtype: dict
    """
    return self.token.get()
@classmethod
def api_endpoint(cls, request, g):
    """
    Hook for the API endpoint /ttype/<tokentype> defined in api/ttype.py.

    Implementations should return either
    ``("json", {})`` or ``("text", "OK")``.

    :param request: The Flask request
    :param g: The Flask global object g
    :return: Flask Response or text
    :raises ParameterError: the base class supports no API endpoint
    """
    raise ParameterError("{0!s} does not support the API endpoint".format(
                         cls.get_tokentype()))
@staticmethod
def test_config(params=None):
"""
This method is used to test the token config. Some tokens require some
special token configuration like the SMS-Token or the Email-Token.
To test this configuration, this classmethod is used.
It takes token specific parameters and returns a tuple of a boolean
and a result description.
:param params: token specific parameters
:type params: dict
:return: success, description
:rtype: tuple
"""
return False, "Not implemented"
@staticmethod
def get_setting_type(key):
"""
This function returns the type of the token specific config/setting.
This way a tokenclass can define settings, that can be "public" or a
"password". If this setting is written to the database, the type of
the setting is set automatically in set_privacyidea_config
The key name needs to start with the token type.
:param key: The token specific setting key
:return: A string like "public"
"""
return ""
@classmethod
def get_default_settings(cls, params, logged_in_user=None,
                         policy_object=None, client_ip=None):
    """
    Return a dictionary with default settings for token enrollment.

    These default settings depend on the token type and the defined
    policies; the returned dictionary is added to the parameters of the
    API call. The base class contributes nothing.

    :param params: The call parameters
    :type params: dict
    :param logged_in_user: The logged_in_user dictionary with "role",
                           "username" and "realm"
    :type logged_in_user: dict
    :param policy_object: The policy_object
    :type policy_object: PolicyClass
    :return: default parameters
    """
    return {}
def check_last_auth_newer(self, last_auth):
    """
    Check if the last successful authentication with the token is newer
    than the specified time delta, which is passed as 10h, 7d or 1y.

    It returns True if the last authentication with this token is
    **newer** than the specified delta (or if no last-auth timestamp is
    stored at all).

    :param last_auth: 10h, 7d or 1y
    :type last_auth: basestring
    :return: bool
    """
    # The tdelta in the policy
    tdelta = parse_timedelta(last_auth)
    # The last successful authentication of the token
    date_s = self.get_tokeninfo(ACTION.LASTAUTH)
    if not date_s:
        # No stored timestamp -> nothing to compare, treat as OK.
        return True
    log.debug("Compare the last successful authentication of "
              "token %s with policy "
              "tdelta %s: %s" % (self.token.serial, tdelta,
                                 date_s))
    # parse the string from the database
    last_success_auth = parse_date_string(date_s)
    if not last_success_auth.tzinfo:
        # The date string has no timezone; default timezone is UTC, so
        # reparse with an explicit tzinfo.
        last_success_auth = parse_date_string(date_s,
                                              tzinfos=tzutc)
    if last_success_auth + tdelta < datetime.datetime.now(tzlocal()):
        # The last auth lies too far in the past.
        log.debug("The last successful authentication is too old: "
                  "{0!s}".format(last_success_auth))
        return False
    return True
def generate_symmetric_key(self, server_component, client_component,
                           options=None):
    """
    Generate a symmetric key from a server component and a client
    component.

    Key generation could be based on HMAC, KDF or even Diffie-Hellman;
    this basic implementation simply replaces the last n bytes of the
    server component with the bytes of the client component.

    :param server_component: The component usually generated by privacyIDEA
    :type server_component: hex string
    :param client_component: The component usually generated by the
        client (e.g. smartphone)
    :type client_component: hex string
    :param options: unused in the base implementation
    :return: the new generated key as hex string
    :raises Exception: if the server component is not longer than the
        client component
    """
    tail = len(client_component)
    if len(server_component) <= tail:
        raise Exception("The server component must be longer than the "
                        "client component.")
    return server_component[:-tail] + client_component
| agpl-3.0 |
alejo8591/maker | knowledge/migrations/0001_initial.py | 1 | 11953 | # encoding: utf-8
# Copyright 2013 maker
# License
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: initial tables for the knowledge app.

    Creates KnowledgeFolder, KnowledgeCategory and KnowledgeItem, each a
    multi-table-inheritance child of core.Object (via object_ptr).
    NOTE: South migrations are frozen snapshots — do not edit the ORM
    freeze below by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the three knowledge tables."""
        # Adding model 'KnowledgeFolder'
        db.create_table('knowledge_knowledgefolder', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
            # self-referencing FK: folders form a tree via 'parent'
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['knowledge.KnowledgeFolder'])),
            ('treepath', self.gf('django.db.models.fields.CharField')(max_length=800)),
        ))
        db.send_create_signal('knowledge', ['KnowledgeFolder'])
        # Adding model 'KnowledgeCategory'
        db.create_table('knowledge_knowledgecategory', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
            ('treepath', self.gf('django.db.models.fields.CharField')(max_length=800)),
        ))
        db.send_create_signal('knowledge', ['KnowledgeCategory'])
        # Adding model 'KnowledgeItem'
        db.create_table('knowledge_knowledgeitem', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['knowledge.KnowledgeFolder'])),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['knowledge.KnowledgeCategory'], null=True, blank=True)),
            ('body', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('treepath', self.gf('django.db.models.fields.CharField')(max_length=800)),
        ))
        db.send_create_signal('knowledge', ['KnowledgeItem'])

    def backwards(self, orm):
        """Reverse the migration: drop the three knowledge tables."""
        # Deleting model 'KnowledgeFolder'
        db.delete_table('knowledge_knowledgefolder')
        # Deleting model 'KnowledgeCategory'
        db.delete_table('knowledge_knowledgecategory')
        # Deleting model 'KnowledgeItem'
        db.delete_table('knowledge_knowledgeitem')

    # South ORM freeze: a snapshot of every model this migration may touch,
    # including the auth/contenttypes/core dependencies. Auto-generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
        },
        'core.object': {
            'Meta': {'object_name': 'Object'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
            'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
            'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
            'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User'},
            'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'knowledge.knowledgecategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'KnowledgeCategory', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'treepath': ('django.db.models.fields.CharField', [], {'max_length': '800'})
        },
        'knowledge.knowledgefolder': {
            'Meta': {'ordering': "['name']", 'object_name': 'KnowledgeFolder', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['knowledge.KnowledgeFolder']"}),
            'treepath': ('django.db.models.fields.CharField', [], {'max_length': '800'})
        },
        'knowledge.knowledgeitem': {
            'Meta': {'ordering': "['-last_updated']", 'object_name': 'KnowledgeItem', '_ormbases': ['core.Object']},
            'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['knowledge.KnowledgeCategory']", 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['knowledge.KnowledgeFolder']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'treepath': ('django.db.models.fields.CharField', [], {'max_length': '800'})
        }
    }

    # Tell South which app's models are fully described above.
    complete_apps = ['knowledge']
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.