| code (string, length 2 to 1.05M) | repo_name (string, length 5 to 104) | path (string, length 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
from django.urls import path, re_path
from . import views
urlpatterns = [
path('noslash', views.empty_view),
path('slash/', views.empty_view),
path('needsquoting#/', views.empty_view),
# Accepts paths with two leading slashes.
re_path(r'^(.+)/security/$', views.empty_view),
]
| georgemarshall/django | tests/middleware/urls.py | Python | bsd-3-clause | 299 |
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
    LimitedStream wraps another stream so that it cannot be read past a
    specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s/%s' % (script_name.rstrip('/'), path_info.lstrip('/'))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', RemovedInDjango19Warning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value if six.PY2 else value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
"""
    Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Same comment as above
return value if six.PY2 else value.encode(ISO_8859_1).decode(UTF_8, errors='replace')
| rooshilp/CMPUT410Lab6 | virt_env/virt1/lib/python2.7/site-packages/django/core/handlers/wsgi.py | Python | apache-2.0 | 9,514 |
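A quick way to see what LimitedStream in the sample above is doing is to cap reads over a plain BytesIO. The sketch below is not part of the repository file; the CappedStream name and the payload are invented, and it only mirrors the read-limiting behaviour, not the buffering or readline() logic.

from io import BytesIO

class CappedStream(object):
    """Minimal stand-in for LimitedStream: never reads past `limit` bytes."""
    def __init__(self, stream, limit):
        self.stream = stream
        self.remaining = limit

    def read(self, size=None):
        # Clamp the requested size to whatever allowance is left.
        if size is None or size > self.remaining:
            size = self.remaining
        data = self.stream.read(size)
        self.remaining -= len(data)
        return data

body = CappedStream(BytesIO(b"hello world -- plus trailing junk"), limit=11)
print(body.read(5))    # b'hello'
print(body.read())     # b' world' -- only 6 bytes of allowance remain
print(body.read(100))  # b'' -- the cap has been reached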
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for af (Afrikaans (South Africa))
nplurals=2 # Afrikaans language has 2 forms:
# 1 singular and 1 plural
# Determine plural_id for number *n* as sequence of positive
# integers: 0,1,...
# NOTE! For singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is ALWAYS > 0). This function will be executed
# for words (or phrases) not found in plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| manuelep/openshift_v3_test | wsgi/web2py/gluon/contrib/plural_rules/af.py | Python | mit | 598 |
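The comments in af.py above fully specify the rule, so a tiny self-check (not from the file) makes the mapping explicit: plural_id 0 is the singular form and is returned only for n == 1.

get_plural_id = lambda n: int(n != 1)

# Singular only for exactly one item; everything else (including 0) is plural.
assert [get_plural_id(n) for n in (0, 1, 2, 5, 100)] == [1, 0, 1, 1, 1]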
"""Python 2/3 compatibility definitions.
These are used by the rest of Elpy to keep compatibility definitions
in one place.
"""
import sys
if sys.version_info >= (3, 0):
PYTHON3 = True
from io import StringIO
def ensure_not_unicode(obj):
return obj
else:
PYTHON3 = False
from StringIO import StringIO # noqa
def ensure_not_unicode(obj):
"""Return obj. If it's a unicode string, convert it to str first.
Pydoc functions simply don't find anything for unicode
strings. No idea why.
"""
if isinstance(obj, unicode):
return obj.encode("utf-8")
else:
return obj
| dspelaez/dspelaez.github.io | resources/dotfiles/dot.emacs.d.own/elpa/elpy-20170214.318/elpy/compat.py | Python | mit | 673 |
import errno
from http import client
import io
import os
import array
import socket
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Root cert file (CA) for svn.python.org's cert
CACERT_svn_python_org = os.path.join(here, 'https_svn_python_org_root.pem')
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
def sendall(self, data):
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
return self.fileclass(self.text)
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise socket.error(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFStringIO(io.BytesIO):
"""Like StringIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertTrue(b'Content-length: 42' in conn._buffer)
def test_ipv6host_header(self):
        # The default Host header on an IPv6 connection should be wrapped
        # in [] if it is an actual IPv6 address.
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
        # if we have a length, the system knows when to close itself
        # same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "www.python.org:"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFStringIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\nContent-Length:')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'hello world')
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'hello world')
self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
resp.close()
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(socket.error,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
class OfflineTest(TestCase):
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen(5)
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
        # We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen(5)
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def _check_svn_python_org(self, resp):
# Just a simple check that everything went fine
server_string = resp.getheader('server')
self.assertIn('Apache', server_string)
def test_networked(self):
# Default settings: no cert verification is done
support.requires('network')
with support.transient_internet('svn.python.org'):
h = client.HTTPSConnection('svn.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
self._check_svn_python_org(resp)
def test_networked_good_cert(self):
# We feed a CA cert that validates the server's cert
import ssl
support.requires('network')
with support.transient_internet('svn.python.org'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CACERT_svn_python_org)
h = client.HTTPSConnection('svn.python.org', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
self._check_svn_python_org(resp)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('svn.python.org'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('svn.python.org', 443, context=context)
with self.assertRaises(ssl.SSLError):
h.request('GET', '/')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
from test.ssl_servers import make_https_server
server = make_https_server(self, CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
from test.ssl_servers import make_https_server
server = make_https_server(self, CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
self.conn.sock = self.sock
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
        # it will not be overridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_file_body(self):
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_binary_file_body(self):
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
def test_main(verbose=None):
support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
HTTPSTest, RequestBodyTest, SourceAddressTest,
HTTPResponseTest)
if __name__ == '__main__':
test_main()
| invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_httplib.py | Python | apache-2.0 | 24,526 |
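The FakeSocket pattern used throughout the test file above also works outside a test suite. The sketch below (Python 3; the CannedSocket name is mine, not from the file) feeds a canned response to http.client.HTTPResponse, relying only on HTTPResponse reading from sock.makefile('rb').

import io
from http import client

class CannedSocket:
    """Just enough of a socket for HTTPResponse: a readable byte buffer."""
    def __init__(self, raw):
        self.raw = raw
    def makefile(self, mode, bufsize=None):
        return io.BytesIO(self.raw)

resp = client.HTTPResponse(CannedSocket(b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nText"))
resp.begin()
print(resp.status, resp.reason, resp.read())  # 200 OK b'Text'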
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.drivers.brightbox import BrightboxLBDriver
from libcloud.loadbalancer.types import State
from libcloud.test import MockHttpTestCase
from libcloud.test.secrets import LB_BRIGHTBOX_PARAMS
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
class BrightboxLBTests(unittest.TestCase):
def setUp(self):
BrightboxLBDriver.connectionCls.conn_classes = (None,
BrightboxLBMockHttp)
BrightboxLBMockHttp.type = None
self.driver = BrightboxLBDriver(*LB_BRIGHTBOX_PARAMS)
def test_list_protocols(self):
protocols = self.driver.list_protocols()
self.assertEqual(len(protocols), 2)
self.assertTrue('tcp' in protocols)
self.assertTrue('http' in protocols)
def test_list_balancers(self):
balancers = self.driver.list_balancers()
self.assertEqual(len(balancers), 1)
self.assertEqual(balancers[0].id, 'lba-1235f')
self.assertEqual(balancers[0].name, 'lb1')
def test_get_balancer(self):
balancer = self.driver.get_balancer(balancer_id='lba-1235f')
self.assertEqual(balancer.id, 'lba-1235f')
self.assertEqual(balancer.name, 'lb1')
self.assertEqual(balancer.state, State.RUNNING)
def test_destroy_balancer(self):
balancer = self.driver.get_balancer(balancer_id='lba-1235f')
self.assertTrue(self.driver.destroy_balancer(balancer))
def test_create_balancer(self):
members = [Member('srv-lv426', None, None)]
balancer = self.driver.create_balancer(name='lb2', port=80,
protocol='http',
algorithm=Algorithm.ROUND_ROBIN,
members=members)
self.assertEqual(balancer.name, 'lb2')
self.assertEqual(balancer.port, 80)
self.assertEqual(balancer.state, State.PENDING)
def test_balancer_list_members(self):
balancer = self.driver.get_balancer(balancer_id='lba-1235f')
members = balancer.list_members()
self.assertEqual(len(members), 1)
self.assertEqual(members[0].balancer, balancer)
self.assertEqual('srv-lv426', members[0].id)
def test_balancer_attach_member(self):
balancer = self.driver.get_balancer(balancer_id='lba-1235f')
member = balancer.attach_member(Member('srv-kg983', ip=None,
port=None))
self.assertEqual(member.id, 'srv-kg983')
def test_balancer_detach_member(self):
balancer = self.driver.get_balancer(balancer_id='lba-1235f')
member = Member('srv-lv426', None, None)
self.assertTrue(balancer.detach_member(member))
class BrightboxLBMockHttp(MockHttpTestCase):
fixtures = LoadBalancerFileFixtures('brightbox')
def _token(self, method, url, body, headers):
if method == 'POST':
return self.response(httplib.OK, self.fixtures.load('token.json'))
def _1_0_load_balancers(self, method, url, body, headers):
if method == 'GET':
return self.response(httplib.OK,
self.fixtures.load('load_balancers.json'))
elif method == 'POST':
body = self.fixtures.load('load_balancers_post.json')
return self.response(httplib.ACCEPTED, body)
def _1_0_load_balancers_lba_1235f(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('load_balancers_lba_1235f.json')
return self.response(httplib.OK, body)
elif method == 'DELETE':
return self.response(httplib.ACCEPTED, '')
def _1_0_load_balancers_lba_1235f_add_nodes(self, method, url, body,
headers):
if method == 'POST':
return self.response(httplib.ACCEPTED, '')
def _1_0_load_balancers_lba_1235f_remove_nodes(self, method, url, body,
headers):
if method == 'POST':
return self.response(httplib.ACCEPTED, '')
def response(self, status, body):
return (status, body, {'content-type': 'application/json'},
httplib.responses[status])
if __name__ == "__main__":
sys.exit(unittest.main())
| dcorbacho/libcloud | libcloud/test/loadbalancer/test_brightbox.py | Python | apache-2.0 | 5,292 |
# Copyright 2013, Big Switch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import model_base
class RouterRule(model_base.BASEV2):
id = sa.Column(sa.Integer, primary_key=True)
source = sa.Column(sa.String(64), nullable=False)
destination = sa.Column(sa.String(64), nullable=False)
nexthops = orm.relationship('NextHop', cascade='all,delete')
action = sa.Column(sa.String(10), nullable=False)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete="CASCADE"))
class NextHop(model_base.BASEV2):
rule_id = sa.Column(sa.Integer,
sa.ForeignKey('routerrules.id',
ondelete="CASCADE"),
primary_key=True)
nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
| yuewko/neutron | neutron/plugins/bigswitch/routerrule_db.py | Python | apache-2.0 | 1,500 |
from geopy.point import Point
class Location(object):
def __init__(self, name="", point=None, attributes=None, **kwargs):
self.name = name
if point is not None:
self.point = Point(point)
if attributes is None:
attributes = {}
self.attributes = dict(attributes, **kwargs)
def __getitem__(self, index):
"""Backwards compatibility with geopy 0.93 tuples."""
return (self.name, self.point)[index]
def __repr__(self):
return "Location(%r, %r)" % (self.name, self.point)
def __iter__(self):
return iter((self.name, self.point))
def __eq__(self, other):
return (self.name, self.point) == (other.name, other.point)
def __ne__(self, other):
return (self.name, self.point) != (other.name, other.point)
| golismero/golismero | thirdparty_libs/geopy/location.py | Python | gpl-2.0 | 850 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
_name = "report.lunch.order.line"
_description = "Lunch Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date Order', readonly=True, select=True),
'year': fields.char('Year', size=4, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'user_id': fields.many2one('res.users', 'User Name'),
'price_total':fields.float('Total Price', readonly=True),
'note' : fields.text('Note',size=256,readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_lunch_order_line')
cr.execute("""
create or replace view report_lunch_order_line as (
select
min(lo.id) as id,
lo.user_id as user_id,
lo.date as date,
to_char(lo.date, 'YYYY') as year,
to_char(lo.date, 'MM') as month,
to_char(lo.date, 'YYYY-MM-DD') as day,
lo.note as note,
sum(lp.price) as price_total
from
lunch_order_line as lo
left join lunch_product as lp on (lo.product_id = lp.id)
group by
lo.date,lo.user_id,lo.note
)
""")
report_lunch_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| inovtec-solutions/OpenERP | openerp/addons/lunch/report/report_lunch_order.py | Python | agpl-3.0 | 2,799 |
"""Execute shell commands via os.popen() and return status, output.
Interface summary:
import commands
outtext = commands.getoutput(cmd)
(exitstatus, outtext) = commands.getstatusoutput(cmd)
outtext = commands.getstatus(file) # returns output of "ls -ld file"
A trailing newline is removed from the output string.
Encapsulates the basic operation:
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
[Note: it would be nice to add functions to interpret the exit status.]
"""
__all__ = ["getstatusoutput","getoutput","getstatus"]
# Module 'commands'
#
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for UNIX.
# Get 'ls -l' status for an object into a string
#
def getstatus(file):
"""Return output of "ls -ld <file>" in a string."""
import warnings
warnings.warn("commands.getstatus() is deprecated", DeprecationWarning)
return getoutput('ls -ld' + mkarg(file))
# Get the output from a shell command into a string.
# The exit status is ignored; a trailing newline is stripped.
# Assume the command will work with '{ ... ; } 2>&1' around it.
#
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell."""
return getstatusoutput(cmd)[1]
# Ditto but preserving the exit status.
# Returns a pair (sts, output)
#
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
import os
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
# Make command argument from directory and pathname (prefix space, add quotes).
#
def mk2arg(head, x):
from warnings import warnpy3k
warnpy3k("In 3.x, mk2arg has been removed.")
import os
return mkarg(os.path.join(head, x))
# Make a shell command argument from a string.
# Return a string beginning with a space followed by a shell-quoted
# version of the argument.
# Two strategies: enclose in single quotes if it contains none;
# otherwise, enclose in double quotes and prefix quotable characters
# with backslash.
#
def mkarg(x):
from warnings import warnpy3k
warnpy3k("in 3.x, mkarg has been removed.")
if '\'' not in x:
return ' \'' + x + '\''
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
| DmitryADP/diff_qc750 | vendor/nvidia/tegra/3rdparty/python-support-files/src/Lib/commands.py | Python | gpl-2.0 | 2,540 |
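The docstring of commands.py above already spells out the interface, so a short usage sketch is enough. It assumes a Unix shell and Python 2 (the commands module was removed in Python 3 in favour of subprocess); the echoed command is only an example.

import commands

# Run a command, capturing exit status and combined stdout/stderr.
status, text = commands.getstatusoutput('echo hello')
print status, text                   # 0 hello

# Or just grab the output, ignoring the exit status.
print commands.getoutput('uname')    # e.g. Linux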
def build_models(payment_class):
return []
| dekoza/django-getpaid | getpaid/backends/transferuj/models.py | Python | mit | 47 |
from matplotlib.numerix import which
if which[0] == "numarray":
from numarray.linear_algebra.mlab import *
elif which[0] == "numeric":
from MLab import *
elif which[0] == "numpy":
try:
from numpy.oldnumeric.mlab import *
except ImportError:
from numpy.lib.mlab import *
else:
raise RuntimeError("invalid numerix selector")
amin = min
amax = max
| tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/numerix/mlab/__init__.py | Python | gpl-3.0 | 381 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
import os
import shutil
import tempfile
import pytest
from units.compat.mock import patch, MagicMock
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic
class TestAnsibleModuleTmpDir:
DATA = (
(
{
"_ansible_tmpdir": "/path/to/dir",
"_ansible_remote_tmp": "/path/tmpdir",
"_ansible_keep_remote_files": False,
},
True,
"/path/to/dir"
),
(
{
"_ansible_tmpdir": None,
"_ansible_remote_tmp": "/path/tmpdir",
"_ansible_keep_remote_files": False
},
False,
"/path/tmpdir/ansible-moduletmp-42-"
),
(
{
"_ansible_tmpdir": None,
"_ansible_remote_tmp": "/path/tmpdir",
"_ansible_keep_remote_files": False
},
True,
"/path/tmpdir/ansible-moduletmp-42-"
),
(
{
"_ansible_tmpdir": None,
"_ansible_remote_tmp": "$HOME/.test",
"_ansible_keep_remote_files": False
},
False,
os.path.join(os.environ['HOME'], ".test/ansible-moduletmp-42-")
),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('args, expected, stat_exists', ((s, e, t) for s, t, e in DATA))
def test_tmpdir_property(self, monkeypatch, args, expected, stat_exists):
makedirs = {'called': False}
def mock_mkdtemp(prefix, dir):
return os.path.join(dir, prefix)
def mock_makedirs(path, mode):
makedirs['called'] = True
makedirs['path'] = path
makedirs['mode'] = mode
return
monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
monkeypatch.setattr(os.path, 'exists', lambda x: stat_exists)
monkeypatch.setattr(os, 'makedirs', mock_makedirs)
monkeypatch.setattr(shutil, 'rmtree', lambda x: None)
monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})))
with patch('time.time', return_value=42):
am = basic.AnsibleModule(argument_spec={})
actual_tmpdir = am.tmpdir
assert actual_tmpdir == expected
# verify subsequent calls always produces the same tmpdir
assert am.tmpdir == actual_tmpdir
if not stat_exists:
assert makedirs['called']
expected = os.path.expanduser(os.path.expandvars(am._remote_tmp))
assert makedirs['path'] == expected
assert makedirs['mode'] == 0o700
@pytest.mark.parametrize('stdin', ({"_ansible_tmpdir": None,
"_ansible_remote_tmp": "$HOME/.test",
"_ansible_keep_remote_files": True},),
indirect=['stdin'])
def test_tmpdir_makedirs_failure(self, am, monkeypatch):
mock_mkdtemp = MagicMock(return_value="/tmp/path")
mock_makedirs = MagicMock(side_effect=OSError("Some OS Error here"))
monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
monkeypatch.setattr(os.path, 'exists', lambda x: False)
monkeypatch.setattr(os, 'makedirs', mock_makedirs)
actual = am.tmpdir
assert actual == "/tmp/path"
assert mock_makedirs.call_args[0] == (os.path.expanduser(os.path.expandvars("$HOME/.test")),)
assert mock_makedirs.call_args[1] == {"mode": 0o700}
        # Because makedirs failed, the dir should be None so the system tmp is used.
assert mock_mkdtemp.call_args[1]['dir'] is None
assert mock_mkdtemp.call_args[1]['prefix'].startswith("ansible-moduletmp-")
| aperigault/ansible | test/units/module_utils/basic/test_tmpdir.py | Python | gpl-3.0 | 4,159 |
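The monkeypatching in test_tmpdir.py above is a general pytest technique, not something Ansible-specific. Below is a stripped-down, hypothetical version with no Ansible imports: make_scratch_dir is an invented helper, and tempfile.mkdtemp is replaced so the test never touches the filesystem.

import os
import tempfile

def make_scratch_dir(base):
    # Hypothetical helper under test: creates a prefixed temp dir under `base`.
    return tempfile.mkdtemp(prefix="scratch-", dir=base)

def test_make_scratch_dir(monkeypatch):
    # Replace mkdtemp so no directory is actually created.
    monkeypatch.setattr(tempfile, 'mkdtemp',
                        lambda prefix, dir: os.path.join(dir, prefix))
    assert make_scratch_dir("/tmp") == "/tmp/scratch-"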
from __future__ import unicode_literals
from django.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Takes two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
that don't support state change to have it applied, or have operations
that affect the state or not the database, or so on.
"""
serialization_expand_args = ['database_operations', 'state_operations']
def __init__(self, database_operations=None, state_operations=None):
self.database_operations = database_operations or []
self.state_operations = state_operations or []
def deconstruct(self):
kwargs = {}
if self.database_operations:
kwargs['database_operations'] = self.database_operations
if self.state_operations:
kwargs['state_operations'] = self.state_operations
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
for database_operation in self.database_operations:
to_state = from_state.clone()
database_operation.state_forwards(app_label, to_state)
database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
from_state = to_state
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
to_states = {}
for dbop in self.database_operations:
to_states[dbop] = to_state
to_state = to_state.clone()
dbop.state_forwards(app_label, to_state)
# to_state now has the states of all the database_operations applied
# which is the from_state for the backwards migration of the last
# operation.
for database_operation in reversed(self.database_operations):
from_state = to_state
to_state = to_states[database_operation]
database_operation.database_backwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Custom state/database change combination"
class RunSQL(Operation):
"""
Runs some raw SQL. A reverse SQL statement may be provided.
Also accepts a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ''
def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False):
self.sql = sql
self.reverse_sql = reverse_sql
self.state_operations = state_operations or []
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
'sql': self.sql,
}
if self.reverse_sql is not None:
kwargs['reverse_sql'] = self.reverse_sql
if self.state_operations:
kwargs['state_operations'] = self.state_operations
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_sql is not None
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_sql is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.reverse_sql)
def describe(self):
return "Raw SQL operation"
def _run_sql(self, schema_editor, sqls):
if isinstance(sqls, (list, tuple)):
for sql in sqls:
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
schema_editor.execute(sql, params=params)
elif sqls != RunSQL.noop:
statements = schema_editor.connection.ops.prepare_sql_script(sqls)
for statement in statements:
schema_editor.execute(statement, params=None)
class RunPython(Operation):
"""
Runs Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
def __init__(self, code, reverse_code=None, atomic=None, hints=None, elidable=False):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
'code': self.code,
}
if self.reverse_code is not None:
kwargs['reverse_code'] = self.reverse_code
if self.atomic is not None:
kwargs['atomic'] = self.atomic
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
# We now execute the Python code in a context that contains a 'models'
# object, representing the versioned models as an app registry.
# We could try to override the global cache, but then people will still
# use direct imports, so we go with a documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None
| KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/db/migrations/operations/special.py | Python | gpl-3.0 | 7,662 |
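RunPython from the operations module above is normally invoked from a migration file. The following sketch is a hypothetical migration, not code from the repository: the 'myapp' label, the Author model and the forwards() body are invented to show the calling convention, including the historical app registry that the comments in database_forwards() refer to.

from django.db import migrations

def forwards(apps, schema_editor):
    # Use the historical model from the versioned app registry, as the
    # comments in RunPython.database_forwards() recommend.
    Author = apps.get_model('myapp', 'Author')
    Author.objects.filter(name='').update(name='unknown')

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]
    operations = [
        # noop makes the operation trivially reversible.
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]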
xs = 1<caret>, 2
| ahb0327/intellij-community | python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertTupleWithoutParenthesesToList.py | Python | apache-2.0 | 17 |
from __future__ import unicode_literals
import datetime
import unittest
from django.apps.registry import Apps
from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase
from .models import (
CustomPKModel, FlexibleDatePost, ModelToValidate, Post, UniqueErrorsModel,
UniqueFieldsModel, UniqueForDateModel, UniqueTogetherModel,
)
class GetUniqueCheckTests(unittest.TestCase):
def test_unique_fields_get_collected(self):
m = UniqueFieldsModel()
self.assertEqual(
([(UniqueFieldsModel, ('id',)),
(UniqueFieldsModel, ('unique_charfield',)),
(UniqueFieldsModel, ('unique_integerfield',))],
[]),
m._get_unique_checks()
)
def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
m = UniqueTogetherModel()
self.assertEqual(
([(UniqueTogetherModel, ('ifield', 'cfield')),
(UniqueTogetherModel, ('ifield', 'efield')),
(UniqueTogetherModel, ('id',)), ],
[]),
m._get_unique_checks()
)
def test_unique_together_normalization(self):
"""
Test the Meta.unique_together normalization with different sorts of
objects.
"""
data = {
'2-tuple': (('foo', 'bar'),
(('foo', 'bar'),)),
'list': (['foo', 'bar'],
(('foo', 'bar'),)),
'already normalized': ((('foo', 'bar'), ('bar', 'baz')),
(('foo', 'bar'), ('bar', 'baz'))),
'set': ({('foo', 'bar'), ('bar', 'baz')}, # Ref #21469
(('foo', 'bar'), ('bar', 'baz'))),
}
for test_name, (unique_together, normalized) in data.items():
class M(models.Model):
foo = models.IntegerField()
bar = models.IntegerField()
baz = models.IntegerField()
Meta = type(str('Meta'), (), {
'unique_together': unique_together,
'apps': Apps()
})
checks, _ = M()._get_unique_checks()
for t in normalized:
check = (M, t)
self.assertIn(check, checks)
def test_primary_key_is_considered_unique(self):
m = CustomPKModel()
self.assertEqual(([(CustomPKModel, ('my_pk_field',))], []), m._get_unique_checks())
def test_unique_for_date_gets_picked_up(self):
m = UniqueForDateModel()
self.assertEqual((
[(UniqueForDateModel, ('id',))],
[(UniqueForDateModel, 'date', 'count', 'start_date'),
(UniqueForDateModel, 'year', 'count', 'end_date'),
(UniqueForDateModel, 'month', 'order', 'end_date')]
), m._get_unique_checks()
)
def test_unique_for_date_exclusion(self):
m = UniqueForDateModel()
self.assertEqual((
[(UniqueForDateModel, ('id',))],
[(UniqueForDateModel, 'year', 'count', 'end_date'),
(UniqueForDateModel, 'month', 'order', 'end_date')]
), m._get_unique_checks(exclude='start_date')
)
class PerformUniqueChecksTest(TestCase):
def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(self):
# Regression test for #12560
with self.assertNumQueries(0):
mtv = ModelToValidate(number=10, name='Some Name')
setattr(mtv, '_adding', True)
mtv.full_clean()
def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
# Regression test for #12560
with self.assertNumQueries(1):
mtv = ModelToValidate(number=10, name='Some Name', id=123)
setattr(mtv, '_adding', True)
mtv.full_clean()
def test_primary_key_unique_check_not_performed_when_not_adding(self):
# Regression test for #12132
with self.assertNumQueries(0):
mtv = ModelToValidate(number=10, name='Some Name')
mtv.full_clean()
def test_unique_for_date(self):
Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
with self.assertRaises(ValidationError) as cm:
p.full_clean()
self.assertEqual(cm.exception.message_dict, {'title': ['Title must be unique for Posted date.']})
# Should work without errors
p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
p.full_clean()
# Should work without errors
p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9, 4))
p.full_clean()
p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
with self.assertRaises(ValidationError) as cm:
p.full_clean()
self.assertEqual(cm.exception.message_dict, {'slug': ['Slug must be unique for Posted year.']})
p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
with self.assertRaises(ValidationError) as cm:
p.full_clean()
self.assertEqual(cm.exception.message_dict, {'subtitle': ['Subtitle must be unique for Posted month.']})
p = Post(title="Django 1.0 is released")
with self.assertRaises(ValidationError) as cm:
p.full_clean()
self.assertEqual(cm.exception.message_dict, {'posted': ['This field cannot be null.']})
def test_unique_for_date_with_nullable_date(self):
FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
p = FlexibleDatePost(title="Django 1.0 is released")
try:
p.full_clean()
except ValidationError:
self.fail("unique_for_date checks shouldn't trigger when the associated DateField is None.")
p = FlexibleDatePost(slug="Django 1.0")
try:
p.full_clean()
except ValidationError:
self.fail("unique_for_year checks shouldn't trigger when the associated DateField is None.")
p = FlexibleDatePost(subtitle="Finally")
try:
p.full_clean()
except ValidationError:
self.fail("unique_for_month checks shouldn't trigger when the associated DateField is None.")
def test_unique_errors(self):
UniqueErrorsModel.objects.create(name='Some Name', no=10)
m = UniqueErrorsModel(name='Some Name', no=11)
with self.assertRaises(ValidationError) as cm:
m.full_clean()
self.assertEqual(cm.exception.message_dict, {'name': ['Custom unique name message.']})
m = UniqueErrorsModel(name='Some Other Name', no=10)
with self.assertRaises(ValidationError) as cm:
m.full_clean()
self.assertEqual(cm.exception.message_dict, {'no': ['Custom unique number message.']})
|
DONIKAN/django
|
tests/validation/test_unique.py
|
Python
|
bsd-3-clause
| 7,108
|
try:
1 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1 % 0
except ZeroDivisionError:
print("ZeroDivisionError")
|
pfalcon/micropython
|
tests/basics/int_divzero.py
|
Python
|
mit
| 146
|
class <caret>A(B
pass
|
caot/intellij-community
|
python/testData/codeInsight/smartEnter/class.py
|
Python
|
apache-2.0
| 25
|
def foo(bar):
""" @param """
|
asedunov/intellij-community
|
python/testData/completion/epydocTagsMiddle.after.py
|
Python
|
apache-2.0
| 32
|
class Foo:
@property
def bar(self):
import warnings
warnings.warn("this is deprecated", DeprecationWarning, 2)
foo = Foo()
foo.<warning descr="this is deprecated">bar</warning>
|
asedunov/intellij-community
|
python/testData/deprecation/deprecatedProperty.py
|
Python
|
apache-2.0
| 202
|
""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
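# Hedged usage sketch, not part of the gencodec.py output: it exercises the
# Codec class defined above by round-tripping U+0416 (CYRILLIC CAPITAL LETTER
# ZHE), which the decoding table maps to byte 0xB6.
if __name__ == '__main__':
    _codec = Codec()
    _encoded, _length = _codec.encode(u'\u0416')
    assert _encoded == b'\xb6'
    _decoded, _length = _codec.decode(_encoded)
    assert _decoded == u'\u0416'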
|
zwChan/VATEC
|
~/eb-virt/Lib/encodings/iso8859_5.py
|
Python
|
apache-2.0
| 13,578
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
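# Hedged sketch, not part of the Django locale data: the *_INPUT_FORMATS above
# use strftime syntax, so a date written the Croatian way, such as
# '25.10.2006.', parses with the second DATE_INPUT_FORMATS entry.
if __name__ == '__main__':
    import datetime
    _parsed = datetime.datetime.strptime('25.10.2006.', DATE_INPUT_FORMATS[1])
    assert _parsed.date() == datetime.date(2006, 10, 25)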
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/conf/locale/hr/formats.py
|
Python
|
bsd-3-clause
| 1,758
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import stat
import posixpath
import re
try:
from urllib.parse import unquote
except ImportError: # Python 2
from urllib import unquote
from django.http import (CompatibleStreamingHttpResponse, Http404,
HttpResponse, HttpResponseRedirect, HttpResponseNotModified)
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
from django.utils.translation import ugettext as _, ugettext_noop
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype, encoding = mimetypes.guess_type(fullpath)
mimetype = mimetype or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
response = CompatibleStreamingHttpResponse(open(fullpath, 'rb'), content_type=mimetype)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_noop("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory' : path + '/',
'file_list' : files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
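# Hedged sketch with hypothetical header values, illustrating the behaviour
# documented in the docstring above: a missing header counts as modified, an
# IE-style "; length=" suffix must match the file size, and only then is the
# modification time compared.
if __name__ == '__main__':
    _header = 'Sat, 29 Oct 1994 19:43:31 GMT; length=5193'
    assert was_modified_since(None)
    assert not was_modified_since(_header, mtime=0, size=5193)
    assert was_modified_since(_header, mtime=0, size=9999)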
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/views/static.py
|
Python
|
bsd-3-clause
| 5,193
|
"""
Iterator based sre token scanner
"""
import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
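# A minimal sketch of how the pieces above fit together; the two token
# functions are illustrative only and not part of simplejson itself.
if __name__ == '__main__':
    @pattern(r'[0-9]+')
    def _number(match, context):
        return int(match.group()), None
    @pattern(r'[a-z]+')
    def _word(match, context):
        return match.group(), None
    _tokens = [value for value, end in Scanner([_number, _word]).iterscan('abc123xyz')]
    assert _tokens == ['abc', 123, 'xyz']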
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-0.96/django/utils/simplejson/scanner.py
|
Python
|
bsd-3-clause
| 2,009
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.swf
class SWFCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
swf = True
regions = boto.swf.regions()
def sample_service_call(self, conn):
conn.list_domains('REGISTERED')
|
harshilasu/GraphicMelon
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/swf/test_cert_verification.py
|
Python
|
gpl-3.0
| 1,553
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tk user interface implementation for namebench."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import os
import Queue
import sys
import threading
import tkFont
# Wildcard imports are evil.
from Tkinter import *
import tkMessageBox
import traceback
import addr_util
import base_ui
import conn_quality
import nameserver_list
import sys_nameservers
import util
THREAD_UNSAFE_TK = 0
LOG_FILE_PATH = util.GenerateOutputFilename('log')
def closedWindowHandler():
print 'Au revoir, mes amis!'
sys.exit(1)
global_message_queue = Queue.Queue()
global_last_message = None
def AddMsg(message, master=None, backup_notifier=None, **kwargs):
"""Add a message to the global queue for output."""
global global_message_queue
global global_last_message
global THREAD_UNSAFE_TK
new_message = StatusMessage(message, **kwargs)
if new_message != global_last_message:
global_message_queue.put(new_message)
if master:
try:
master.event_generate('<<msg>>', when='tail')
global_last_message = new_message
# Tk thread-safety workaround #1
except TclError:
# If we aren't thread safe, we already assume this won't work.
if not THREAD_UNSAFE_TK:
print 'First TCL Error:'
traceback.print_exc()
try:
backup_notifier(-1)
THREAD_UNSAFE_TK = 1
except:
print 'Backup notifier failure:'
traceback.print_exc()
class StatusMessage(object):
"""Messages to be passed from to the main thread from children.
Used to avoid thread issues inherent with Tk.
"""
def __init__(self, message, error=False, count=False, total=False,
enable_button=None, debug=False):
self.message = message
self.error = error
self.count = count
self.debug = debug
self.total = total
self.enable_button = enable_button
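# Hedged illustration of the handoff StatusMessage supports (all names are the
# ones defined in this module): a child thread queues a message with AddMsg(),
# which fires a virtual '<<msg>>' event; the main thread, bound in
# NameBenchGui.Execute(), then drains global_message_queue in
# MainWindow.MessageHandler(). For example:
#
#     AddMsg('Preparing benchmark', master=root)    # from a worker thread
#     root.bind('<<msg>>', app.MessageHandler)      # wiring on the main thread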
class WorkerThread(threading.Thread, base_ui.BaseUI):
"""Handle benchmarking and preparation in a separate UI thread."""
def __init__(self, supplied_ns, global_ns, regional_ns, options, data_source=None, master=None,
backup_notifier=None):
threading.Thread.__init__(self)
self.SetupDataStructures()
self.status_callback = self.msg
self.data_src = data_source
self.backup_notifier = backup_notifier
self.include_internal = False
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.master = master
self.options = options
self.resource_dir = os.path.dirname(os.path.dirname(__file__))
def msg(self, message, **kwargs):
"""Add messages to the main queue."""
return AddMsg(message, master=self.master, backup_notifier=self.backup_notifier, **kwargs)
def run(self):
self.msg('Started thread', enable_button=False)
try:
self.PrepareTestRecords()
self.PrepareNameServers()
self.PrepareBenchmark()
self.RunAndOpenReports()
except nameserver_list.OutgoingUdpInterception:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Outgoing requests were intercepted!', error=exception)
except nameserver_list.TooFewNameservers:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Too few nameservers to test', error=exception)
except conn_quality.OfflineConnection:
(exc_type, exception, tb) = sys.exc_info()
self.msg('The connection appears to be offline!', error=exception)
except:
(exc_type, exception, tb) = sys.exc_info()
      traceback.print_exc()
error_msg = '\n'.join(traceback.format_tb(tb)[-4:])
self.msg(exception, error=error_msg)
self.msg(None, enable_button=True)
class NameBenchGui(object):
"""The main GUI."""
def __init__(self, options, supplied_ns, global_ns, regional_ns, version=None):
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
def Execute(self):
self.root = Tk()
app = MainWindow(self.root, self.options, self.supplied_ns, self.global_ns,
self.regional_ns, self.version)
app.DrawWindow()
self.root.bind('<<msg>>', app.MessageHandler)
self.root.mainloop()
class MainWindow(Frame, base_ui.BaseUI):
"""The main Tk GUI class."""
def __init__(self, master, options, supplied_ns, global_ns, regional_ns, version=None):
"""TODO(tstromberg): Remove duplication from NameBenchGui class."""
Frame.__init__(self)
self.SetupDataStructures()
self.master = master
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
try:
self.log_file = open(LOG_FILE_PATH, 'w')
except:
print 'Failed to open %s for write' % LOG_FILE_PATH
self.master.protocol('WM_DELETE_WINDOW', closedWindowHandler)
def UpdateStatus(self, message, count=None, total=None, error=None, debug=False):
"""Update our little status window."""
if not message:
return None
if total:
state = '%s... [%s/%s]' % (message, count, total)
elif count:
state = '%s%s' % (message, '.' * count)
else:
state = message
print '> %s' % str(state)
try:
self.log_file.write('%s: %s\r\n' % (datetime.datetime.now(), state))
self.log_file.flush()
except:
pass
if not debug:
self.status.set(state[0:75])
def DrawWindow(self):
"""Draws the user interface."""
self.nameserver_form = StringVar()
self.status = StringVar()
self.query_count = IntVar()
self.data_source = StringVar()
self.health_performance = StringVar()
self.location = StringVar()
self.use_global = IntVar()
self.use_regional = IntVar()
self.use_censor_checks = IntVar()
self.share_results = IntVar()
self.master.title('namebench')
outer_frame = Frame(self.master)
outer_frame.grid(row=0, padx=16, pady=16)
inner_frame = Frame(outer_frame, relief=GROOVE, bd=2, padx=12, pady=12)
inner_frame.grid(row=0, columnspan=2)
status = Label(outer_frame, text='...', textvariable=self.status)
status.grid(row=15, sticky=W, column=0)
if sys.platform[:3] == 'win':
seperator_width = 490
else:
seperator_width = 585
bold_font = tkFont.Font(font=status['font'])
bold_font['weight'] = 'bold'
ns_label = Label(inner_frame, text='Nameservers')
ns_label.grid(row=0, columnspan=2, sticky=W)
ns_label['font'] = bold_font
nameservers = Entry(inner_frame, bg='white',
textvariable=self.nameserver_form,
width=80)
nameservers.grid(row=1, columnspan=2, sticky=W, padx=4, pady=2)
self.nameserver_form.set(', '.join(nameserver_list.InternalNameServers()))
global_button = Checkbutton(inner_frame,
text='Include global DNS providers (Google Public DNS, OpenDNS, UltraDNS, etc.)',
variable=self.use_global)
global_button.grid(row=2, columnspan=2, sticky=W)
global_button.toggle()
regional_button = Checkbutton(inner_frame,
text='Include best available regional DNS services',
variable=self.use_regional)
regional_button.grid(row=3, columnspan=2, sticky=W)
regional_button.toggle()
separator = Frame(inner_frame, height=2, width=seperator_width, bd=1, relief=SUNKEN)
separator.grid(row=4, padx=5, pady=5, columnspan=2)
ds_label = Label(inner_frame, text='Options')
ds_label.grid(row=5, column=0, sticky=W)
ds_label['font'] = bold_font
censorship_button = Checkbutton(inner_frame, text='Include censorship checks',
variable=self.use_censor_checks)
censorship_button.grid(row=6, columnspan=2, sticky=W)
share_button = Checkbutton(inner_frame,
text='Upload and share your anonymized results (help speed up the internet!)',
variable=self.share_results)
# Old versions of Tk do not support two-dimensional padding.
try:
share_button.grid(row=7, columnspan=2, sticky=W, pady=[0,10])
except TclError:
share_button.grid(row=7, columnspan=2, sticky=W)
loc_label = Label(inner_frame, text='Your location')
loc_label.grid(row=10, column=0, sticky=W)
loc_label['font'] = bold_font
run_count_label = Label(inner_frame, text='Health Check Performance')
run_count_label.grid(row=10, column=1, sticky=W)
run_count_label['font'] = bold_font
self.DiscoverLocation()
self.LoadDataSources()
source_titles = self.data_src.ListSourceTitles()
left_dropdown_width = max([len(x) for x in source_titles]) - 3
location_choices = [self.country, '(Other)']
location = OptionMenu(inner_frame, self.location, *location_choices)
location.configure(width=left_dropdown_width)
location.grid(row=11, column=0, sticky=W)
self.location.set(location_choices[0])
mode_choices = ['Fast', 'Slow (unstable network)']
right_dropdown_width = max([len(x) for x in mode_choices]) - 3
health_performance = OptionMenu(inner_frame, self.health_performance, *mode_choices)
health_performance.configure(width=right_dropdown_width)
health_performance.grid(row=11, column=1, sticky=W)
self.health_performance.set(mode_choices[0])
ds_label = Label(inner_frame, text='Query Data Source')
ds_label.grid(row=12, column=0, sticky=W)
ds_label['font'] = bold_font
numqueries_label = Label(inner_frame, text='Number of queries')
numqueries_label.grid(row=12, column=1, sticky=W)
numqueries_label['font'] = bold_font
data_source = OptionMenu(inner_frame, self.data_source, *source_titles)
data_source.configure(width=left_dropdown_width)
data_source.grid(row=13, column=0, sticky=W)
self.data_source.set(source_titles[0])
query_count = Entry(inner_frame, bg='white', textvariable=self.query_count)
query_count.grid(row=13, column=1, sticky=W, padx=4)
query_count.configure(width=right_dropdown_width + 6)
self.query_count.set(self.options.query_count)
self.button = Button(outer_frame, command=self.StartJob)
self.button.grid(row=15, sticky=E, column=1, pady=4, padx=1)
self.UpdateRunState(running=True)
self.UpdateRunState(running=False)
self.UpdateStatus('namebench %s is ready!' % self.version)
def MessageHandler(self, unused_event):
"""Pinged when there is a new message in our queue to handle."""
while global_message_queue.qsize():
m = global_message_queue.get()
if m.error:
self.ErrorPopup(m.message, m.error)
elif m.enable_button == False:
self.UpdateRunState(running=True)
elif m.enable_button == True:
self.UpdateRunState(running=False)
self.UpdateStatus(m.message, count=m.count, total=m.total, error=m.error, debug=m.debug)
def ErrorPopup(self, title, message):
print 'Showing popup: %s' % title
tkMessageBox.showerror(str(title), str(message), master=self.master)
def UpdateRunState(self, running=True):
"""Update the run state of the window, using nasty threading hacks."""
global THREAD_UNSAFE_TK
# try/except blocks added to work around broken Tcl/Tk libraries
# shipped with Fedora 11 (not thread-safe).
    # See http://code.google.com/p/namebench/issues/detail?id=23
if THREAD_UNSAFE_TK:
return
if running:
try:
self.button.config(state=DISABLED)
self.button.config(text='Running')
except TclError:
THREAD_UNSAFE_TK = True
self.UpdateStatus('Unable to disable button due to broken Tk library')
self.UpdateStatus('Running...')
else:
try:
self.button.config(state=NORMAL)
self.button.config(text='Start Benchmark')
except TclError:
pass
def StartJob(self):
"""Events that get called when the Start button is pressed."""
self.ProcessForm()
thread = WorkerThread(self.supplied_ns, self.global_ns, self.regional_ns, self.options,
data_source=self.data_src,
master=self.master, backup_notifier=self.MessageHandler)
thread.start()
def ProcessForm(self):
"""Read form and populate instance variables."""
self.supplied_ns = addr_util.ExtractIPTuplesFromString(self.nameserver_form.get())
if not self.use_global.get():
self.global_ns = []
if not self.use_regional.get():
self.regional_ns = []
if 'Slow' in self.health_performance.get():
self.options.health_thread_count = 10
self.options.query_count = self.query_count.get()
self.options.input_source = self.data_src.ConvertSourceTitleToType(self.data_source.get())
self.options.enable_censorship_checks = self.use_censor_checks.get()
self.options.upload_results = self.share_results.get()
|
jjoaonunes/namebench
|
libnamebench/tk.py
|
Python
|
apache-2.0
| 13,586
|
data = (
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
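# Hedged sketch, not part of the generated table, of how these modules are
# consumed: the high byte of a BMP code point selects the module (this file,
# x082, covers U+8200..U+82FF) and the low byte indexes 'data'. U+8239 is
# used purely as an illustration.
if __name__ == '__main__':
    _codepoint = 0x8239
    assert data[_codepoint & 0xFF] == 'Chuan '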
|
samuelmaudo/yepes
|
yepes/utils/unidecode/x082.py
|
Python
|
bsd-3-clause
| 4,649
|
# -*- coding: utf-8 -*-
# EForge project management system, Copyright © 2010, Element43
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Create your views here.
from eforge import plugins
from eforge.models import Project
from eforge.decorators import project_page, has_project_perm, user_page, group_page
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.http import HttpResponse, Http404
from django.template import RequestContext
@project_page
def summary(request, project):
return render_to_response('eforge/summary.html', {
'project': project
}, context_instance=RequestContext(request))
@project_page
@has_project_perm('eforge.manage')
def manage(request, project):
tabs = plugins.provider['managepg']
print tabs
if not 'pg' in request.GET:
return render_to_response('eforge/manage.html', {
'project': project,
'tabs': tabs.items(),
}, context_instance=RequestContext(request))
else:
pg = request.GET['pg']
if not pg in tabs:
raise Http404()
return tabs[pg]['view'](request, project)
@user_page
def user(request, user):
return render_to_response('eforge/user.html', {
'pguser': user,
})
@group_page
def group(request, group):
return render_to_response('eforge/group.html', {
'group': group,
})
def about(request):
import platform
import django
import eforge
return render_to_response('eforge/about.html', {
'plugins': plugins.plugins,
'eforgever': eforge.get_version(),
'djangover': django.get_version(),
'pyver': platform.python_version(),
}, context_instance=RequestContext(request))
|
oshepherd/eforge
|
eforge/views.py
|
Python
|
isc
| 2,416
|
"""Host Reservation DHCPv6"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_msg
import srv_control
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_duplicate_reservation_duid():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::10')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 2)
srv_control.ipv6_address_db_backend_reservation('3000::2', '$(EMPTY)', 'PostgreSQL', 2)
srv_control.upload_db_reservation('PostgreSQL')
    # upload should fail!  # TODO: add a step that checks the failed upload
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_duplicate_reservation_address():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::10')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:11')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.upload_db_reservation('PostgreSQL')
    # upload should fail!  # TODO: add a step that checks the failed upload
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_pgsql_conflicts_two_entries_for_one_host_different_subnets():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::10')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.new_db_backend_reservation('PostgreSQL', 'hw-address', 'f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 2, 'PostgreSQL', 2)
srv_control.ipv6_address_db_backend_reservation('3000::3', '$(EMPTY)', 'PostgreSQL', 2)
srv_control.upload_db_reservation('PostgreSQL')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', '667')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'ia_id', '667')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::3', expect_include=False)
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_pgsql_conflicts_reconfigure_server_with_reservation_of_used_address():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
# bigger prefix pool + reservation
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::10')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL',
'hw-address',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.upload_db_reservation('PostgreSQL')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'reconfigured')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1', expect_include=False)
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_pgsql_conflicts_reconfigure_server_with_reservation_of_used_address_2():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
# bigger prefix pool + reservation
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::10')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL',
'hw-address',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::1', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.upload_db_reservation('PostgreSQL')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'reconfigured')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1', expect_include=False)
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_pgsql_conflicts_reconfigure_server_with_reservation_of_used_address_renew_before_expire():
misc.test_setup()
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::2')
# Use PostgreSQL reservation system.
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
# SAVE VALUES
srv_msg.client_save_option('IA_NA')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
# bigger prefix pool + reservation
misc.test_setup()
srv_control.set_time('renew-timer', 105)
srv_control.set_time('rebind-timer', 106)
srv_control.set_time('valid-lifetime', 107)
srv_control.set_time('preferred-lifetime', 108)
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::3')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::2', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.upload_db_reservation('PostgreSQL')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'reconfigured')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_add_saved_option()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'validlft', 0)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 107)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::3')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
@pytest.mark.pgsql
def test_v6_host_reservation_pgsql_conflicts_reconfigure_server_with_reservation_of_used_address_renew_after_expire():
misc.test_setup()
srv_control.set_time('renew-timer', 5)
srv_control.set_time('rebind-timer', 6)
srv_control.set_time('preferred-lifetime', 7)
srv_control.set_time('valid-lifetime', 8)
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', 11)
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_sets_value('Client', 'ia_id', 22)
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 11)
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_sets_value('Client', 'ia_id', 22)
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
# SAVE VALUES
srv_msg.client_save_option('IA_NA')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
# bigger address pool + reservation
srv_msg.forge_sleep(5, 'seconds')
misc.test_setup()
srv_control.set_time('renew-timer', 5)
srv_control.set_time('rebind-timer', 6)
srv_control.set_time('preferred-lifetime', 7)
srv_control.set_time('valid-lifetime', 8)
srv_control.config_srv_subnet('3000::/30', '3000::1-3000::3')
srv_control.enable_db_backend_reservation('PostgreSQL')
srv_control.new_db_backend_reservation('PostgreSQL', 'duid', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.update_db_backend_reservation('dhcp6_subnet_id', 1, 'PostgreSQL', 1)
srv_control.ipv6_address_db_backend_reservation('3000::2', '$(EMPTY)', 'PostgreSQL', 1)
srv_control.upload_db_reservation('PostgreSQL')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'reconfigured')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_sets_value('Client', 'IA_Address', '3000::1')
srv_msg.client_does_include('Client', 'IA_Address')
srv_msg.client_sets_value('Client', 'IA_Address', '3000::2')
srv_msg.client_does_include('Client', 'IA_Address')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'validlft', 0)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 8)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::3')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
|
isc-projects/forge
|
tests/dhcpv6/kea_only/host_reservation/test_host_reservation_address_conflicts_pgsql.py
|
Python
|
isc
| 22,290
|
import textwrap
from unittest import mock
from unittest.mock import call
import pytest
from flask_forecaster.config import Config, ConfigError, parse_vcap, require
@mock.patch('flask_forecaster.config.os.getenv')
def test_require_success(getenv):
getenv.return_value = 'world'
assert require('hello') == 'world'
getenv.assert_called_once_with('hello')
@mock.patch('flask_forecaster.config.os.getenv')
def test_require_failure(getenv):
getenv.side_effect = ConfigError
with pytest.raises(ConfigError):
require('hello')
getenv.assert_called_once_with('hello')
@mock.patch('flask_forecaster.config.os.getenv')
class TestConfig:
def test_unrecognised_env(self, getenv):
getenv.side_effect = ['', 'garbage', '', '']
with pytest.raises(ValueError):
Config.for_current_env()
def test_unsupplied_env(self, getenv):
env = 'dev'
getenv.side_effect = ['', env, '', '']
config = Config.for_current_env('hello')
assert config.environment == env
getenv.assert_any_call('hello', 'prod')
def test_default_env(self, getenv):
env = 'test'
getenv.side_effect = ['', env, '', '']
config = Config.for_current_env('hello', 'world')
assert config.environment == env
getenv.assert_any_call('hello', 'world')
def test_config_rtfd(self, getenv):
getenv.return_value = 'foo'
config = Config.for_current_env('hello', 'world')
assert config.SQLALCHEMY_DATABASE_URI is None
getenv.assert_any_call('READTHEDOCS')
@pytest.mark.parametrize('env,debug,testing,requires', [
('dev', True, False, []),
('test', False, True, ['VALID_API_TOKEN', 'ACCESSIBLE_PROJECT']),
('prod', False, False, ['FLASK_SECRET_KEY', 'PORT']),
])
@mock.patch('flask_forecaster.config.require')
@mock.patch('flask_forecaster.config.parse_vcap')
def test_config_environment(self, parse_vcap, require, getenv, env, debug, testing, requires):
getenv.side_effect = lambda env_var, default=None: default
config = Config.for_current_env(default=env)
assert config.DEBUG is debug
assert config.TESTING is testing
require.assert_has_calls(
[call(req) for req in requires],
any_order=True,
)
if env == 'prod':
# Production environments also access VCAP_SERVICES
parse_vcap.assert_called_once_with(
'POSTGRES_SERVICE',
(0, 'credentials', 'uri'),
)
@mock.patch('flask_forecaster.config.require')
def test_parse_vcap(_require):
required = dict(
VCAP_SERVICES=textwrap.dedent(
"""
{
"elephantsql-dev": [
{
"name": "elephantsql-dev-c6c60",
"label": "elephantsql-dev",
"plan": "turtle",
"credentials": {
"uri": "postgres://somepath"
}
}
]
}
"""
),
POSTGRES_SERVICE='elephantsql-dev',
)
_require.side_effect = lambda name: required[name]
result = parse_vcap(
'POSTGRES_SERVICE',
(0, 'credentials', 'uri'),
)
assert result == 'postgres://somepath'
_require.assert_any_call('VCAP_SERVICES')
_require.assert_any_call('POSTGRES_SERVICE')
|
textbook/flask-forecaster
|
tests/test_config.py
|
Python
|
isc
| 3,431
|
"""
Collection of astronomy-related functions and utilities
"""
__version__ = '0.1.1'
|
KathleenLabrie/KLpyastro
|
klpyastro/__init__.py
|
Python
|
isc
| 86
|
from collections import defaultdict
from operator import itemgetter
import re
def isRealRoom(name, checksum):
if len(checksum) != 5:
raise Exception
totals = defaultdict(int)
for c in name:
if c != '-':
totals[c] += 1
pairs = zip(totals.keys(), totals.values())
alphaPairs = sorted(pairs, key=itemgetter(0))
freqPairs = sorted(alphaPairs, key=itemgetter(1), reverse=True)
genCheckSum = ''
for a, b in freqPairs:
genCheckSum += a
return genCheckSum[:5] == checksum
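# Illustrative check (not in the original file): per the Advent of Code 2016
# day 4 statement, "aaaaa-bbb-z-y-x" has checksum "abxyz" -- letters ordered by
# descending frequency, ties broken alphabetically -- so this should hold:
#   assert isRealRoom("aaaaa-bbb-z-y-x", "abxyz")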
def main():
f = open('input.txt', 'r')
sectorSum = 0
for line in f:
room, metadata = line.rsplit('-', 1)
match = re.search(r'(\d+)\[(.{5})\]', metadata)
sector = int(match.group(1))
checksum = match.group(2)
if(isRealRoom(room, checksum)):
sectorSum += sector
print(sectorSum)
if __name__ == "__main__":
main()
|
kmcginn/advent-of-code
|
2016/day04/security.py
|
Python
|
mit
| 922
|
from typing import List
class Solution:
def arrayStringsAreEqualV1(self, word1: List[str], word2: List[str]) -> bool:
return "".join(word1) == "".join(word2)
def arrayStringsAreEqualV2(self, word1: List[str], word2: List[str]) -> bool:
def generator(word: List[str]):
for s in word:
for c in s:
yield c
yield None
for c1, c2 in zip(generator(word1), generator(word2)):
if c1 != c2:
return False
return True
# TESTS
for word1, word2, expected in [
(["ab", "c"], ["a", "bc"], True),
(["a", "cb"], ["ab", "c"], False),
(["abc", "d", "defg"], ["abcddefg"], True),
]:
sol = Solution()
actual = sol.arrayStringsAreEqualV1(word1, word2)
print("Array strings", word1, "and", word2, "are equal ->", actual)
assert actual == expected
assert sol.arrayStringsAreEqualV2(word1, word2) == expected
|
l33tdaima/l33tdaima
|
pr1662e/array_strings_are_equal.py
|
Python
|
mit
| 950
|
import math
from pyb import DAC, micros, elapsed_micros
def tone1(freq):
t0 = micros()
dac = DAC(1)
while True:
theta = 2*math.pi*float(elapsed_micros(t0))*freq/1e6
fv = math.sin(theta)
v = int(126.0 * fv) + 127
#print("Theta %f, sin %f, scaled %d" % (theta, fv, v))
#delay(100)
dac.write(v)
def tone2(freq):
t0 = micros()
dac = DAC(1)
omega = 2 * math.pi * freq / 1e6
while True:
theta = omega*float(elapsed_micros(t0))
fv = math.sin(theta)
v = int(126.0 * fv) + 127
#print("Theta %f, sin %f, scaled %d" % (theta, fv, v))
#delay(100)
dac.write(v)
def tone3(freq, l_buf=256):
dac = DAC(1)
dtheta = 2 * math.pi / l_buf
scale = lambda fv: int(126.0 * fv) + 127
buf = bytearray(scale(math.sin(dtheta*t)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone4(freq, l_buf=256):
dac = DAC(1)
dtheta = 2 * math.pi / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(math.sin(dtheta*t)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone5(freq, wavefun=lambda x: math.sin(2.0*math.pi*x), l_buf=256):
dac = DAC(1)
dt = 1.0 / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(wavefun(t*dt)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone6(freq, wavefun=lambda x: math.sin(2.0*math.pi*x), l_buf=256, dacnum=1):
dac = DAC(dacnum)
dt = 1.0 / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(wavefun(t*dt)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
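# Illustrative usage (hypothetical; assumes a pyboard with DAC 1 wired to an output):
#   tone3(440)   # ~440 Hz sine wave from a 256-sample circular buffer
#   tone6(440, wavefun=lambda x: 1.0 if x < 0.5 else -1.0)   # square wave, x runs over one period [0, 1)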
|
pramasoul/pyboard-fun
|
tone.py
|
Python
|
mit
| 1,739
|
# Problem name: 12148 Electricity
# Problem url: https://uva.onlinejudge.org/external/121/12148.pdf
# Author: Andrey Yemelyanov
import sys
import math
import datetime
def readline():
return sys.stdin.readline().strip()
def main():
while True:
n_readings = int(readline())
if n_readings == 0:
break
meter_readings = []
for i in range(n_readings):
reading = [int(x) for x in readline().split()]
date = datetime.date(reading[2], reading[1], reading[0])
consumption = reading[3]
meter_readings.append((date, consumption))
c = get_daily_consumption(meter_readings)
print(len(c), sum(c))
def get_daily_consumption(meter_readings):
c = []
for i in range(len(meter_readings)):
if i > 0:
current_date = meter_readings[i][0]
current_consumption = meter_readings[i][1]
prev_date = meter_readings[i - 1][0]
prev_consumption = meter_readings[i - 1][1]
if prev_date + datetime.timedelta(days = 1) == current_date:
c.append(current_consumption - prev_consumption)
return c
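# Illustrative example (values are made up): meter readings of 100 on 2016-01-01
# and 130 on 2016-01-02 are consecutive days, so one daily consumption of 30 is
# recorded; readings separated by a gap of more than one day contribute nothing.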
if __name__=="__main__":
main()
|
andrey-yemelyanov/competitive-programming
|
cp-book/ch1/adhoc/time/12148_Electricity.py
|
Python
|
mit
| 1,212
|
import DeepFried2 as df
from .. import dfext
def mknet(mkbn=lambda chan: df.BatchNormalization(chan, 0.95)):
kw = dict(mkbn=mkbn)
net = df.Sequential(
# -> 128x48
df.SpatialConvolutionCUDNN(3, 64, (7,7), border='same', bias=None),
dfext.resblock(64, **kw),
df.PoolingCUDNN((2,2)), # -> 64x24
dfext.resblock(64, **kw),
dfext.resblock(64, **kw),
dfext.resblock(64, 96, **kw),
df.PoolingCUDNN((2,2)), # -> 32x12
dfext.resblock(96, **kw),
dfext.resblock(96, **kw),
df.PoolingCUDNN((2,2)), # -> 16x6
dfext.resblock(96, **kw),
dfext.resblock(96, **kw),
dfext.resblock(96, 128, **kw),
df.PoolingCUDNN((2,2)), # -> 8x3
dfext.resblock(128, **kw),
dfext.resblock(128, **kw),
df.PoolingCUDNN((2,3)), # -> 4x1
dfext.resblock(128, **kw),
# Eq. to flatten + linear
df.SpatialConvolutionCUDNN(128, 256, (4,1), bias=None),
mkbn(256), df.ReLU(),
df.StoreOut(df.SpatialConvolutionCUDNN(256, 128, (1,1)))
)
net.emb_mod = net[-1]
net.in_shape = (128, 48)
net.scale_factor = (2*2*2*2*2, 2*2*2*2*3)
print("Net has {:.2f}M params".format(df.utils.count_params(net)/1000/1000), flush=True)
return net
def add_piou(lunet2):
newnet = lunet2[:-1]
newnet.emb_mod = lunet2[-1]
newnet.iou_mod = df.StoreOut(df.Sequential(df.SpatialConvolutionCUDNN(256, 1, (1,1)), df.Sigmoid()))
newnet.add(df.RepeatInput(newnet.emb_mod, newnet.iou_mod))
newnet.embs_from_out = lambda out: out[0]
newnet.ious_from_out = lambda out: out[1][:,0] # Also remove the first size-1 dimension.
newnet.in_shape = lunet2.in_shape
newnet.scale_factor = lunet2.scale_factor
print("Added {:.2f}k params".format(df.utils.count_params(newnet.iou_mod)/1000), flush=True)
return newnet
|
VisualComputingInstitute/towards-reid-tracking
|
lib/models/lunet2.py
|
Python
|
mit
| 1,896
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..proto.summary_pb2 import Summary
from ..proto.summary_pb2 import SummaryMetadata
from ..proto.tensor_pb2 import TensorProto
from ..proto.tensor_shape_pb2 import TensorShapeProto
import os
import time
import numpy as np
# import tensorflow as tf
# from tensorboard.plugins.beholder import im_util
# from . import im_util
from .file_system_tools import read_pickle,\
write_pickle, write_file
from .shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME, SUMMARY_COLLECTION_KEY_NAME, SECTION_INFO_FILENAME
from . import video_writing
# from .visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[video_writing.FFmpegVideoOutput, video_writing.PNGVideoOutput])
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not os.path.exists(self.PLUGIN_LOGDIR + '/config.pkl'):
os.makedirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG,
'{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
# self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, frame):
'''Writes the frame to disk as a tensor summary.'''
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
smd = SummaryMetadata()
tensor = TensorProto(
dtype='DT_FLOAT',
float_val=frame.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[TensorShapeProto.Dim(size=frame.shape[0]),
TensorShapeProto.Dim(size=frame.shape[1]),
TensorShapeProto.Dim(size=frame.shape[2])]
)
)
summary = Summary(value=[Summary.Value(
tag=TAG_NAME, metadata=smd, tensor=tensor)]).SerializeToString()
write_file(summary, path)
@staticmethod
def stats(tensor_and_name):
imgstats = []
for (img, name) in tensor_and_name:
immax = img.max()
immin = img.min()
imgstats.append(
{
'height': img.shape[0],
'max': str(immax),
'mean': str(img.mean()),
'min': str(immin),
'name': name,
'range': str(immax - immin),
'shape': str((img.shape[1], img.shape[2]))
})
return imgstats
def _get_final_image(self, config, trainable=None, arrays=None, frame=None):
if config['values'] == 'frames':
# print('===frames===')
final_image = frame
elif config['values'] == 'arrays':
# print('===arrays===')
final_image = np.concatenate([arr for arr, _ in arrays])
stat = self.stats(arrays)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
elif config['values'] == 'trainable_variables':
# print('===trainable===')
final_image = np.concatenate([arr for arr, _ in trainable])
stat = self.stats(trainable)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
if len(final_image.shape) == 2: # Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, trainable, arrays, frame, config):
final_image = self._get_final_image(config, trainable, arrays, frame)
self._write_summary(final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
print('Starting recording using %s' %
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
print('Finished recording')
# TODO: blanket try and except for production? I don't want someone's script to die
# after weeks of running because of a visualization.
def update(self, trainable=None, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
trainable: a list of namedtuple (tensors, name).
arrays: a list of namedtuple (tensors, name).
frame: a 2D or 3D numpy array holding the image frame to visualize.
'''
new_config = self._get_config()
if True or self._enough_time_has_passed(self.previous_config['FPS']):
# self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(
trainable, arrays, frame, new_config)
self._update_recording(final_image, new_config)
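# Minimal usage sketch (hypothetical tensor names; not part of the original file):
#   beholder = Beholder(logdir='/tmp/logs')
#   beholder.update(arrays=[(weights_array, 'fc1/weights')], frame=current_frame)
# Which input is rendered depends on config['values'] ('frames', 'arrays', or
# 'trainable_variables'); arrays/trainable are lists of (ndarray, name) pairs.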
##############################################################################
# @staticmethod
# def gradient_helper(optimizer, loss, var_list=None):
# '''A helper to get the gradients out at each step.
# Args:
# optimizer: the optimizer op.
# loss: the op that computes your loss value.
# Returns: the gradient tensors and the train_step op.
# '''
# if var_list is None:
# var_list = tf.trainable_variables()
# grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
# grads = [pair[0] for pair in grads_and_vars]
# return grads, optimizer.apply_gradients(grads_and_vars)
# implements pytorch backward later
class BeholderHook():
pass
# """SessionRunHook implementation that runs Beholder every step.
# Convenient when using tf.train.MonitoredSession:
# ```python
# beholder_hook = BeholderHook(LOG_DIRECTORY)
# with MonitoredSession(..., hooks=[beholder_hook]) as sess:
# sess.run(train_op)
# ```
# """
# def __init__(self, logdir):
# """Creates new Hook instance
# Args:
# logdir: Directory where Beholder should write data.
# """
# self._logdir = logdir
# self.beholder = None
# def begin(self):
# self.beholder = Beholder(self._logdir)
# def after_run(self, run_context, unused_run_values):
# self.beholder.update(run_context.session)
|
lanpa/tensorboardX
|
tensorboardX/beholder/beholder.py
|
Python
|
mit
| 8,355
|
#Author Emily Keiser
def addition(x,y):
return int(x)+int(y)
def subtraction (x,y) :
return int(x) -int(y)
def multiplication (x,y) :
return int(x) *int(y)
def module (x,y) :
return int(x) %int(y)
a=raw_input("Enter variable a: ")
b=raw_input("Enter variable b: ")
print addition (a,b)
print subtraction (a,b)
print multiplication (a,b)
print module (a,b)
#addition
#c=int(a)+int(b)
#Output?
#print(c)
#subtraction
#d=int(a)-int(b)
#print(d)
#multiplication
#e=int(a)*int(b)
#print(e)
#module
#f=36%5
#print(f)
|
davidvillaciscalderon/PythonLab
|
Session 3/basic_operations_with_function.py
|
Python
|
mit
| 537
|
# pylint: disable=api-one-deprecated
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, tools
_INTERVALS = {
"hours": lambda interval: relativedelta(hours=interval),
"days": lambda interval: relativedelta(days=interval),
"weeks": lambda interval: relativedelta(days=7 * interval),
"months": lambda interval: relativedelta(months=interval),
"now": lambda interval: relativedelta(hours=0),
}
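# For illustration (comment not in the original module): _INTERVALS maps an
# interval unit to a relativedelta factory, e.g. _INTERVALS["weeks"](2) yields
# relativedelta(days=14) and _INTERVALS["now"](5) yields a zero offset.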
class EventMailScheduler(models.Model):
_inherit = "event.mail"
interval_type = fields.Selection(
selection_add=[
("transferring_started", "Transferring started"),
("transferring_finished", "Transferring finished"),
]
)
@api.depends(
"event_id.state",
"event_id.date_begin",
"interval_type",
"interval_unit",
"interval_nbr",
)
def _compute_scheduled_date(self):
for rself in self:
if rself.interval_type not in [
"transferring_started",
"transferring_finished",
]:
return super(EventMailScheduler, rself)._compute_scheduled_date()
if rself.event_id.state not in ["confirm", "done"]:
rself.scheduled_date = False
else:
date, sign = rself.event_id.create_date, 1
rself.scheduled_date = datetime.strptime(
date, tools.DEFAULT_SERVER_DATETIME_FORMAT
) + _INTERVALS[rself.interval_unit](sign * rself.interval_nbr)
def execute(self, registration=None):
for rself in self:
if rself.interval_type not in [
"transferring_started",
"transferring_finished",
]:
return super(EventMailScheduler, rself).execute()
if registration:
rself.write(
{
"mail_registration_ids": [
(0, 0, {"registration_id": registration.id})
]
}
)
# execute scheduler on registrations
rself.mail_registration_ids.filtered(
lambda reg: reg.scheduled_date
and reg.scheduled_date
<= datetime.strftime(
fields.datetime.now(), tools.DEFAULT_SERVER_DATETIME_FORMAT
)
).execute()
return True
class EventMailRegistration(models.Model):
_inherit = "event.mail.registration"
@api.one
@api.depends(
"registration_id", "scheduler_id.interval_unit", "scheduler_id.interval_type"
)
def _compute_scheduled_date(self):
# keep the for-block even though it's api.one now (it was api.multi but it didn't work -- scheduled_date was empty)
# When the base module "event" is updated, we can simply change api.one to api.multi without changing the method body
for rself in self:
if rself.scheduler_id.interval_type not in [
"transferring_started",
"transferring_finished",
]:
return super(EventMailRegistration, rself)._compute_scheduled_date()
if rself.registration_id:
# date_open does not correspond to its meaning here,
# but is kept because this is copy-pasted code
date_open_datetime = fields.datetime.now()
rself.scheduled_date = date_open_datetime + _INTERVALS[
rself.scheduler_id.interval_unit
](rself.scheduler_id.interval_nbr)
|
it-projects-llc/website-addons
|
portal_event_tickets/models/event_mail.py
|
Python
|
mit
| 3,646
|
# Jacqueline Kory Westlund
# May 2016
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Personal Robots Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys # For getting generic exception info
import datetime # For getting time deltas for timeouts
import time # For sleep
import json # For packing ros message properties
import random # For picking robot responses and shuffling answer options
import logging # Log messages
import Queue # for queuing messages for the main game loop
from SS_Errors import NoStoryFound # Custom exception when no stories found
from ss_script_parser import ss_script_parser # Parses scripts
from ss_personalization_manager import ss_personalization_manager
from ss_ros import ss_ros # Our ROS connection
class ss_script_handler():
""" Social stories script handler parses and deals with script lines. Uses
the script parser to get the next line in a script. We keep loading script
lines and parsing script lines separate on the off chance that we might want
to replace how scripts are stored and accessed (e.g., in a database versus
in text files).
"""
# Constants for script playback:
# Time to pause after showing answer feedback and playing robot
# feedback speech before moving on to the next question.
ANSWER_FEEDBACK_PAUSE_TIME = 2
# Time to wait for robot to finish speaking or acting before
# moving on to the next script line (in seconds).
WAIT_TIME = 30
def __init__(self, ros_node, session, participant, script_path,
story_script_path, session_script_path, database, queue,
percent_correct_to_level):
""" Save references to ROS connection and logger, get scripts and
set up to read script lines
"""
# Set up logger.
self._logger = logging.getLogger(__name__)
self._logger.info("Setting up script handler...")
# Save reference to our ros node so we can publish messages.
self._ros_node = ros_node
# Save script paths so we can load scripts later.
self._script_path = script_path
if (story_script_path is None):
self._story_script_path = ""
else:
self._story_script_path = story_script_path
if (session_script_path is None):
self._session_script_path = ""
else:
self._session_script_path = session_script_path
# We get a reference to the main game node's queue so we can
# give it messages.
self._game_node_queue = queue
# Set up personalization manager so we can get personalized
# stories for this participant.
self._personalization_man = ss_personalization_manager(session,
participant, database, percent_correct_to_level)
# Set up script parser.
self._script_parser = ss_script_parser()
# These are other script parsers we may use later.
self._story_parser = None
self._repeat_parser = None
# If we have a repeating script, we will need to save its filename so
# we can re-load it when we repeat it.
self._repeating_script_name = ""
# Get session script from script parser and give to the script
# parser. Story scripts we will get later from the
# personalization manager.
try:
self._script_parser.load_script(self._script_path
+ self._session_script_path
+ self._script_parser.get_session_script(session))
except IOError:
self._logger.exception("Script parser could not open session "
+ "script!")
# Pass exception up so whoever wanted a script handler knows
# they didn't get a script.
raise
# Initialize flags and counters:
# Set up counter for how many stories have been told this session.
self._stories_told = 0
# When we start, we are not currently telling a story or
# repeating a script, or at the end of the game.
self._doing_story = False
self._repeating = False
self._end_game = False
# When we start, we are not asking a question, and so there is no
# current question type or number.
self._current_question_type = ""
self._current_question_num = 0
# For counting repetitions of a repeating script.
self._repetitions = 0
# The script will tell us the max number of repetitions.
self._max_repetitions = 1
# The script will tell us the max number of stories.
self._max_stories = 1
# The maximum number of incorrect user responses before the
# game moves on (can also be set in the script).
self._max_incorrect_responses = 2
# Set the maximum game time, in minutes. This can also be set
# in the game script.
self._max_game_time = datetime.timedelta(minutes=10)
# Sometimes we may need to know what the last user response we
# waited for was, and how long we waited.
self._last_response_to_get = None
self._last_response_timeout = None
# Save start time so we can check whether we've run out of time.
self._start_time = datetime.datetime.now()
# Initialize total time paused.
self._total_time_paused = datetime.timedelta(seconds=0)
# Initialize pause start time in case someone calls the resume
# game timer function before the pause game function.
self._pause_start_time = None
def iterate_once(self):
""" Play the next commands from the script """
try:
# We check whether we've reached the game time limit when
# we load new stories or when we are about to start a
# repeating script over again.
# Get next line from story script.
if self._doing_story and self._story_parser is not None:
self._logger.debug("Getting next line from story script.")
line = self._story_parser.next_line()
# If not in a story, get next line from repeating script.
elif self._repeating and self._repeat_parser is not None:
self._logger.debug("Getting next line from repeating script.")
line = self._repeat_parser.next_line()
# If not repeating, get next line from main session script.
else:
self._logger.debug("Getting next line from main session script.")
line = self._script_parser.next_line()
# We didn't read a line!
# If we get a stop iteration exception, we're at the end of the
# file and will stop iterating over lines.
except StopIteration as e:
# If we were doing a story, now we're done, go back to
# the previous script.
if self._doing_story:
self._logger.info("Finished story " + str(self._stories_told + 1)
+ " of " + str(self._max_stories) + "!")
self._doing_story = False
self._stories_told += 1
# If we were repeating a script, increment counter.
elif self._repeating:
self._repetitions += 1
self._logger.info("Finished repetition " + str(self._repetitions)
+ " of " + str(self._max_repetitions) + "!")
# If we've done enough repetitions, or if we've run out
# of game time, go back to the main session script (set
# the repeating flag to false).
if (self._repetitions >= self._max_repetitions) \
or self._end_game \
or ((datetime.datetime.now() - self._start_time) \
- self._total_time_paused >= self._max_game_time):
self._logger.info("Done repeating!")
self._repeating = False
# Otherwise, we need to repeat again. Reload the repeating
# script.
else:
# Create a script parser for the filename provided,
# assume it is in the session_scripts directory.
self._repeat_parser = ss_script_parser()
try:
self._repeat_parser.load_script(self._script_path
+ self._session_script_path
+ self._repeating_script_name)
except IOError:
self._logger.exception("Script parser could not open "
+ "session script to repeat! Skipping REPEAT line.")
self._repeating = False
return
# Otherwise we're at the end of the main script.
else:
self._logger.info("No more script lines to get!")
# Pass on the stop iteration exception, with additional
# information about the player's performance during the
# game, formatted as a json object.
emotion, tom, order = self._personalization_man. \
get_performance_this_session()
performance = {}
if emotion is not None:
performance["child-emotion-question-accuracy"] = \
emotion
if tom is not None:
performance["child-tom-question-accuracy"] = \
tom
if order is not None:
performance["child-order-question-accuracy"] = \
order
e.performance = json.dumps(performance)
raise
except ValueError:
# We may get this exception if we try to get the next line
# but the script file is closed. If that happens, something
# probably went wrong with ending playback of a story script
# or a repeating script. End repeating and end the current
# story so we go back to the main session script.
if self._doing_story:
self._doing_story = False
if self._repeating:
self._repeating = False
# Oh no got some unexpected error! Raise it again so we can
# figure out what happened and deal with it during debugging.
except Exception as e:
self._logger.exception("Unexpected exception! Error: %s", e)
raise
# We got a line: parse it!
else:
# Make sure we got a line before we try parsing it. We
# might not get a line if the file has closed or if
# next_line has some other problem.
if not line:
self._logger.warning("[iterate_once] Tried to get next line, "
+ "but got None!")
return
# Got a line - print for debugging.
self._logger.debug("LINE: " + repr(line))
# Parse line!
# Split on tabs.
elements = line.rstrip().split('\t')
self._logger.debug("... " + str(len(elements)) + " elements: \n... "
+ str(elements))
if len(elements) < 1:
self._logger.info("Line had no elements! Going to next line...")
return
# Do different stuff depending on what the first element is.
#########################################################
# Some STORY lines have only one part to the command.
elif len(elements) == 1:
# For STORY lines, play back the next story for this
# participant.
if "STORY" in elements[0]:
self._logger.debug("STORY")
# If line indicates we need to start a story, do so.
self._doing_story = True
# Create a script parser for the filename provided,
# assuming it is in the story scripts directory.
self._story_parser = ss_script_parser()
try:
self._story_parser.load_script(self._script_path
+ self._story_script_path
+ self._personalization_man.get_next_story_script())
except IOError:
self._logger.exception("Script parser could not open "
+ "story script! Skipping STORY line.")
self._doing_story = False
except AttributeError:
self._logger.exception("Script parser could not open "
+ "story script because no script was loaded! "
+ "Skipping STORY line.")
self._doing_story = False
except NoStoryFound:
self._logger.exception("Script parser could not get \
the next story script because no script was \
found by the personalization manager! \
Skipping STORY line.")
self._doing_story = False
# Line has 2+ elements, so check the other commands.
#########################################################
# For STORY SETUP lines, pick the next story to play so
# we can load its graphics and play back the story.
elif "STORY" in elements[0] and "SETUP" in elements[1]:
self._logger.debug("STORY SETUP")
# Pick the next story to play.
self._personalization_man.pick_next_story()
#########################################################
# For ROBOT lines, send command to the robot.
elif "ROBOT" in elements[0]:
self._logger.debug("ROBOT")
# Play a randomly selected story intro from the list.
if "STORY_INTRO" in elements[1]:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._story_intros[
random.randint(0,len(self._story_intros)-1)])
# Play a randomly selected story closing from the list.
elif "STORY_CLOSING" in elements[1]:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._story_closings[
random.randint(0,len(self._story_closings)-1)])
# Send a command to the robot, with properties.
elif len(elements) > 2:
self._ros_node.send_robot_command(elements[1],
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=elements[2])
# Send a command to the robot, without properties.
else:
self._ros_node.send_robot_command(elements[1], "")
#########################################################
# For OPAL lines, send command to Opal game
elif "OPAL" in elements[0]:
self._logger.debug("OPAL")
if "LOAD_ALL" in elements[1] and len(elements) >= 3:
# Load all objects listed in file -- the file is
# assumed to have properties for one object on each
# line.
to_load = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
for obj in to_load:
self._ros_node.send_opal_command("LOAD_OBJECT", obj)
# Get the next story and load graphics into game.
elif "LOAD_STORY" in elements[1]:
self._load_next_story()
# Load answers for game.
elif "LOAD_ANSWERS" in elements[1] and len(elements) >= 3:
self._load_answers(elements[2])
# Send an opal command, with properties.
elif len(elements) > 2:
self._ros_node.send_opal_command(elements[1], elements[2])
# Send an opal command, without properties.
else:
self._ros_node.send_opal_command(elements[1])
#########################################################
# For PAUSE lines, sleep for the specified number of
# seconds before continuing script playback.
elif "PAUSE" in elements[0] and len(elements) >= 2:
self._logger.debug("PAUSE")
try:
time.sleep(int(elements[1]))
except ValueError:
self._logger.exception("Not pausing! PAUSE command was "
+ "given an invalid argument (should be an int)!")
#########################################################
# For ADD lines, get a list of robot commands that can be
# used in response to particular triggers from the specified
# file and save them for later use -- all ADD lines should
# have 3 elements.
elif "ADD" in elements[0] and len(elements) >= 3:
self._logger.debug("ADD")
# Read list of responses from the specified file into the
# appropriate variable.
try:
if "INCORRECT_RESPONSES" in elements[1]:
self._incorrect_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._incorrect_responses)))
if "CORRECT_RESPONSES" in elements[1]:
self._correct_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._correct_responses)))
elif "START_RESPONSES" in elements[1]:
self._start_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._start_responses)))
elif "NO_RESPONSES" in elements[1]:
self._no_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._no_responses)))
elif "ANSWER_FEEDBACK" in elements[1]:
self._answer_feedback = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._answer_feedback)))
elif "STORY_INTROS" in elements[1]:
self._story_intros = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._story_intros)))
elif "STORY_CLOSINGS" in elements[1]:
self._story_closings = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._story_closings)))
elif "TIMEOUT_CLOSINGS" in elements[1]:
self._timeout_closings = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("Got "
+ str(len(self._timeout_closings)))
elif "MAX_STORIES_REACHED" in elements[1]:
self._max_stories_reached = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._max_stories_reached)))
except IOError:
self._logger.exception("Failed to add responses!")
else:
self._logger.info("Added " + elements[1])
#########################################################
# For SET lines, set the specified constant.
elif "SET" in elements[0] and len(elements) >= 3:
self._logger.debug("SET")
if "MAX_INCORRECT_RESPONSES" in elements[1]:
self._max_incorrect_responses = int(elements[2])
self._logger.info("Set MAX_INCORRECT_RESPONSES to " +
elements[2])
elif "MAX_GAME_TIME" in elements[1]:
self._max_game_time = datetime.timedelta(minutes=
int(elements[2]))
self._logger.info("Set MAX_GAME_TIME to " + elements[2])
elif "MAX_STORIES" in elements[1]:
self._max_stories = int(elements[2])
self._logger.info("Set MAX_STORIES to " + elements[2])
#########################################################
# For WAIT lines, wait for the specified user response,
# or for a timeout.
elif "WAIT" in elements[0] and len(elements) >= 3:
self._logger.debug("WAIT")
self.wait_for_response(elements[1], int(elements[2]))
#########################################################
# For QUESTION lines, save the question type and question number
# for later use.
elif "QUESTION" in elements[0] and len(elements) >= 3:
self._current_question_type = elements[1]
self._current_question_num = int(elements[2])
self._logger.info("Current question: type " + elements[1]
+ ", num " + elements[2])
#########################################################
# For REPEAT lines, repeat lines in the specified script
# file the specified number of times.
elif "REPEAT" in elements[0] and len(elements) >= 3:
self._logger.debug("REPEAT")
self._repeating = True
self._repetitions = 0
# Create a script parser for the filename provided,
# assume it is in the session_scripts directory.
self._repeat_parser = ss_script_parser()
self._repeating_script_name = elements[2]
try:
self._repeat_parser.load_script(self._script_path
+ self._session_script_path
+ elements[2])
except IOError:
self._logger.exception("Script parser could not open "
+ "session script to repeat! Skipping REPEAT line.")
self._repeating = False
return
# Figure out how many times we should repeat the script.
if "MAX_STORIES" in elements[1]:
try:
self._max_repetitions = self._max_stories
except AttributeError:
self._logger.exception("Tried to set MAX_REPETITIONS to"
+ " MAX_STORIES, but MAX_STORIES has not been "
+ "set . Setting to 1 repetition instead.")
self._max_repetitions = 1
else:
self._max_repetitions = int(elements[1])
self._logger.debug("Going to repeat " + elements[2] + " " +
str(self._max_repetitions) + " time(s).")
def _read_list_from_file(self, filename):
""" Read a list of robot responses from a file, return a list
of the lines from the file
"""
# Open script for reading.
try:
fh = open(filename, "r")
return fh.readlines()
except IOError as e:
self._logger.exception("Cannot open file: " + filename)
# Pass exception up so anyone trying to add a response list
# from a script knows it didn't work.
raise
def wait_for_response(self, response_to_get, timeout):
""" Wait for a user response or wait until the specified time
has elapsed. If the response is incorrect, allow multiple
attempts up to the maximum number of incorrect responses.
"""
for i in range(0, self._max_incorrect_responses):
self._logger.info("Waiting for user response...")
# Save the response we were trying to get in case we need
# to try again.
self._last_response_to_get = response_to_get
self._last_response_timeout = timeout
# Wait for the specified type of response, or until the
# specified time has elapsed.
response, answer = self._ros_node.wait_for_response(response_to_get,
datetime.timedelta(seconds=int(timeout)))
# After waiting for a response, need to play back an
# appropriate robot response.
# If we didn't receive a response, then it was probably
# because we didn't send a valid response to wait for.
# This is different from a TIMEOUT since we didn't time
# out -- we just didn't get a response of any kind.
if not response:
self._logger.info("Done waiting -- did not get valid response!")
return False
# If we received no user response before timing out, send a
# TIMEOUT message and pause the game.
elif "TIMEOUT" in response:
# Announce we timed out.
self._ros_node.send_game_state("TIMEOUT")
# Pause game and wait to be told whether we should try
# waiting again for a response or whether we should
# skip it and move on. Queue up the pause command so the
# main game loop can take action.
self._game_node_queue.put("PAUSE")
# Announce the game is pausing.
self._ros_node.send_game_state("PAUSE")
# Indicate that we did not get a response.
# We don't break and let the user try again because the
# external game monitor deals with TIMEOUT events, and
# will tell us whether to try waiting again or to just
# skip waiting for this response.
return False
# If response was INCORRECT, randomly select a robot
# response to an incorrect user action.
elif "INCORRECT" in response:
# Record incorrect response in the db.
self._personalization_man.record_user_response(
self._current_question_num, self._current_question_type,
answer)
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._incorrect_responses[random.randint(0,
len(self._incorrect_responses)-1)])
except AttributeError:
self._logger.exception("Could not play an incorrect "
+ "response. Maybe none were loaded?")
# Don't break so we allow the user a chance to respond
# again.
# If response was NO, randomly select a robot response to
# the user selecting no.
elif "NO" in response:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._no_responses[random.randint(0,
len(self._no_responses)-1)])
except AttributeError:
self._logger.exception("Could not play a response to "
+ "user's NO. Maybe none were loaded?")
# Don't break so we allow the user a chance to respond
# again.
# If response was CORRECT, randomly select a robot response
# to a correct user action, highlight the correct answer,
# and break out of response loop.
elif "CORRECT" in response:
# Record correct response in the db.
self._personalization_man.record_user_response(
self._current_question_num, self._current_question_type,
answer)
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._correct_responses[random.randint(0,
len(self._correct_responses)-1)])
self._ros_node.send_opal_command("SHOW_CORRECT")
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._answer_feedback[random.randint(0,
len(self._answer_feedback)-1)])
# Pause after speaking before hiding correct again
time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)
self._ros_node.send_opal_command("HIDE_CORRECT")
except AttributeError:
self._logger.exception("Could not play a correct "
+ "response or could not play robot's answer"
+ " feedback. Maybe none were loaded?")
# Break from the for loop so we don't give the user
# a chance to respond again.
break
# If response was START, randomly select a robot response to
# the user selecting START, and break out of response loop.
elif "START" in response:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._start_responses[random.randint(0,
len(self._start_responses)-1)])
except AttributeError:
self._logger.exception("Could not play response to"
+ "user's START. Maybe none were loaded?")
# Break from the for loop so we don't give the user
# a chance to respond again.
break
# We exhausted our allowed number of user responses, so have
# the robot do something instead of waiting more.
else:
# If user was never correct, play robot's correct answer
# feedback and show which answer was correct in the game.
if "CORRECT" in response_to_get:
try:
self._ros_node.send_opal_command("SHOW_CORRECT")
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._answer_feedback[random.randint(0,
len(self._answer_feedback)-1)])
# Pause after speaking before hiding correct again.
time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)
self._ros_node.send_opal_command("HIDE_CORRECT")
except AttributeError:
self._logger.exception("Could not play robot's answer"
+ " feedback! Maybe none were loaded?")
# If user never selects START (which is used to ask the user
# if they are ready to play), stop all stories and repeating
# scripts, continue with main script so we go to the end.
elif "START" in response_to_get:
self._repeating = False
self._doing_story = False
# We got a user response and responded to it!
return True
def skip_wait_for_response(self):
""" Skip waiting for a response; treat the skipped response as
a NO or INCORRECT response.
"""
# If the response to wait for was CORRECT or INCORRECT,
# randomly select a robot response to an incorrect user
# action.
if "CORRECT" in self._last_response_to_get:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
properties=self._incorrect_responses[random.randint(0, \
len(self._incorrect_responses)-1)])
except AttributeError:
self._logger.exception("Could not play an incorrect "
+ "response. Maybe none were loaded?")
# If response to wait for was YES or NO, randomly select a
# robot response for a NO user action.
elif "NO" in self._last_response_to_get:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
properties=self._no_responses[random.randint(0,
len(self._no_responses)-1)])
except AttributeError:
self._logger.exception("Could not play a response to "
+ "user's NO. Maybe none were loaded?")
def set_end_game(self):
""" End the game gracefully -- stop any stories or repeating
scripts, go back to main session script and finish.
"""
# For now, we just need to set a flag indicating we should end
# the game. When we check whether we should load another story
# or repeat a repeating script, this flag will be used to skip
# back to the main session script, to the end of the game.
self._end_game = True
def set_start_level(self, level):
""" When the game starts, a level to start at can be provided.
Pass this to the personalization manager to deal with, since it
deals with picking the levels of stories to play.
"""
self._personalization_man.set_start_level(level)
def pause_game_timer(self):
""" Track how much time we spend paused so when we check
whether we have reached the max game time, we don't include
time spent paused.
"""
self._pause_start_time = datetime.datetime.now()
def resume_game_timer(self):
""" Add how much time we spent paused to our total time spent
paused.
"""
# Since this function could theoretically be called before we
# get a call to pause_game_timer, we have to check that there
# is a pause start time, and then later, reset it so we can't
# add the same pause length multiple times to our total pause
# time.
if self._pause_start_time is not None:
self._total_time_paused += datetime.datetime.now() \
- self._pause_start_time
# Reset pause start time.
self._pause_start_time = None
def wait_for_last_response_again(self):
""" Wait for the same response that we just waited for again,
with the same parameters for the response and the timeout.
"""
return self.wait_for_response(self._last_response_to_get,
self._last_response_timeout)
def _load_answers(self, answer_list):
""" Load the answer graphics for this story """
# We are given a list of words that indicate what the answer
# options are. By convention, the first word is probably the
# correct answer; the others are incorrect answers. However,
# we won't set this now because this convention may not hold.
# We expect the SET_CORRECT OpalCommand to be used to set
# which answers are correct or incorrect.
# split the list of answers on commas.
answers = answer_list.strip().split(',')
# Shuffle answers to display them in a random order.
random.shuffle(answers)
# Load in the graphic for each answer.
for answer in answers:
toload = {}
# Remove whitespace from name before using it.
toload["name"] = answer.strip()
toload["tag"] = "PlayObject"
toload["slot"] = answers.index(answer) + 1
toload["draggable"] = False
toload["isAnswerSlot"] = True
self._ros_node.send_opal_command("LOAD_OBJECT", json.dumps(toload))
def _load_next_story(self):
""" Get the next story, set up the game scene with scene and
answer slots, and load scene graphics.
"""
# If we've told the max number of stories, or if we've reached
# the max game time, don't load another story even though we
# were told to load one -- instead, play error message from
# robot saying we have to be done now.
if self._stories_told >= self._max_stories \
or ((datetime.datetime.now() - self._start_time) \
- self._total_time_paused >= self._max_game_time) or self._end_game:
self._logger.info("We were told to load another story, but we've "
+ "already played the maximum number of stories or we ran"
" out of time! Skipping and ending now.")
self._doing_story = False
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
properties=self._max_stories_reached
[random.randint(0, len(self._max_stories_reached)-1)])
except AttributeError:
self._logger.exception("Could not play a max stories reached "
+ "response. Maybe none were loaded?")
# We were either told to play another story because a
# repeating script loads a story and the max number of
# repetitions is greater than the max number of stories,
# so more stories were requested than can be played, or
# because we ran out of time and were supposed to play more
# stories than we have time for. Either way, stop the
# repeating script if there is one.
self._repeating = False
return
# Get the details for the next story.
try:
scenes, in_order, num_answers = \
self._personalization_man.get_next_story_details()
except NoStoryFound:
# If no story was found, we can't load the story!
self._logger.exception("Cannot load story - no story to load was" +
" found!")
self._doing_story = False
return
# Set up the story scene in the game.
setup = {}
setup["numScenes"] = len(scenes)
setup["scenesInOrder"] = in_order
setup["numAnswers"] = num_answers
self._ros_node.send_opal_command("SETUP_STORY_SCENE", json.dumps(setup))
# Load the scene graphics.
for scene in scenes:
toload = {}
toload["name"] = "scenes/" + scene
toload["tag"] = "PlayObject"
toload["slot"] = scenes.index(scene) + 1
if not in_order:
toload["correctSlot"] = scenes.index(scene) + 1
toload["draggable"] = False if in_order else True
toload["isAnswerSlot"] = False
self._ros_node.send_opal_command("LOAD_OBJECT", json.dumps(toload))
# Tell the personalization manager that we loaded the story so
# it can keep track of which stories have been played.
self._personalization_man.record_story_loaded()
|
personal-robots/sar_social_stories
|
src/ss_script_handler.py
|
Python
|
mit
| 43,375
|
import tornado.web
import forms
import config
import io
class Index(tornado.web.RequestHandler):
"""
Returns the index page.
"""
def get(self):
form = forms.RecomputeForm()
recomputations_count = io.get_recomputations_count()
latest_recomputations = io.load_all_recomputations(config.latest_recomputations_count)
self.render("index.html", recompute_form=form, recomputations_count=recomputations_count,
latest_recomputations=latest_recomputations)
class Recomputations(tornado.web.RequestHandler):
"""
Returns the recomputations/search page.
"""
def initialize(self):
self.form = forms.FilterRecomputationsForm(self.request.arguments)
self.recomputations = io.load_all_recomputations()
def get(self):
self.render("recomputations.html", filter_recomputations_form=self.form, recomputations=self.recomputations)
def post(self):
if self.form.validate():
print "here"
name = self.form.name.data
if name != "":
print name
self.recomputations = [r for r in self.recomputations if r["name"] == name]
self.render("recomputations.html", filter_recomputations_form=self.form, recomputations=self.recomputations)
class Recomputation(tornado.web.RequestHandler):
"""
Returns the individual recomputation page.
"""
def get(self, name):
if name.isdigit():
recomputation = io.load_recomputation_by_id(int(name))
else:
recomputation = io.load_recomputation(name)
if recomputation is not None:
self.render("recomputation.html", recomputation=recomputation)
else:
self.render("recomputation404.html", name=name)
|
cjw-charleswu/Recompute
|
recompute/server/pageserver.py
|
Python
|
mit
| 1,804
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from hyputils.memex.util import markdown
class TestRender(object):
def test_it_renders_markdown(self):
actual = markdown.render("_emphasis_ **bold**")
assert "<p><em>emphasis</em> <strong>bold</strong></p>\n" == actual
def test_it_ignores_math_block(self):
actual = markdown.render("$$1 + 1 = 2$$")
assert "<p>$$1 + 1 = 2$$</p>\n" == actual
def test_it_ignores_inline_math(self):
actual = markdown.render(r"Foobar \(1 + 1 = 2\)")
assert "<p>Foobar \\(1 + 1 = 2\\)</p>\n" == actual
def test_it_sanitizes_the_output(self, markdown_render, sanitize):
markdown.render("foobar")
sanitize.assert_called_once_with(markdown_render.return_value)
@pytest.fixture
def markdown_render(self, patch):
return patch("hyputils.memex.util.markdown.markdown")
@pytest.fixture
def sanitize(self, patch):
return patch("hyputils.memex.util.markdown.sanitize")
class TestSanitize(object):
@pytest.mark.parametrize(
"text,expected",
[
(
'<a href="https://example.org">example</a>',
'<a href="https://example.org" rel="nofollow noopener" target="_blank">example</a>',
),
# Don't add rel and target attrs to mailto: links
('<a href="mailto:foo@example.net">example</a>', None),
('<a title="foobar">example</a>', None),
(
'<a href="https://example.org" rel="nofollow noopener" target="_blank" title="foobar">example</a>',
None,
),
("<blockquote>Foobar</blockquote>", None),
("<code>foobar</code>", None),
("<em>foobar</em>", None),
("<hr>", None),
("<h1>foobar</h1>", None),
("<h2>foobar</h2>", None),
("<h3>foobar</h3>", None),
("<h4>foobar</h4>", None),
("<h5>foobar</h5>", None),
("<h6>foobar</h6>", None),
('<img src="http://example.com/img.jpg">', None),
('<img src="/img.jpg">', None),
('<img alt="foobar" src="/img.jpg">', None),
('<img src="/img.jpg" title="foobar">', None),
('<img alt="hello" src="/img.jpg" title="foobar">', None),
("<ol><li>foobar</li></ol>", None),
("<p>foobar</p>", None),
("<pre>foobar</pre>", None),
("<strong>foobar</strong>", None),
("<ul><li>foobar</li></ul>", None),
],
)
def test_it_allows_markdown_html(self, text, expected):
if expected is None:
expected = text
assert markdown.sanitize(text) == expected
@pytest.mark.parametrize(
"text,expected",
[
("<script>evil()</script>", "<script>evil()</script>"),
(
'<a href="#" onclick="evil()">foobar</a>',
'<a href="#" rel="nofollow noopener" target="_blank">foobar</a>',
),
(
'<a href="#" onclick=evil()>foobar</a>',
'<a href="#" rel="nofollow noopener" target="_blank">foobar</a>',
),
("<a href=\"javascript:alert('evil')\">foobar</a>", "<a>foobar</a>"),
('<img src="/evil.jpg" onclick="evil()">', '<img src="/evil.jpg">'),
("<img src=\"javascript:alert('evil')\">", "<img>"),
],
)
def test_it_escapes_evil_html(self, text, expected):
assert markdown.sanitize(text) == expected
def test_it_adds_target_blank_and_rel_nofollow_to_links(self):
actual = markdown.sanitize('<a href="https://example.org">Hello</a>')
expected = '<a href="https://example.org" rel="nofollow noopener" target="_blank">Hello</a>'
assert actual == expected
|
tgbugs/hypush
|
test/memex/util/markdown_test.py
|
Python
|
mit
| 3,893
|
import typing as t
import contextlib
import pytest
import diana
State = t.NewType("State", dict)
AsyncState = t.NewType("AsyncState", dict)
async def no_op():
pass
@pytest.fixture
def module():
class ContextModule(diana.Module):
def __init__(self):
self.state: State = {"count": 0}
@diana.contextprovider
@contextlib.contextmanager
def provide(self) -> State:
self.state["count"] += 1
yield self.state
self.state["count"] -= 1
@diana.contextprovider
@contextlib.asynccontextmanager
async def provide_async(self) -> AsyncState:
self.state["count"] += 1
await no_op()
yield self.state
self.state["count"] -= 1
return ContextModule()
@pytest.fixture
def injector(module):
injector = diana.Injector()
injector.load(module)
return injector
def test_within_context(injector):
@injector
def uses_state(*, state: State):
assert state["count"] != 0
uses_state()
@pytest.mark.asyncio
async def test_within_context_async(injector):
@injector
async def uses_state(*, state: AsyncState):
assert state["count"] != 0
await uses_state()
|
xlevus/python-diana
|
tests/test_context.py
|
Python
|
mit
| 1,257
|
from django.db import models
from django_hstore import hstore
# Create your models here.
class Place(models.Model):
osm_type = models.CharField(max_length=1)
osm_id = models.IntegerField(primary_key=True)
class_field = models.TextField(db_column='class') # Field renamed because it was a Python reserved word.
type = models.TextField()
name = hstore.DictionaryField(blank=True) # This field type is a guess.
admin_level = models.IntegerField(null=True, blank=True)
housenumber = models.TextField(blank=True)
street = models.TextField(blank=True)
isin = models.TextField(blank=True)
postcode = models.TextField(blank=True)
country_code = models.CharField(max_length=2, blank=True)
extratags = models.TextField(blank=True) # This field type is a guess.
geometry = models.TextField() # This field type is a guess.
objects = hstore.HStoreManager()
    class Meta:
        managed = False
        db_table = 'place'
        unique_together = ('osm_id', 'class_field')
class Phonetique(models.Model):
nom = models.TextField()
#osm_id = models.IntegerField()
osm = models.ForeignKey(Place)
poids = models.IntegerField()
ville = models.CharField(max_length=200)
semantic = models.CharField(max_length=25)
    class Meta:
        managed = False
        db_table = 'phonetique'
def __unicode__(self):
return '%d, %s' % (self.poids, self.nom)
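# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original models): how the hstore-backed
# ``name`` field and the Place/Phonetique relation might be queried. The 'name:fr'
# key and 'Paris' value are assumptions, and exact hstore lookup support depends on
# the django_hstore version in use.
def _example_queries():
    # Places whose hstore ``name`` field contains the given key/value pair.
    french_named = Place.objects.filter(name__contains={'name:fr': 'Paris'})
    # Phonetique rows joined to their Place, heaviest weights first.
    heaviest = Phonetique.objects.select_related('osm').order_by('-poids')[:10]
    return french_named, heaviest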
|
lluc/django_jdf
|
jdf/models.py
|
Python
|
mit
| 1,451
|
from rest_framework.decorators import api_view
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import status
from .models import Person
from .serializers import PersonSerializer
@api_view(['GET', 'DELETE', 'PUT'])
def get_delete_update_person(request, fstname):
person = get_object_or_404(Person, firstname=fstname)
# get details of a single person
if request.method == 'GET':
serializer = PersonSerializer(person)
return Response(serializer.data)
# delete a single person
elif request.method == 'DELETE':
person.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
# update details of a single person
elif request.method == 'PUT':
serializer = PersonSerializer(person, data=request.data)
if serializer.is_valid():
serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def get_post_people(request):
# get all people
if request.method == 'GET':
people = Person.objects.all()
serializer = PersonSerializer(people, many=True)
return Response(serializer.data)
# insert a new record for a person
elif request.method == 'POST':
data = {
'firstname': request.data.get('firstname'),
'lastname': request.data.get('lastname'),
'country': request.data.get('country'),
'email': request.data.get('email'),
'phone': request.data.get('phone'),
'occupation_field': request.data.get('occupation_field'),
'occupation': request.data.get('occupation'),
'birthdate': request.data.get('birthdate'),
'description': request.data.get('description')
}
serializer = PersonSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
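# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): URL patterns that
# could route requests to the two views above. The paths shown are assumptions;
# the project's actual urls.py may differ.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('people/', views.get_post_people),
#       path('people/<str:fstname>/', views.get_delete_update_person),
#   ]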
|
gilleshenrard/ikoab_elise
|
api/views.py
|
Python
|
mit
| 2,172
|
from django.conf import settings
""" Your GSE API key """
GOOGLE_SEARCH_API_KEY = getattr(settings, 'GOOGLE_SEARCH_API_KEY', None)
""" The ID of the Google Custom Search Engine """
GOOGLE_SEARCH_ENGINE_ID = getattr(settings, 'GOOGLE_SEARCH_ENGINE_ID', None)
""" The API version. Defaults to 'v1' """
GOOGLE_SEARCH_API_VERSION = getattr(
settings, 'GOOGLE_SEARCH_API_VERSION', 'v1')
""" The number of search results to show per page """
GOOGLE_SEARCH_RESULTS_PER_PAGE = getattr(
settings, 'GOOGLE_SEARCH_RESULTS_PER_PAGE', 10)
""" The maximum number of pages to display """
GOOGLE_SEARCH_MAX_PAGES = getattr(settings, 'GOOGLE_SEARCH_MAX_PAGES', 10)
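# ---------------------------------------------------------------------------
# Illustrative only (not part of the original module): each value above is read
# with getattr(settings, ...), so it can be overridden from the host project's
# Django settings. The values below are placeholders, not real credentials.
#
#   # settings.py
#   GOOGLE_SEARCH_API_KEY = 'your-api-key'
#   GOOGLE_SEARCH_ENGINE_ID = 'your-cse-id'
#   GOOGLE_SEARCH_RESULTS_PER_PAGE = 20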
|
hzdg/django-google-search
|
googlesearch/__init__.py
|
Python
|
mit
| 662
|
'''
Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
Only one letter can be changed at a time
Each intermediate word must exist in the word list
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Return
[
["hit","hot","dot","dog","cog"],
["hit","hot","lot","log","cog"]
]
Note:
All words have the same length.
All words contain only lowercase alphabetic characters.
'''
class Solution(object):
def findLadders(self, beginWord, endWord, wordlist):
"""
:type beginWord: str
:type endWord: str
:type wordlist: Set[str]
        :rtype: List[List[str]]
"""
def bfs(front_level, end_level, is_forward, word_set, path_dic):
if len(front_level) == 0:
return False
if len(front_level) > len(end_level):
return bfs(end_level, front_level, not is_forward, word_set, path_dic)
for word in (front_level | end_level):
word_set.discard(word)
next_level = set()
done = False
while front_level:
word = front_level.pop()
for c in 'abcdefghijklmnopqrstuvwxyz':
for i in range(len(word)):
new_word = word[:i] + c + word[i + 1:]
if new_word in end_level:
done = True
add_path(word, new_word, is_forward, path_dic)
else:
if new_word in word_set:
next_level.add(new_word)
add_path(word, new_word, is_forward, path_dic)
return done or bfs(next_level, end_level, is_forward, word_set, path_dic)
def add_path(word, new_word, is_forward, path_dic):
if is_forward:
path_dic[word] = path_dic.get(word, []) + [new_word]
else:
path_dic[new_word] = path_dic.get(new_word, []) + [word]
def construct_path(word, end_word, path_dic, path, paths):
if word == end_word:
paths.append(path)
return
if word in path_dic:
for item in path_dic[word]:
construct_path(item, end_word, path_dic, path + [item], paths)
front_level, end_level = {beginWord}, {endWord}
path_dic = {}
bfs(front_level, end_level, True, wordlist, path_dic)
path, paths = [beginWord], []
construct_path(beginWord, endWord, path_dic, path, paths)
return paths
if __name__ == "__main__":
assert Solution().findLadders("hit", "cog", {"hot", "dot", "dog", "lot", "log"}) == [
["hit", "hot", "dot", "dog", "cog"],
["hit", "hot", "lot", "log", "cog"]
]
|
gavinfish/leetcode-share
|
python/126 Word Ladder II.py
|
Python
|
mit
| 3,018
|
""" generator.py: Contains the Generator class. """
import random
import copy
import graphics
from helpers import *
# Just to check we have generated the correct number of polyominoes
# {order: number of omiones}
counts = {1: 1, 2: 1, 3: 2, 4: 7, 5: 18, 6: 60}
class Generator:
""" A class for generating polyominoes. Call the generate function with the
polyomino order wanted. Please Note: This class has not been tested for
orders greater than 6. """
def generate(self, order):
""" Return a list of all the one-sided polyominoes of the given order.
Objects in returned list are 2D square lists representing the shape of
the polyominoes by boolean values.
generate(int) -> list<list<list<bool>>>
"""
self._order = order
ominoes = []
if order == 1:
ominoes = [[[True]]]
return ominoes
# This is the 'growth method' algorithm for generating polyominoes.
        # An order * order grid is made, then the bottom-left block is filled.
# The squares adjacent to that block are numbered, and one of them
# is randomly picked. This continues till order blocks are filled.
# Check to see if generated polyomino is a repeat, and continue
# till we've generated enough.
while len(ominoes) < counts[order]:
free_squares = {}
pick = 0
max_number = 0
omino = rect_list(order, order, False)
if order > 4:
# A different starting point for orders > 4
# This is so crosses and similar shapes can be generated
row, col = order - 2, 0
else:
row, col = order - 1, 0
omino[row][col] = True
for s in xrange(order - 1):
free_squares, max_number = self._number_adjacent_squares(omino,
(row, col), free_squares, max_number)
possible = [n for n in free_squares.keys() if n > pick]
pick = random.choice(possible)
row, col = free_squares[pick]
free_squares.pop(pick)
omino[row][col] = True
omino = self._normalise(omino)
if not [n for n in ominoes if n == omino]:
ominoes.append(omino)
return ominoes
def generate_colours(self, n):
""" Generate n unique colours and return as a list of RGB triples.
Colours are as contrasted as possible.
generate_colours(int) -> list<(int, int, int)>
"""
# This divides the 360 degrees of hue in the HSV colour space by n,
# and so chooses n colours with equally spaced hues.
colours = []
degrees = 360 / n
for i in xrange(n):
hsv = (degrees * i, 1.0, 0.78)
rgb = graphics.hsv2rgb(hsv)
colours.append(rgb)
return colours
def _normalise(self, polyomino):
""" Return a copy of the given polyomino with its rotation and position
normalised. That is, in its left- and bottom-most position and rotation.
_normalise(list<list<bool>>) -> list<list<bool>>
"""
# Bottom- and left-most rotation and position is defined here as the
# position in which the most bottom row and left column squares are
# filled.
adjusted = copy.deepcopy(polyomino)
rowfractions = {} # Fraction of bottom row filled
colfractions = {} # Fraction of left column filled
for rotation in xrange(4):
adjusted = self._move(adjusted)
rowfilled = adjusted[self._order - 1].count(True)
rowfraction = float(rowfilled) / self._order
rowfractions.update({rotation: rowfraction})
colfilled = [adjusted[row][0] for row in xrange(self._order)].count(True)
colfraction = float(colfilled) / self._order
colfractions.update({rotation: colfraction})
adjusted = self._rotate(adjusted)
# Pick the rotation with the largest fractions
rowpick = max(rowfractions.values())
rowpicked_rotations = [k for k, v in rowfractions.iteritems() \
if v == rowpick]
if len(rowpicked_rotations) > 1:
colpick = max([v for k, v in colfractions.iteritems() \
if k in rowpicked_rotations])
colpicked_rotations = [k for k, v in colfractions.iteritems() \
if v == colpick and k in rowpicked_rotations]
if len(colpicked_rotations) == 0:
rotations = rowpicked_rotations[0]
else:
rotations = colpicked_rotations[0]
else:
rotations = rowpicked_rotations[0]
normalised = copy.deepcopy(polyomino)
for rotation in xrange(rotations):
normalised = self._rotate(normalised)
normalised = self._move(normalised)
return normalised
def _move(self, polyomino):
""" Return a copy of the given polyomino pushed into the bottom left
corner of its grid.
_move(list<list<bool>>) -> list<list<bool>>
"""
moved = copy.deepcopy(polyomino)
while moved[self._order - 1].count(True) == 0:
# While bottom row is empty, move down
for row in xrange(self._order - 1, 0, -1):
for col in xrange(self._order):
moved[row][col] = moved[row - 1][col]
moved[0] = [False] * self._order
while [moved[row][0] for row in xrange(self._order)].count(True) == 0:
# While left column is empty, move left
for row in xrange(self._order):
for col in xrange(self._order - 1):
moved[row][col] = moved[row][col + 1]
for row in xrange(self._order):
moved[row][self._order - 1] = False
return moved
def _rotate(self, polyomino):
""" Return a copy of the given polyomino rotated clockwise 90 degrees.
_rotate(list<list<bool>>) -> list<list<bool>>
"""
rotated = rect_list(self._order, self._order, False)
for row in xrange(self._order):
for col in xrange(self._order):
rotated[col][self._order - 1 - row] = polyomino[row][col]
return rotated
def _number_adjacent_squares(self, polyomino, coordinates, \
numbered_squares, max_number):
""" Return a pair with a dictionary of all the adjacent squares in the
given polyomino, keyed by their number, where they are numbered
clockwise from the top, and the highest numbered square. Numbering will
start from max_number and any previously numbered squares in
numbered_squares will be included.
_number_adjacent_squares(list<list<bool>>, (int,int),
dict<int:(int,int)>, int) ->
(dict<int:(int, int)>, int)
"""
row, col = coordinates
possible_squares = [(row - 1, col), (row, col + 1),
(row + 1, col), (row, col - 1)]
adjacents = copy.deepcopy(numbered_squares)
n = max_number
for row, col in possible_squares:
if row in range(self._order) and col in range(self._order) \
and not polyomino[row][col] \
and not (row, col) in numbered_squares.values():
                # Number the square only if it's in the grid, not already
                # numbered and not already filled
n += 1
adjacents.update({n: (row, col)})
return adjacents, n
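# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), assuming a
# Python 2 environment since the class relies on xrange/iteritems:
#
#   gen = Generator()
#   tetrominoes = gen.generate(4)      # the 7 one-sided tetrominoes as bool grids
#   colours = gen.generate_colours(7)  # one evenly spaced RGB triple per shape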
|
nickjhughes/polyominohs
|
generator.py
|
Python
|
mit
| 7,968
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import unittest
from prov.model import *
from prov.dot import prov_to_dot
from prov.serializers import Registry
from prov.tests.examples import primer_example, primer_example_alternate
EX_NS = Namespace('ex', 'http://example.org/')
EX2_NS = Namespace('ex2', 'http://example2.org/')
EX_OTHER_NS = Namespace('other', 'http://exceptions.example.org/')
def add_label(record):
record.add_attributes(
[('prov:label', Literal("hello"))]
)
def add_labels(record):
record.add_attributes([
('prov:label', Literal("hello")),
('prov:label', Literal("bye", langtag="en")),
('prov:label', Literal("bonjour", langtag="fr"))
])
def add_types(record):
record.add_attributes([
('prov:type', 'a'),
('prov:type', 1),
('prov:type', 1.0),
('prov:type', True),
('prov:type', EX_NS['abc']),
('prov:type', datetime.datetime.now()),
('prov:type', Literal('http://boiled-egg.example.com', datatype=XSD_ANYURI)),
])
def add_locations(record):
record.add_attributes([
('prov:Location', "Southampton"),
('prov:Location', 1),
('prov:Location', 1.0),
('prov:Location', True),
('prov:Location', EX_NS['london']),
('prov:Location', datetime.datetime.now()),
('prov:Location', EX_NS.uri + "london"),
('prov:Location', Literal(2002, datatype=XSD['gYear'])),
])
def add_value(record):
record.add_attributes([
('prov:value', EX_NS['avalue'])
])
def add_further_attributes(record):
record.add_attributes([
(EX_NS['tag1'], "hello"),
(EX_NS['tag2'], "bye"),
(EX2_NS['tag3'], "hi"),
(EX_NS['tag1'], "hello\nover\nmore\nlines"),
])
def add_further_attributes0(record):
record.add_attributes([
(EX_NS['tag1'], "hello"),
(EX_NS['tag2'], "bye"),
(EX_NS['tag2'], Literal("hola", langtag="es")),
(EX2_NS['tag3'], "hi"),
(EX_NS['tag'], 1),
# long on python 2, int on python 3
(EX_NS['tag'], six.integer_types[-1](1)),
(EX_NS['tag'], Literal(1, datatype=XSD_SHORT)),
(EX_NS['tag'], Literal(1, datatype=XSD_DOUBLE)),
(EX_NS['tag'], 1.0),
(EX_NS['tag'], True),
(EX_NS['tag'], EX_NS.uri + "southampton"),
])
add_further_attributes_with_qnames(record)
def add_further_attributes_with_qnames(record):
record.add_attributes([
(EX_NS['tag'], EX2_NS['newyork']),
(EX_NS['tag'], EX_NS['london']),
])
class TestExtras(unittest.TestCase):
def test_dot(self):
        # This is naive, since we can't programmatically check the output is correct
document = ProvDocument()
bundle1 = ProvBundle(identifier=EX_NS['bundle1'])
bundle1.usage(activity=EX_NS['a1'], entity=EX_NS['e1'], identifier=EX_NS['use1'])
bundle1.entity(identifier=EX_NS['e1'], other_attributes={PROV_ROLE: "sausage"})
bundle1.activity(identifier=EX_NS['a1'])
document.activity(EX_NS['a2'])
bundle2 = ProvBundle(identifier=EX_NS['bundle2'])
bundle2.usage(activity=EX_NS['aa1'], entity=EX_NS['ee1'], identifier=EX_NS['use2'])
bundle2.entity(identifier=EX_NS['ee1'])
bundle2.activity(identifier=EX_NS['aa1'])
document.add_bundle(bundle1)
document.add_bundle(bundle2)
prov_to_dot(document)
def test_extra_attributes(self):
document = ProvDocument()
inf = document.influence(EX_NS['a2'], EX_NS['a1'], identifier=EX_NS['inf7'])
add_labels(inf)
add_types(inf)
add_further_attributes(inf)
self.assertEqual(len(inf.attributes), len(list(inf.formal_attributes) + inf.extra_attributes))
def test_serialize_to_path(self):
document = ProvDocument()
document.serialize("output.json")
os.remove('output.json')
document.serialize("http://netloc/outputmyprov/submit.php")
def test_bundle_no_id(self):
document = ProvDocument()
def test():
bundle = ProvBundle()
document.add_bundle(bundle)
self.assertRaises(ProvException, test)
def test_use_set_time_helpers(self):
dt = datetime.datetime.now()
document1 = ProvDocument()
document1.activity(EX_NS['a8'], startTime=dt, endTime=dt)
document2 = ProvDocument()
a = document2.activity(EX_NS['a8'])
a.set_time(startTime=dt, endTime=dt)
self.assertEqual(document1, document2)
self.assertEqual(a.get_startTime(), dt)
self.assertEqual(a.get_endTime(), dt)
def test_bundle_add_garbage(self):
document = ProvDocument()
def test():
document.add_bundle(document.entity(EX_NS['entity_trying_to_be_a_bundle']))
self.assertRaises(ProvException, test)
def test():
bundle = ProvBundle()
document.add_bundle(bundle)
self.assertRaises(ProvException, test)
def test_bundle_equality_garbage(self):
document = ProvBundle()
self.assertNotEqual(document, 1)
def test_bundle_is_bundle(self):
document = ProvBundle()
self.assertTrue(document.is_bundle())
def test_bundle_in_document(self):
document = ProvDocument()
bundle = document.bundle('b')
self.assertTrue(bundle in bundle.document.bundles)
def test_bundle_get_record_by_id(self):
document = ProvDocument()
self.assertEqual(document.get_record(None), None)
# record = document.entity(identifier=EX_NS['e1'])
# self.assertEqual(document.get_record(EX_NS['e1']), record)
#
# bundle = document.bundle(EX_NS['b'])
# self.assertEqual(bundle.get_record(EX_NS['e1']), record)
def test_bundle_get_records(self):
document = ProvDocument()
document.entity(identifier=EX_NS['e1'])
document.agent(identifier=EX_NS['e1'])
self.assertEqual(len(list(document.get_records(ProvAgent))), 1)
self.assertEqual(len(document.get_records()), 2)
def test_bundle_name_clash(self):
document = ProvDocument()
def test():
document.bundle(EX_NS['indistinct'])
document.bundle(EX_NS['indistinct'])
self.assertRaises(ProvException, test)
document = ProvDocument()
def test():
document.bundle(EX_NS['indistinct'])
bundle = ProvBundle(identifier=EX_NS['indistinct'])
document.add_bundle(bundle)
self.assertRaises(ProvException, test)
def test_document_helper_methods(self):
document = ProvDocument()
self.assertFalse(document.is_bundle())
self.assertFalse(document.has_bundles())
document.bundle(EX_NS['b'])
self.assertTrue(document.has_bundles())
self.assertEqual(u'<ProvDocument>', str(document))
def test_reading_and_writing_to_file_like_objects(self):
"""
Tests reading and writing to and from file like objects.
"""
# Create some random document.
document = ProvDocument()
document.entity(EX2_NS["test"])
objects = [io.BytesIO, io.StringIO]
Registry.load_serializers()
formats = Registry.serializers.keys()
for obj in objects:
for format in formats:
try:
buf = obj()
document.serialize(destination=buf, format=format)
buf.seek(0, 0)
new_document = ProvDocument.deserialize(source=buf,
format=format)
self.assertEqual(document, new_document)
except NotImplementedError:
# Some serializers might not implement serialize or deserialize method
pass # and this is fine in the context of this test
finally:
buf.close()
# def test_document_unification(self):
# # TODO: Improve testing of this...
# document = ProvDocument()
# bundle = document.bundle(identifier=EX_NS['b'])
# e1 = bundle.entity(EX_NS['e'])
# e2 = bundle.entity(EX_NS['e'])
# unified = document.unified()
#
# self.assertEqual(len(unified._bundles[0]._records), 1)
def test_primer_alternate(self):
g1 = primer_example()
g2 = primer_example_alternate()
self.assertEqual(g1, g2)
if __name__ == '__main__':
unittest.main()
|
krischer/prov
|
prov/tests/test_extras.py
|
Python
|
mit
| 8,671
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Classes for docstring parsing and formatting.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
import inspect
import re
# from six import string_types, u
# from six.moves import range
from .iterators import modify_iter
import sys
def _prepare_docstring(s, ignore=1):
# type: (unicode, int) -> List[unicode]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
Return the docstring as a list of lines usable for inserting into a docutils
ViewList (used as argument of nested_parse().) An empty line is added to
act as a separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after ignored lines.
margin = sys.maxsize
for line in lines[ignore:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation from ignored lines.
for i in range(ignore):
if i < len(lines):
lines[i] = lines[i].lstrip()
if margin < sys.maxsize:
for i in range(ignore, len(lines)):
lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
# make sure there is an empty line at the end
if lines and lines[-1]:
lines.append('')
return lines
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)')
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
_xref_regex = re.compile(r'(:(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)')
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
_enumerated_list_regex = re.compile(
r'^(?P<paren>\()?'
r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
r'(?(paren)\)|\.)(\s+\S|\s*$)')
class GoogleDocstring(object):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
Other Parameters
----------------
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring=None, what='', name='',
obj=None, options=None):
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable): # type: ignore
what = 'function'
else:
what = 'object'
if docstring is None:
if obj is None:
raise "If docstring is None, obj may not be"
docstring = obj.__doc__
self._what = what
self._name = name
self._obj = obj
if isinstance(docstring, str):
docstring = _prepare_docstring(docstring)
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[unicode]
self._is_in_section = False
self._section_indent = 0
self._directive_sections = [] # type: List[unicode]
self._entry_sections = {
'args': self._parse_fields_section,
'attributes': self._parse_fields_section,
'returns': self._parse_fields_section,
'yields': self._parse_fields_section,
'example args': self._parse_fields_section,
} # type: Dict[unicode, Callable]
self._freeform_sections = {
'example': self._parse_generic_section,
'examples': self._parse_generic_section,
'example returns': self._parse_generic_section,
'note': self._parse_generic_section,
'references': self._parse_generic_section,
'see also': self._parse_generic_section,
'todo': self._parse_generic_section,
} # type: Dict[unicode, Callable]
self._sections = {
name: value
for name, value in [*self._entry_sections.items(), *self._freeform_sections.items()]
}
self._parsed_dicts = {
name: []
for name in self._entry_sections.keys()
}
self._parse()
def lines(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return self._parsed_lines
def result(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return {'sections': self._parsed_lines, **self._parsed_dicts}
def _consume_indented_block(self, indent=1):
# type: (int) -> List[unicode]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
# type: () -> List[unicode]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_empty(self):
# type: () -> List[unicode]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after # type: unicode, unicode, unicode
if parse_type:
match = _google_typed_arg_regex.match(before) # type: ignore
if match:
_name = match.group(1)
_type = match.group(2)
_name = self._escape_args_and_kwargs(_name)
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_descs = [_desc] + self._dedent(self._consume_indented_block(indent))
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
self._consume_empty()
fields = []
while not self._is_section_break():
_name, _type, _desc = self._consume_field(parse_type, prefer_type)
if _name or _type or _desc:
fields.append((_name, _type, _desc,))
return fields
def _consume_section_header(self):
# type: () -> unicode
section = next(self._line_iter) # type: ignore
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
# type: () -> List[unicode]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_to_next_section(self):
# type: () -> List[unicode]
self._consume_empty()
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter)) # type: ignore
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
# type: (List[unicode], bool) -> List[unicode]
if full:
return [line.lstrip() for line in lines]
else:
min_indent = self._get_min_indent(lines)
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
# type: (unicode) -> unicode
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
return r'\*' + name[1:]
else:
return name
def _fix_field_desc(self, desc):
# type: (List[unicode]) -> List[unicode]
if self._is_list(desc):
desc = [u''] + desc
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
desc = [u''] + desc
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _get_current_indent(self, peek_ahead=0):
# type: (int) -> int
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
return self._get_indent(line)
peek_ahead += 1
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
return 0
def _get_indent(self, line):
# type: (unicode) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
# type: (List[unicode]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
# type: (List[unicode]) -> int
min_indent = None
for line in lines:
if line:
indent = self._get_indent(line)
if min_indent is None:
min_indent = indent
elif indent < min_indent:
min_indent = indent
return min_indent or 0
def _indent(self, lines, n=4):
# type: (List[unicode], int) -> List[unicode]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
# type: (unicode, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
elif not s.isspace():
return False
return False
def _is_list(self, lines):
# type: (List[unicode]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]): # type: ignore
return True
if _enumerated_list_regex.match(lines[0]): # type: ignore
return True
if len(lines) < 2 or lines[0].endswith('::'):
return False
indent = self._get_indent(lines[0])
next_indent = indent
for line in lines[1:]:
if line:
next_indent = self._get_indent(line)
break
return next_indent > indent
def _is_section_header(self):
# type: () -> bool
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
header_indent = self._get_indent(section)
section_indent = self._get_current_indent(peek_ahead=1)
return section_indent > header_indent
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _is_section_break(self):
# type: () -> bool
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
(self._is_in_section and
line and
not self._is_indented(line, self._section_indent)))
def _parse(self):
# type: () -> None
self._parsed_lines = self._consume_empty()
while self._line_iter.has_next():
if self._is_section_header():
try:
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
if _directive_regex.match(section): # type: ignore
lines = [section] + self._consume_to_next_section()
else:
section_key = section.lower()
parse_section = self._sections[section_key]
if section_key in self._parsed_dicts:
self._parsed_dicts[section_key].extend(
parse_section())
else:
self._parsed_lines.append(
(section_key, parse_section()))
finally:
self._is_in_section = False
self._section_indent = 0
else:
if not self._parsed_lines:
self._parsed_lines.append(('text', self._consume_contiguous() + self._consume_empty()))
else:
self._parsed_lines.append(('text', self._consume_to_next_section()))
# Multiline docstrings often begin right after the """ and then continue
# with appropriate indentation at the next line break. The above algorithm
# splits a single text section into two. Merge them here if that happens.
if len(self._parsed_lines) >= 2:
first = self._parsed_lines[0]
second = self._parsed_lines[1]
if first[0] == 'text' and second[0] == 'text':
self._parsed_lines = self._parsed_lines[1:]
self._parsed_lines[0] = ('text', first[1] + second[1])
def _parse_fields_section(self):
# type: (unicode) -> List[unicode]
fields = self._consume_fields()
# type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
if any(_desc):
_desc = self._fix_field_desc(_desc)
lines.append((_name, _type, _desc))
return lines
def _parse_generic_section(self):
# type: (unicode, bool) -> List[unicode]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if lines:
return lines
else:
return ['']
def _partition_field_on_colon(self, line):
# type: (unicode) -> Tuple[unicode, unicode, unicode]
before_colon = []
after_colon = []
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)): # type: ignore
if found_colon:
after_colon.append(source)
else:
m = _single_colon_regex.search(source)
if (i % 2) == 0 and m:
found_colon = True
colon = source[m.start(): m.end()]
before_colon.append(source[:m.start()])
after_colon.append(source[m.end():])
else:
before_colon.append(source)
return ("".join(before_colon).strip(),
colon,
"".join(after_colon).strip())
def _strip_empty(self, lines):
# type: (List[unicode]) -> List[unicode]
if lines:
start = -1
for i, line in enumerate(lines):
if line:
start = i
break
if start == -1:
lines = []
end = -1
for i in reversed(range(len(lines))):
line = lines[i]
if line:
end = i
break
if start > 0 or end + 1 < len(lines):
lines = lines[start:end + 1]
return lines
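# ---------------------------------------------------------------------------
# Illustrative usage sketch only (not part of the original module): parsing a
# small Google-style docstring and reading back the structured result. The
# docstring text is made up; the result layout follows result() above.
def _example_parse():
    doc = (
        "Adds two numbers.\n"
        "\n"
        "Args:\n"
        "    a (int): First operand.\n"
        "    b (int): Second operand.\n"
        "\n"
        "Returns:\n"
        "    int: The sum.\n"
    )
    parsed = GoogleDocstring(doc).result()
    # parsed['args'] holds (name, type, description-lines) tuples, while
    # parsed['sections'] holds the remaining free-form text sections.
    return parsed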
|
ajbouh/tfi
|
src/tfi/parse/docstring.py
|
Python
|
mit
| 17,890
|
from pygame.sprite import DirtySprite
from pygame import draw
class BaseWidget(DirtySprite):
"""clase base para todos los widgets"""
focusable = True
# si no es focusable, no se le llaman focusin y focusout
# (por ejemplo, un contenedor, una etiqueta de texto)
hasFocus = False
# indica si el widget está en foco o no.
enabled = True
# un widget con enabled==False no recibe ningun evento
nombre = ''
# identifica al widget en el renderer
hasMouseOver = False
# indica si el widget tuvo el mouse encima o no, por el onMouseOut
opciones = None
# las opciones con las que se inicializo
setFocus_onIn = False
# if True: Renderer.setFocus se dispara onMouseIn también.
KeyCombination = ''
layer = 0
rect = None
x, y = 0, 0
def __init__(self, parent=None, **opciones):
if parent is not None:
self.parent = parent
self.layer = self.parent.layer + 1
self.opciones = opciones
super().__init__()
def on_focus_in(self):
self.hasFocus = True
def on_focus_out(self):
self.hasFocus = False
def on_mouse_down(self, mousedata):
pass
def on_mouse_up(self, mousedata):
pass
def on_mouse_over(self):
pass
def on_mouse_in(self):
self.hasMouseOver = True
def on_mouse_out(self):
self.hasMouseOver = False
def on_key_down(self, keydata):
pass
def on_key_up(self, keydata):
pass
def on_destruction(self):
        # called when the widget is removed from the renderer.
pass
@staticmethod
def _biselar(imagen, color_luz, color_sombra):
w, h = imagen.get_size()
draw.line(imagen, color_sombra, (0, h - 2), (w - 1, h - 2), 2)
draw.line(imagen, color_sombra, (w - 2, h - 2), (w - 2, 0), 2)
draw.lines(imagen, color_luz, 0, [(w - 2, 0), (0, 0), (0, h - 4)], 2)
return imagen
def reubicar_en_ventana(self, dx=0, dy=0):
self.rect.move_ip(dx, dy)
self.x += dx
self.y += dy
self.dirty = 1
def __repr__(self):
return self.nombre
def is_visible(self):
return self._visible
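# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a minimal widget
# subclass showing the hooks a concrete widget would typically override.
class _ExampleLabel(BaseWidget):
    focusable = False  # plain labels never take focus

    def __init__(self, parent=None, **opciones):
        super().__init__(parent, **opciones)
        self.nombre = opciones.get('nombre', 'label')

    def on_mouse_in(self):
        super().on_mouse_in()
        self.dirty = 1  # ask the renderer to repaint on hover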
|
zenieldanaku/DyDCreature_Editor
|
azoe/widgets/basewidget.py
|
Python
|
mit
| 2,314
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities related to bgp data types and models.
"""
import logging
import socket
from ryu.lib.packet.bgp import (
BGPUpdate,
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_RTC_UC,
RouteTargetMembershipNLRI,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGPPathAttributeMultiExitDisc,
BGPPathAttributeMpUnreachNLRI,
BGPPathAttributeAs4Path,
BGPPathAttributeAs4Aggregator,
BGPPathAttributeUnknown,
BGP_ATTR_FLAG_OPTIONAL,
BGP_ATTR_FLAG_TRANSITIVE,
)
from ryu.services.protocols.bgp.info_base.rtc import RtcPath
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
LOG = logging.getLogger('utils.bgp')
# RouteFamily to path sub-class mapping.
_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_UC: Ipv4Path,
RF_IPv6_UC: Ipv6Path,
RF_IPv4_VPN: Vpnv4Path,
RF_IPv6_VPN: Vpnv6Path,
RF_L2_EVPN: EvpnPath,
RF_RTC_UC: RtcPath}
def create_path(src_peer, nlri, **kwargs):
route_family = nlri.ROUTE_FAMILY
assert route_family in _ROUTE_FAMILY_TO_PATH_MAP.keys()
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
def clone_path_and_update_med_for_target_neighbor(path, med):
assert path and med
route_family = path.route_family
if route_family not in _ROUTE_FAMILY_TO_PATH_MAP.keys():
raise ValueError('Clone is not supported for address-family %s' %
route_family)
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
pattrs = path.pathattr_map
pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = BGPPathAttributeMultiExitDisc(med)
return path_cls(
path.source, path.nlri, path.source_version_num,
pattrs=pattrs, nexthop=path.nexthop,
is_withdraw=path.is_withdraw,
med_set_by_target_neighbor=True
)
def clone_rtcpath_update_rt_as(path, new_rt_as):
"""Clones given RT NLRI `path`, and updates it with new RT_NLRI AS.
Parameters:
- `path`: (Path) RT_NLRI path
- `new_rt_as`: AS value of cloned paths' RT_NLRI
"""
assert path and new_rt_as
if not path or path.route_family != RF_RTC_UC:
raise ValueError('Expected RT_NLRI path')
old_nlri = path.nlri
new_rt_nlri = RouteTargetMembershipNLRI(new_rt_as, old_nlri.route_target)
return RtcPath(path.source, new_rt_nlri, path.source_version_num,
pattrs=path.pathattr_map, nexthop=path.nexthop,
is_withdraw=path.is_withdraw)
def from_inet_ptoi(bgp_id):
"""Convert an IPv4 address string format to a four byte long.
"""
four_byte_id = None
try:
packed_byte = socket.inet_pton(socket.AF_INET, bgp_id)
four_byte_id = int(packed_byte.encode('hex'), 16)
except ValueError:
LOG.debug('Invalid bgp id given for conversion to integer value %s',
bgp_id)
return four_byte_id
def get_unknown_opttrans_attr(path):
"""Utility method that gives a `dict` of unknown and unsupported optional
transitive path attributes of `path`.
Returns dict: <key> - attribute type code, <value> - unknown path-attr.
"""
path_attrs = path.pathattr_map
unknown_opt_tran_attrs = {}
for _, attr in path_attrs.items():
if (isinstance(attr, BGPPathAttributeUnknown) and
attr.flags & (BGP_ATTR_FLAG_OPTIONAL |
BGP_ATTR_FLAG_TRANSITIVE)) or \
isinstance(attr, BGPPathAttributeAs4Path) or \
isinstance(attr, BGPPathAttributeAs4Aggregator):
unknown_opt_tran_attrs[attr.type] = attr
return unknown_opt_tran_attrs
def create_end_of_rib_update():
"""Construct end-of-rib (EOR) Update instance."""
mpunreach_attr = BGPPathAttributeMpUnreachNLRI(RF_IPv4_VPN.afi,
RF_IPv4_VPN.safi,
[])
eor = BGPUpdate(path_attributes=[mpunreach_attr])
return eor
# BGP update message instance that can be used as an End of RIB marker.
UPDATE_EOR = create_end_of_rib_update()
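# Illustrative only (not part of the original module): from_inet_ptoi() maps a
# dotted-quad router ID to its integer form, e.g. '10.0.0.1' -> 0x0A000001
# (167772161). Note it relies on Python 2 str.encode('hex') semantics.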
|
ool2016-seclab/quarantineSystem
|
ryu/services/protocols/bgp/utils/bgp.py
|
Python
|
mit
| 5,129
|
#!/usr/bin/env python
from mattermost_bot.bot import Bot, PluginsManager
from mattermost_bot.mattermost import MattermostClient
from mattermost_bot.dispatcher import MessageDispatcher
import bot_settings
class LocalBot(Bot):
def __init__(self):
self._client = MattermostClient(
bot_settings.BOT_URL, bot_settings.BOT_TEAM,
bot_settings.BOT_LOGIN, bot_settings.BOT_PASSWORD,
bot_settings.SSL_VERIFY
)
self._plugins = PluginsManager()
self._plugins.init_plugins()
self._dispatcher = MessageDispatcher(self._client, self._plugins)
def main():
bot = LocalBot()
bot.run()
if __name__ == '__main__':
main()
|
seLain/mattermost_bot
|
tests/behavior_tests/run_bot.py
|
Python
|
mit
| 701
|
import numpy as np
import cv2
import cv2.cv as cv
#im = cv2.imread('/Users/asafvaladarsky/Documents/img/Ad0010401.png')
im = cv2.imread('pic.png')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
invert = 255 - imgray
cv2.imwrite('/Users/asafvaladarsky/Documents/pic1.png', invert)
#ret,thresh = cv2.threshold(invert,0,0,0)
contours, hierarchy = cv2.findContours(invert,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
if (i % 2 == 0):
cnt = contours[i]
#mask = np.zeros(im2.shape,np.uint8)
#cv2.drawContours(mask,[cnt],0,255,-1)
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(invert,(x,y),(x+w,y+h),(0,255,0),1)
#cv2.drawContours(invert, contours, -1, (255,0,0), 1 )
cv2.imshow('image', invert)
0xFF & cv2.waitKey()
cv2.destroyAllWindows()
'''
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)
################# Now finding Contours ###################
contours0, hierarchy = cv2.findContours( im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
def update(levels):
vis = np.zeros((cvImg.height, cvImg.width, 3), np.uint8)
levels = levels - 3
cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
3, cv2.CV_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
update(3)
cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
cv2.imshow('image', img)
0xFF & cv2.waitKey()
cv2.destroyAllWindows()
'''
'''
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
samples = np.empty((0,100))
responses = []
keys = [i for i in range(48,58)]
print len(contours)
for cnt in contours:
if cv2.contourArea(cnt)>50:
[x,y,w,h] = cv2.boundingRect(cnt)
if h>28:
cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
roi = thresh[y:y+h,x:x+w]
roismall = cv2.resize(roi,(10,10))
cv2.imshow('norm',im)
key = cv2.waitKey(0)
if key == 27:
sys.exit()
elif key in keys:
responses.append(int(chr(key)))
sample = roismall.reshape((1,100))
samples = np.append(samples,sample,0)
else:
print "boho"
responses = np.array(responses,np.float32)
responses = responses.reshape((responses.size,1))
print("training complete")
np.savetxt('generalsamples.data',samples)
np.savetxt('generalresponses.data',responses)
'''
|
hasadna/OpenPress
|
engine/ocr/Test1.py
|
Python
|
mit
| 2,585
|
#!/usr/bin/env python
"""
moveit_attached_object_demo.py - Version 0.1 2014-01-14
Attach an object to the end-effector and then move the arm to test collision avoidance.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2014 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy, sys
import thread, copy
import moveit_commander
from moveit_commander import RobotCommander, MoveGroupCommander, PlanningSceneInterface
from geometry_msgs.msg import PoseStamped, Pose
from moveit_msgs.msg import CollisionObject, AttachedCollisionObject, PlanningScene
from math import radians
from copy import deepcopy
class MoveItDemo:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('moveit_demo')
# Construct the initial scene object
scene = PlanningSceneInterface()
# Pause for the scene to get ready
rospy.sleep(1)
# Initialize the MoveIt! commander for the right arm
right_arm = MoveGroupCommander('right_arm')
# Initialize the MoveIt! commander for the gripper
right_gripper = MoveGroupCommander('right_gripper')
# Get the name of the end-effector link
end_effector_link = right_arm.get_end_effector_link()
# Allow some leeway in position (meters) and orientation (radians)
right_arm.set_goal_position_tolerance(0.01)
right_arm.set_goal_orientation_tolerance(0.05)
# Allow replanning to increase the odds of a solution
right_arm.allow_replanning(True)
# Allow 5 seconds per planning attempt
right_arm.set_planning_time(5)
# Remove leftover objects from a previous run
scene.remove_attached_object(end_effector_link, 'tool')
scene.remove_world_object('table')
scene.remove_world_object('box1')
scene.remove_world_object('box2')
scene.remove_world_object('target')
# Set the height of the table off the ground
table_ground = 0.75
# Set the length, width and height of the table
table_size = [0.2, 0.7, 0.01]
# Set the length, width and height of the object to attach
tool_size = [0.3, 0.02, 0.02]
# Create a pose for the tool relative to the end-effector
p = PoseStamped()
p.header.frame_id = end_effector_link
# Place the end of the object within the grasp of the gripper
p.pose.position.x = tool_size[0] / 2.0 - 0.025
p.pose.position.y = 0.0
p.pose.position.z = 0.0
# Align the object with the gripper (straight out)
p.pose.orientation.x = 0
p.pose.orientation.y = 0
p.pose.orientation.z = 0
p.pose.orientation.w = 1
# Attach the tool to the end-effector
scene.attach_box(end_effector_link, 'tool', p, tool_size)
# Add a floating table top
table_pose = PoseStamped()
table_pose.header.frame_id = 'base_footprint'
table_pose.pose.position.x = 0.35
table_pose.pose.position.y = 0.0
table_pose.pose.position.z = table_ground + table_size[2] / 2.0
table_pose.pose.orientation.w = 1.0
scene.add_box('table', table_pose, table_size)
# Update the current state
right_arm.set_start_state_to_current_state()
# Move the arm with the attached object to the 'straight_forward' position
right_arm.set_named_target('straight_forward')
right_arm.go()
rospy.sleep(2)
# Return the arm in the "resting" pose stored in the SRDF file
right_arm.set_named_target('resting')
right_arm.go()
rospy.sleep(2)
scene.remove_attached_object(end_effector_link, 'tool')
moveit_commander.roscpp_shutdown()
moveit_commander.os._exit(0)
if __name__ == "__main__":
MoveItDemo()
|
fujy/ROS-Project
|
src/rbx2/rbx2_arm_nav/scripts/moveit_attached_object_demo.py
|
Python
|
mit
| 4,719
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from os import listdir
files = listdir('./data/plots/')
colorarray = np.random.random_sample((10000, 3))
for f in files:
size = int(f.split('-')[0])
x, y, c = np.loadtxt('./data/plots/' + f, unpack=True)
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
for px, py, col in zip(x, y, c):
        rect = patches.Rectangle((px, py), 1, 1, color=colorarray[int(col)])
ax.add_patch(rect)
plt.xlim([0, size])
plt.ylim([0, size])
plt.savefig('./plots/nice/' + f.replace('.txt', '.pdf'))
plt.clf()
|
kdungs/coursework-computational-physics
|
src/02/plot_forest.py
|
Python
|
mit
| 630
|
from pypermissions.permission import PermissionSet
def _prepare_runtime_permission(self, perm=None, runkw=None, args=None, kwargs=None):
"""This function parses the provided string arguments to decorators into the actual values for use when the
decorator is being evaluated. This allows for permissions to be created that rely on arguments that are provided to
the function.
:param perm: The permission string to parse
:param runkw: The run-time components to be inserted into the permission
:param args: The arguments provided to the decorated function
:param kwargs: The keyword arguments provided to the decorated function
:rtype: :py:class:`str`
"""
permission = perm
if not permission:
return False
for key, value in runkw.iteritems():
val_split = value.split('.')
for attr in val_split:
if attr == "self":
value = self
continue
elif attr in kwargs:
value = kwargs.get(attr)
continue
value = getattr(value, attr)
permission = permission.replace('{'+key+'}', value)
return permission
def set_has_permission(perm=None, perm_set=None, on_failure=None, perm_check=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the decorated
function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
current user making the function call, and providing their permissions as the perm_set. The function provided for
use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param perm_check: The PermissionSet function to be used when evaluating for perm.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
def decorator(function):
def check_permission(self, *args, **kwargs):
permission = _prepare_runtime_permission(self, perm, runkw, args, kwargs)
# No permission provided, so everyone has permission.
if not permission:
return function(self, *args, **kwargs)
if not perm_set:
return on_failure(self, *args, **kwargs)
if not perm_check(perm_set, permission):
return on_failure(self, *args, **kwargs)
return function(self, *args, **kwargs)
return check_permission
return decorator
def set_grants_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the decorated
function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
current user making the function call, and providing their permissions as the perm_set. The function provided for
use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.grants_permission, **runkw)
def set_has_any_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has a permission of the form specified. It allows for the
permission to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the
decorated function. For many use cases, this can be extended by decorating it with a custom decorator that will
capture the current user making the function call, and providing their permissions as the perm_set. The function
provided for use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.has_any_permission, **runkw)
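# ---------------------------------------------------------------------------
# Illustrative usage sketch only (not part of the original module). The class,
# permission string and names below are made up; runkw entries are dotted paths
# resolved against `self` or the decorated function's keyword arguments, as
# implemented in _prepare_runtime_permission above.
#
#   admin_set = PermissionSet(...)   # hypothetical set held by the caller
#
#   def deny(self, *args, **kwargs):
#       raise RuntimeError("permission denied")
#
#   class ProjectService(object):
#       @set_grants_permission(perm="project.{pid}.edit",
#                              perm_set=admin_set,
#                              on_failure=deny,
#                              pid="project_id")
#       def edit(self, project_id=None):
#           ...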
|
Acidity/PyPermissions
|
pypermissions/decorators.py
|
Python
|
mit
| 5,369
|
#!/usr/bin/env python3
import sys
import subprocess

filename = sys.argv[1]
print("extracting " + filename)
p = subprocess.Popen(["unzip", filename, "-dintro"], stdout=subprocess.PIPE)
p.communicate()
# Each extracted CSV is loaded into the database table of the same name via
# the uploadtodb.php helper script, in the original upload order.
tables = [
    "courses",
    "users",
    "course_formative_quiz_grades",
    "course_item_passing_states",
    "course_passing_states",
    "course_grades",
    "course_modules",
    "course_lessons",
    "course_items",
    "course_item_grades",
    "course_item_types",
]
for table in tables:
    p = subprocess.Popen(
        ["php", "-f", "uploadtodb.php", "intro/" + table + ".csv", table],
        stdout=subprocess.PIPE, universal_newlines=True)
    out, err = p.communicate()
subprocess.call("rm intro/*", shell=True)
|
401LearningAnalytics/Project
|
server_side/upload.py
|
Python
|
mit
| 2,202
|
#! /usr/bin/env python3
# encoding: utf-8
'''
http://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
# Since May 22 2012
from projecteuler import calctime
n, a, b = 100000, 3, 5  # note: n is larger than the problem's bound of 1000
# v1
def v1():
summ = 0
for i in range(n):
if i%a == 0 or i%b == 0:
summ += i
return summ
# v2
# Generator expression faster than for loop
def v2():
return sum( i for i in range(n) if i%a == 0 or i%b == 0 )
# v3
# Almost as fast as v2
def v3():
return sum( i for i in range(n) if not i%a or not i%b )
# v4
# Almost as fast as v2
def v4():
return sum( i for i in range(n) if not (i%a and i%b) )
# v5
# Time is O(1), the fastest
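# The closed form uses inclusion-exclusion: add the multiples of a and of b,
# then subtract the multiples of a*b (counted twice). Each group is summed with
# the arithmetic series k + 2k + ... + q*k = k*q*(q+1)/2, where q is the count
# of multiples of k within the limit.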
def v5():
    m = n - 1  # largest number below n, matching the range(n) used by v1-v4
    return sum((m//k*k + k) * (m//k) // 2 * v for k, v in {a: 1, b: 1, a*b: -1}.items())
if __name__ == '__main__':
for i in range(1, 6):
fname = 'v%d' % i
print(locals()[fname]())
calctime('%s()'% fname, 'from __main__ import %s' % fname, 50)
|
fossilet/project-euler
|
pe1.py
|
Python
|
mit
| 1,110
|
"""
file: cas_manager.py
authors: Christoffer Rosen <cbr4830@rit.edu>
date: Jan. 2014
description: This module contains the CAS_manager class, which is a thread that continuously checks if there
is work that needs to be done. Also contains supporting classes of Worker and ThreadPool used by
the CAS_Manager.
"""
from analyzer.analyzer import *
from ingester.ingester import *
from orm.repository import *
import calendar # to convert datetime to unix time
from caslogging import logging
from queue import *
import threading
import time
from monthdelta import *
BACKGROUND_INTERVAL = 60 # wait 1 minute
class CAS_Manager(threading.Thread):
"""
    Thread that continuously checks if there is work to be done and adds it to
the thread pool work queue
"""
def __init__(self):
"""Constructor"""
threading.Thread.__init__(self)
numOfWorkers = int(config['system']['workers'])
self.workQueue = ThreadPool(numOfWorkers)
self.modelQueue = Queue()
def checkIngestion(self):
"""Check if any repo needs to be ingested"""
session = Session()
repo_update_freq = int(config['repoUpdates']['freqInDays'])
refresh_date = str(datetime.utcnow() - timedelta(days=repo_update_freq))
repos_to_get = (session.query(Repository)
.filter(
(Repository.status == "Waiting to be Ingested") |
(Repository.ingestion_date < refresh_date) &
(Repository.status != "Error") &
(Repository.status != "Analyzing"))
.all())
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to work queue for ingesting")
repo.status = "In Queue to be Ingested"
session.commit() # update the status of repo
self.workQueue.add_task(ingest,repo.id)
session.close()
def checkAnalyzation(self):
"""Checks if any repo needs to be analyzed"""
session = Session()
repo_update_freq = int(config['repoUpdates']['freqInDays'])
refresh_date = str(datetime.utcnow() - timedelta(days=repo_update_freq))
repos_to_get = (session.query(Repository)
.filter( (Repository.status == "Waiting to be Analyzed") )
.all()
)
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to work queue for analyzing.")
repo.status = "In Queue to be Analyzed"
session.commit() # update the status of repo
self.workQueue.add_task(analyze, repo.id)
session.close()
def checkModel(self):
"""Check if any repo needs metrics to be generated"""
session = Session()
repos_to_get = (session.query(Repository)
.filter(
(Repository.status == "In Queue to Build Model") )
.all())
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to model queue to finish analyzing")
repo.status = "Building Model"
session.commit() # update status of repo
self.modelQueue.put(repo.id)
session.close()
def checkBuildModel(self):
""" Checks if any repo is awaiting to build model.
We are using a queue because we can't concurrently access R """
session = Session()
if self.modelQueue.empty() != True:
repo_id = self.modelQueue.get()
repo = (session.query(Repository).filter(Repository.id == repo_id).first())
            # use data only up to X months prior; we won't have sufficient data to build models
# as there may be bugs introduced in those months that haven't been fixed, skewing
# our model.
glm_model_time = int(config['glm_modeling']['months'])
data_months_datetime = datetime.utcnow() - monthdelta(glm_model_time)
data_months_unixtime = calendar.timegm(data_months_datetime.utctimetuple())
# all commits for repo prior to current time - glm model time
training_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id ) &
( Commit.author_date_unix_timestamp < str(data_months_unixtime))
)
.order_by( Commit.author_date_unix_timestamp.desc() )
.all())
# all commits for repo after or on current time - glm model time
testing_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id ) &
( Commit.author_date_unix_timestamp >= str(data_months_unixtime)))
.all())
try:
metrics_generator = MetricsGenerator(repo_id, training_commits, testing_commits)
metrics_generator.buildAllModels()
                # monthly data dump - or rather, every 30 days.
dump_refresh_date = str(datetime.utcnow() - timedelta(days=30))
if repo.last_data_dump == None or repo.last_data_dump < dump_refresh_date:
logging.info("Generating a monthly data dump for repository: " + repo_id)
# Get all commits for the repository
all_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id )
)
.order_by( Commit.author_date_unix_timestamp.desc() )
.all())
metrics_generator.dumpData(all_commits)
repo.last_data_dump = str(datetime.now().replace(microsecond=0))
# Notify user if repo has never been analyzed previously
if repo.analysis_date is None:
self.notify(repo)
logging.info("Repo " + repo_id + " finished analyzing.")
repo.analysis_date = str(datetime.now().replace(microsecond=0))
repo.status = "Analyzed"
session.commit() # update status of repo
session.close()
# uh-oh
except Exception as e:
logging.exception("Got an exception building model for repository " + repo_id)
repo.status = "Error"
session.commit() # update repo status
session.close()
def notify(self, repo):
""" Send e-mail notifications if applicable to a repo
used by checkBuildModel """
notify = False
notifier = None
logging.info("Notifying subscribed users for repository " + repo.id)
# Create the Notifier
gmail_user = config['gmail']['user']
gmail_pass = config['gmail']['pass']
notifier = Notifier(gmail_user, gmail_pass, repo.name)
# Add subscribers if applicable
if repo.email is not None:
notifier.addSubscribers([repo.email, gmail_user])
else:
notifier.addSubscribers([gmail_user])
notifier.notify()
def run(self):
while(True):
### --- Check repository table if there is any work to be done --- ###
self.checkIngestion()
self.checkAnalyzation()
self.checkModel()
self.checkBuildModel()
time.sleep(BACKGROUND_INTERVAL)
class Worker(threading.Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
threading.Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
print(e)
self.tasks.task_done()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads): Worker(self.tasks)
def add_task(self, func, *args, **kargs):
"""Add a task to the queue"""
self.tasks.put((func, args, kargs))
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.tasks.join()
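# Hypothetical usage sketch (`work` and `items` are illustrative names, not part of this module):
#   pool = ThreadPool(4)
#   for item in items:
#       pool.add_task(work, item)
#   pool.wait_completion()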
|
bumper-app/bumper-bianca
|
bianca/cas_manager.py
|
Python
|
mit
| 7,075
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
SSL_REDIRECT = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_REDIRECT = True if os.environ.get('DYNO') else False
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle reverse proxy server headers
try:
from werkzeug.middleware.proxy_fix import ProxyFix
except ImportError:
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class DockerConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.INFO)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'docker': DockerConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}
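# A typical lookup sketch (the application factory itself is not part of this file;
# the 'FLASK_CONFIG' variable name is illustrative):
#   config_name = os.environ.get('FLASK_CONFIG', 'default')
#   app.config.from_object(config[config_name])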
|
miguelgrinberg/flasky
|
config.py
|
Python
|
mit
| 3,883
|
import csv
from datetime import datetime
from matplotlib import pyplot as plt
# Get dates, high, and low temperatures from file.
filename = 'sitka_weather_2017.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
dates, highs, lows = [], [], []
for row in reader:
current_date = datetime.strptime(row[0], "%Y-%m-%d")
dates.append(current_date)
high = int(row[1])
highs.append(high)
low = int(row[3])
lows.append(low)
# Plot data.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format plot.
plt.title("Daily high and low temperatures - 2017", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
|
helanan/Panda_Prospecting
|
panda_prospecting/prospecting/insights/high_lows.py
|
Python
|
mit
| 966
|
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2010 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from plugins.Plugin import *
class CryptoPlugin(Plugin):
name = "CryptoPlugin"
author = "Olle Johansson"
description = "Adds some simple crypto commands."
commands = [
dict(
command = ['!reverse'],
handler = 'cmd_reverse',
onevents = ['Message'],
),
dict(
command = ['!rot13'],
handler = 'cmd_rot13',
onevents = ['Message'],
),
]
def cmd_reverse(self, shout, command, comobj):
msg = self.strip_command(shout.text, command)
msg = msg[::-1]
self.send_message(msg)
def cmd_rot13(self, shout, command, comobj):
msg = self.strip_command(shout.text, command)
msg = self.rot13(msg)
self.send_message(msg)
def rot13(self, s):
return ''.join( self.rot13_char(ch) for ch in s )
    def rot13_char(self, ch):
        # Only rotate letters; any other character is returned unchanged.
        if not ch.isalpha():
            return ch
        if ch.lower() <= 'm':
            dist = 13
        else:
            dist = -13
        return chr(ord(ch) + dist)
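    # ROT13 maps a-m/A-M forward by 13 and n-z/N-Z back by 13, e.g. self.rot13("Hello") -> "Uryyb".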
|
ollej/shoutbridge
|
src/plugins/CryptoPlugin.py
|
Python
|
mit
| 2,129
|
#!/usr/bin/env python3
import sys
import click
from boadata import __version__
from boadata.cli import try_load, try_apply_sql, qt_app
@click.command()
@click.version_option(__version__)
@click.argument("uri")
@click.option("-s", "--sql", required=False, help="SQL to run on the object.")
@click.option("-t", "--type", default=None, help="What type is the object.")
@click.option("-p", "--parameter", help="Additional parameters for loader, specified as key=value", multiple=True)
def run_app(uri, type, parameter, **kwargs):
kwargs = {key: value for key, value in kwargs.items() if value is not None}
do = try_load(uri, type, parameters=parameter)
do = try_apply_sql(do, kwargs)
with qt_app():
from boadata.gui.qt import DataObjectWindow
window = DataObjectWindow(do)
window.show()
window.setWindowTitle(do.uri)
if __name__ == "__main__":
run_app()
|
janpipek/boadata
|
boadata/commands/boaview.py
|
Python
|
mit
| 911
|
r"""Functions for Higgs signal strengths."""
import flavio
from . import production
from . import decay
from . import width
prod_modes = {
'ggF': {
'desc': 'gluon fusion production',
'tex': 'gg',
'str': 'gg',
},
'hw': {
'desc': '$W$ boson associated production',
'tex': 'Wh',
'str': 'Wh',
},
'hz': {
'desc': '$Z$ boson associated production',
'tex': 'Zh',
'str': 'Zh',
},
'hv': {
'desc': '$Z$ or $W$ boson associated production',
'tex': 'Vh',
'str': 'Vh',
},
'tth': {
'desc': 'top pair associated production',
'tex': r't\bar t h',
'str': 'tth',
},
'vv_h': {
'desc': 'weak boson fusion',
'tex': r'\text{VBF}',
'str': 'VBF',
},
}
decay_modes = {
'h_bb': {
'tex': r'b\bar b',
'str': 'bb',
'tex_class': r'h\to ff',
},
'h_cc': {
'tex': r'c\bar c',
'str': 'cc',
'tex_class': r'h\to ff',
},
'h_tautau': {
'tex': r'\tau^+\tau^-',
'str': 'tautau',
'tex_class': r'h\to ff',
},
'h_mumu': {
'tex': r'\mu^+\mu^-',
'str': 'mumu',
'tex_class': r'h\to ff',
},
'h_ww': {
'tex': r'W^+W^-',
'str': 'WW',
'tex_class': r'h\to VV',
},
'h_zz': {
'tex': r'ZZ',
'str': 'ZZ',
'tex_class': r'h\to VV',
},
'h_vv': {
'tex': r'VV',
'str': 'VV',
'tex_class': r'h\to VV',
},
'h_zga': {
'tex': r'Z\gamma',
'str': 'Zgamma',
'tex_class': r'h\to VV',
},
'h_gaga': {
'tex': r'\gamma\gamma',
'str': 'gammagamma',
'tex_class': r'h\to VV',
},
}
def higgs_signalstrength(wc_obj, par, name_prod, name_dec):
scale = flavio.config['renormalization scale']['hdecays']
C = wc_obj.get_wcxf(sector='all', scale=scale, par=par,
eft='SMEFT', basis='Warsaw')
f_prod = getattr(production, name_prod)
f_dec = getattr(decay, name_dec)
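    # Signal strength: mu = (relative production rate) * (relative partial width) / (relative total width)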
return f_prod(C) * f_dec(C) / width.Gamma_h(par, C)
def make_obs_higgs(name_prod, name_dec):
d_dec = decay_modes[name_dec]
d_prod = prod_modes[name_prod]
process_tex = r"h \to {}".format(d_dec['tex'])
process_taxonomy = r'Process :: Higgs production and decay :: $' + d_dec['tex_class'] + r'$ :: $' + process_tex + r"$"
obs_name = "mu_{}(h->{})".format(d_prod['str'], d_dec['str'])
obs = flavio.classes.Observable(obs_name)
obs.set_description(r"Signal strength of $" + process_tex + r"$ from " + d_prod['desc'])
obs.tex = r"$\mu_{" + d_prod['tex'] + r"}(" + process_tex + r")$"
obs.add_taxonomy(process_taxonomy)
def obs_fct(wc_obj, par):
return higgs_signalstrength(wc_obj, par, name_prod, name_dec)
flavio.classes.Prediction(obs_name, obs_fct)
for prod in prod_modes:
for dec in decay_modes:
make_obs_higgs(prod, dec)
|
flav-io/flavio
|
flavio/physics/higgs/signalstrength.py
|
Python
|
mit
| 3,003
|
import numpy as np
import cv2
from math import pi
def points2mapa(landmarks,pos_rob,mapa,P, delete_countdown): #mapa = (x,y, Px,Py, updt)
new_points2add = []
landmarks = np.array(landmarks)
for i in range(0,landmarks.shape[0]):
x_mapa = pos_rob[0] + landmarks[i,1]*np.cos((pos_rob[2])*pi/180+landmarks[i,0])
y_mapa = pos_rob[1] + landmarks[i,1]*np.sin((pos_rob[2])*pi/180+landmarks[i,0])
new = 1
mapa_ar = np.array(mapa)
if delete_countdown ==5 and mapa_ar.size > 0:
mapa_ar[:,4] = np.zeros([mapa_ar.shape[0]])
mapa = list(mapa_ar)
delete_countdown = 0
else:
delete_countdown = 0
sh_dist = 10000;
p_already = 0
for j in range(0, mapa_ar.shape[0]):
distance = np.sqrt(np.power((x_mapa-mapa_ar[j,0]),2) + np.power((y_mapa - mapa_ar[j,1]),2))
if sh_dist > distance:
sh_dist = distance
p_already = j
print("shortest distance:", sh_dist)
if sh_dist < 10:
mapa = np.array(mapa)
mapa[p_already,4] = 1
mapa = mapa.tolist()
new = 0
if new ==1:
new_points2add.append(i)
delete_countdown +=1
return mapa , delete_countdown , new_points2add
def cam2rob(BB_legos, H):
####cam [[[ 270.03048706 448.53890991]]]
pixel_size = 0.653947100514
# CENTER OF THE CAMERA
cam= np.array([242.54,474.87])
cam2rob_dist = 25
Lego_list = []
for box in BB_legos:
y = box[3]
x = box[0] + (box[2]-box[0])/2
input_vector=np.array([[[x,y]]],dtype=np.float32)
output_vector=cv2.perspectiveTransform(input_vector,np.linalg.inv(H))
distance_x = (-output_vector[0,0,1]+cam[1])*pixel_size +cam2rob_dist
distance_x = -0.28*output_vector[0,0,1] +160
distance_y = -(output_vector[0,0,0] - cam[0])*pixel_size
distance_y = -(output_vector[0,0,0] - cam[0]) *(0.35-0.00022*output_vector[0,0,0])
print("data: ", distance_x,distance_y,box[3],box[1])
angle = np.arctan2(distance_y,distance_x)
distance = np.sqrt(np.power(distance_x,2) + np.power(distance_y,2))
print("Distance ", distance, " angle: ", angle)
if distance < 1000:
Lego_list.append([angle,distance/2])
print("angle" , angle*180/pi)
Lego_list = np.array(Lego_list)
return Lego_list
def mapa2grid(mapa):
obstacle_size = 28
obstacle_cells = obstacle_size
# Obstacles position
obs_pos = mapa
n_obs = obs_pos.shape[0]
for i in range(0,n_obs):
mapa[obs_pos[i,0]:obs_pos[i,0]+obstacle_cells,obs_pos[i,1]:obs_pos[i,1]+obstacle_cells] = np.ones([obstacle_cells,obstacle_cells])
target_on = 0
    while target_on == 0:
tar_pos = np.random.randint(1000,size=[2])
if mapa[tar_pos[0],tar_pos[1]] == 0 :
mapa[tar_pos[0],tar_pos[1]] = 2
target_on = 1
return mapa
def delete_in_mapa(mapa,robot_trajectory):
robot_trajectory_ar = np.array(robot_trajectory)
min_dist = 29
max_dist = 60
min_angle = np.arctan2(29,20)
max_angle = np.arctan2(29,-20)
mapa_ar = np.array(mapa)
eliminate_index = []
for i in range(0, robot_trajectory_ar.shape[0]):
for j in range(0, mapa_ar.shape[0]):
x = mapa_ar[j,0] - robot_trajectory_ar[i,0]
y = mapa_ar[j,1] - robot_trajectory_ar[i,1]
distance = np.sqrt(np.power(x,2)+np.power(y,2))
angle = np.arctan2(y,x) - robot_trajectory_ar[i,2]*pi/180
if (distance > min_dist and distance< max_dist and angle > min_angle and angle< max_angle) and mapa_ar[j,4] == 0 :
pass
elif j not in eliminate_index:
eliminate_index.append(j)
print("j: ",eliminate_index)
eliminate_index = np.array(eliminate_index)
mapa = np.array(mapa)
if mapa.size:
mapa = mapa[eliminate_index,:]
mapa= mapa.tolist()
#mapa = mapa[eliminate_index]
return mapa
def add_points_in_mapa(landmarks,new_points2add,mapa, P ,pos_rob,index):
landmarks = np.array(landmarks)
for i in new_points2add:
x_mapa = pos_rob[0] + landmarks[i,1]*np.cos((pos_rob[2])*pi/180+landmarks[i,0])
y_mapa = pos_rob[1] + landmarks[i,1]*np.sin((pos_rob[2])*pi/180+landmarks[i,0])
mapa.append(np.array([x_mapa,y_mapa,P[0,0],P[1,1],1]))
if index !=1000:
print("grrrrr")
x_mapa = pos_rob[0] + landmarks[index,1]*np.cos((pos_rob[2])*pi/180+landmarks[index,0])
y_mapa = pos_rob[1] + landmarks[index,1]*np.sin((pos_rob[2])*pi/180+landmarks[index,0])
mapa.append(np.array([x_mapa,y_mapa,P[0,0],P[1,1],5]))
return mapa
def create_fake_lego_measurements(real_rob_pos, mapa):
min_dist = 29
max_dist = 60
min_angle = 0#np.arctan2(29,15)
max_angle = np.arctan2(29,-15)
mapa_ar = np.array(mapa)
fake_landmarks = [];
for j in range(0, mapa_ar.shape[0]):
x = mapa_ar[j,0] - real_rob_pos[0]
y = mapa_ar[j,1] - real_rob_pos[1]
distance = np.sqrt(np.power(x,2)+np.power(y,2))
angle = np.arctan2(y,x) - real_rob_pos[2]*pi/180
if distance > min_dist and distance< max_dist and angle > min_angle and angle< max_angle:
fake_landmarks.append(np.array([angle,distance]))
return fake_landmarks
def update_mapa(mapa,landmark_rob,pos_rob,P,delete_countdown, robot_trajectory,index):
mapa,delete_countdown, new_points2add = points2mapa(landmark_rob, pos_rob, mapa, P, delete_countdown)
robot_trajectory.append(pos_rob)
mapa = add_points_in_mapa(landmark_rob,new_points2add,mapa,P,pos_rob,index)
if delete_countdown == 5:
mapa = delete_in_mapa(mapa, robot_trajectory)
robot_trajectory = [];
return mapa, delete_countdown,robot_trajectory
|
TheCamusean/DLRCev3
|
scripts/slam/mapping_BASE_10668.py
|
Python
|
mit
| 5,371
|
import collections
from client.constants import FieldKeyword
from metadata import Metadata
class FileUrlMetadata(Metadata):
def get_title(self, soup):
image_url = self.prop_map[FieldKeyword.URL]
return image_url.split('/')[-1]
def get_files_list(self, response):
file_list = collections.OrderedDict()
file_list[FieldKeyword.COUNT] = 1
file_list[FieldKeyword.DATA] = [{
FieldKeyword.URL: response.request_url,
FieldKeyword.TYPE: None
}]
return file_list
def fetch_site_data(self, sanitized_url, status_code):
return self.generic_fetch_content(sanitized_url, status_code)
def parse_content(self, response):
self.generic_parse_content(response)
self.prop_map[FieldKeyword.FILES] = self.get_files_list(response)
|
SAAVY/magpie
|
client/scraper/file_metadata.py
|
Python
|
mit
| 836
|
#! .env/bin/python
# -*- coding: utf8 -*-
from __future__ import unicode_literals
# import time
import random
import itertools
# from collections import defaultdict
import gym
import numpy as np
# implementation of the sarsa algorithm on the mountain car using values
# rounding for value function approximation
# Note: discarded because matplotlib was a bitch.
# def plot(Q):
# """
# Plot the mountain car action value function.
# This only account for the two first dimensions of the state space.
# This plots in a 2 dimensional space circle for each action that is
# bigger the higher the action value function is for this action.
# Assumes all states have the same action set.
# """
# x, y = zip(*Q.keys())
# allXs, allYs, allAreas, allColors = [], [], [], []
# ACTION_COLORS = [
# (1, 0, 0),
# (0, 1, 0),
# (0, 0, 1)
# ]
# areas = defaultdict(dict)
# SEP = 1
# for a in Q[(x[0], y[0])]:
# for xi, yi in zip(x, y):
# bounds = (min(Q[(xi, yi)].values()),
# max(Q[(xi, yi)].values()))
# areas[(xi, yi)][a] = \
# np.pi * SEP * Q[(xi, yi)][a] / (bounds[1] - bounds[0])
# for xi, yi in zip(x, y):
# order = sorted(
# range(Q[(xi, yi)].keys()),
# key=lambda a: Q[(xi, yi)][a])
# for a in order:
# allXs.append(xi)
# allYs.append(yi)
# allAreas.append(areas[(xi, yi)][a])
# allColors.append(tuple(ACTION_COLORS[a]))
# plt.scatter(allXs, allYs, s=allAreas, c=allColors, alpha=0.5)
# plt.show()
class Sarsa(object):
def __init__(self, allStates, allActions):
"""
Sarsa performs in discrete action space and requires the
action state value function table to be initialized arbitrarily
for each state and action.
* allStates should be given as a list of all possible states,
each state being a tuple floats, all of the same length
* allActiosn should be the list of possible actions
"""
super(Sarsa, self).__init__()
self._Q = {
state: {action: 0 for action in allActions}
for state in allStates
}
self._e = 0.2 # epsilon, for the epsilon-greedy policy
        self._a = 1 # alpha, learning rate
self._g = 0.5 # gamma, discount factor
def pickAction(self, state, episodeI=None):
"""
Returns the best action according to (for now) the e-greedy policy
If episodeI is given, it should be the episode index. Used to
        update epsilon for the e-greedy policy
"""
def pickMax():
best = max(self._Q[state].values())
for action in self._Q[state].keys():
if self._Q[state][action] == best:
return action
def pickRandom():
nbActions = len(self._Q[state])
return self._Q[state].keys()[random.randint(0, nbActions - 1)]
if episodeI is not None:
self._e = 1.0 / (episodeI or 1)
# print "e=", self._e
if random.random() > self._e:
return pickMax()
else:
return pickRandom()
def train(self, oldState, newState, action, reward, episodeI):
"""
TD(0) policy improvement
Returns the next action to take
"""
# sample a new action following e-greedy
# print "train:", oldState, newState, action, reward
newAction = self.pickAction(newState, episodeI=episodeI)
# print "New action: ", newAction
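        # TD(0) Sarsa update: Q(s,a) <- Q(s,a) + alpha * (reward + gamma * Q(s',a') - Q(s,a))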
self._Q[oldState][action] = self._Q[oldState][action] + self._a *\
(reward +
self._g * self._Q[newState][newAction] -
self._Q[oldState][action])
return newAction
class RoundingSarsa(object):
"""
    Rounding Sarsa naively runs Sarsa on a discretized state space.
    This makes no assumption about the relationship that may exist between two
    states prior to visiting them.
Requires a discrete action space.
Observation space is assumed to be continuous, a gym Box
"""
def __init__(self, observationSpace, actionSpace, d=2):
super(RoundingSarsa, self).__init__()
self._precision = 100
self._os = observationSpace
values, self._steps = zip(*[
np.linspace(
observationSpace.low[x],
observationSpace.high[x],
self._precision,
retstep=True)
for x in xrange(d)
])
allStates = list(itertools.product(*values))
allActions = range(actionSpace.n)
self.sarsa = Sarsa(allStates, allActions)
def _threshold(self, val, step, dim):
# warning: this assumes rounding started at 0 which may not be the case
return round(float(val - self._os.low[dim]) / step) * step + \
self._os.low[dim]
def _round(self, observations):
return tuple([
self._threshold(observations[x], self._steps[x], x)
for x in xrange(len(observations))])
def pickAction(self, state):
state = self._round(state)
return self.sarsa.pickAction(state)
def train(self, oldState, newState, action, reward, episodeI):
return self.sarsa.train(
self._round(oldState),
self._round(newState),
action, reward, episodeI)
RENDER_EPISODES_SKIP = 1000
# load the environment
env = gym.make('MountainCar-v0')
agent = RoundingSarsa(env.observation_space, env.action_space)
for i_episode in range(1, 20001):
# reset the enviroment at the beginning of each episode
observation = env.reset()
# import ipdb; ipdb.set_trace()
action = agent.pickAction(observation)
done = False
episodeReturn = 0
    # up to 1000 steps
t = 0
for t in xrange(1000):
if (i_episode - 1) % RENDER_EPISODES_SKIP == 0:
env.render() # render the environment
# print(observation)
# take action, get back the reward and the observations
newObservation, reward, done, info = env.step(action)
episodeReturn += reward
action = agent.train(
observation, newObservation, action, reward, i_episode)
observation = newObservation
if done: # the episode is terminated (we 'lost'/'won')
break
# plot(agent.sarsa._Q)
print("Episode %d finished after %d timesteps" % (i_episode, t + 1))
print "Episode %d Return: " % i_episode, episodeReturn
while True:
observation = env.reset()
    action = agent.pickAction(observation)
done = False
while not done:
env.render() # render the environment
observation, reward, done, info = env.step(action)
action = agent.pickAction(observation)
if done:
break
# Using the following line, gym can record the execution of the environment
# env.monitor.start('/tmp/experiment-name-1')
|
Hiestaa/RLViz
|
experiments/gym_test_2.py
|
Python
|
mit
| 7,015
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLite
# TLSLite Author: Trevor Perrin
import binascii
from x509 import ASN1_Node, bytestr_to_int, decode_OID
def a2b_base64(s):
try:
b = bytearray(binascii.a2b_base64(s))
except Exception as e:
raise SyntaxError("base64 error: %s" % e)
return b
def b2a_base64(b):
return binascii.b2a_base64(b)
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
All such PEM blocks will be found, decoded, and return in an ordered list
    of bytearrays, which may have zero elements if no PEM blocks are found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = ""
while s1:
s2 += s1[:64] + "\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name) + s2 + \
("-----END %s-----\n" % name)
return s
def pemSniff(inStr, name):
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
def parse_private_key(s):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return _parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return _parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(bytes):
s = ASN1_Node(str(bytes))
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID_node = s.next_node(version_node)
ii = s.first_child(rsaOID_node)
rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if rsaOID != '1.2.840.113549.1.1.1':
raise SyntaxError("Unrecognized AlgorithmIdentifier")
privkey_node = s.next_node(rsaOID_node)
value = s.get_value_of_type(privkey_node, 'OCTET STRING')
return _parseASN1PrivateKey(value)
def _parseSSLeay(bytes):
return _parseASN1PrivateKey(ASN1_Node(str(bytes)))
def bytesToNumber(s):
return int(binascii.hexlify(s), 16)
def _parseASN1PrivateKey(s):
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
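    # PKCS#1 RSAPrivateKey field order: modulus, publicExponent, privateExponent,
    # prime1, prime2, exponent1, exponent2, coefficient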
return map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv])
|
cryptapus/electrum-uno
|
lib/pem.py
|
Python
|
mit
| 6,584
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="surface.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/surface/hoverlabel/font/_color.py
|
Python
|
mit
| 470
|
''' Controller for the application '''
import logging
import sys
import traceback
import forms
from models import Settings
from flask import Flask, render_template
from google.appengine.api import app_identity # pylint: disable=E0401
from google.appengine.api import mail # pylint: disable=E0401
from google.appengine.api import users
# Initialize the application with CSRF
app = Flask(__name__) # pylint: disable=invalid-name
# Set the Flask debug to false so you can use GAE debug
app.config.update(DEBUG=False)
app.secret_key = Settings.get('SECRET_KEY')
app.config['RECAPTCHA_USE_SSL'] = False
app.config['RECAPTCHA_PUBLIC_KEY'] = Settings.get('RECAPTCHA_PUBLIC_KEY')
app.config['RECAPTCHA_PRIVATE_KEY'] = Settings.get('RECAPTCHA_PRIVATE_KEY')
app.config['RECAPTCHA_OPTIONS'] = {'theme': 'white'}
@app.before_request
def enable_local_error_handling():
''' test of log '''
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.route('/', methods=['GET', 'POST'])
def form():
''' Show the message form for the user to fill in '''
message_form = forms.MessageForm()
if message_form.validate_on_submit():
send_mail(message_form.email.data, message_form.message.data)
return render_template('submitted_form.html', title="Thanks", form=message_form)
return render_template('form.html', title="Message", form=message_form)
def send_mail(their_email, their_message):
''' Send an email message to me '''
    message = mail.EmailMessage(sender=app_identity.get_application_id() +
                                '@appspot.gserviceaccount.com')
message.subject = 'Message from Bagbatch Website'
message.to = Settings.get('EMAIL')
message.body = """From: {}\n\n<<BEGINS>>\n\n{}\n\n<<ENDS>>""".format(their_email, their_message)
message.send()
@app.errorhandler(500)
def server_error(error):
''' Log any errors to the browser because you are too lazy to look at the console
    The Flask DEBUG setting must be set to false for this to work '''
exception_type, exception_value, trace_back = sys.exc_info()
no_limit = None
exception = ''.join(traceback.format_exception(exception_type, exception_value,
trace_back, no_limit))
logging.exception('An error occurred during a request. ' + str(error))
return render_template('500.html', title=error, exception=exception)
@app.route('/admin', methods=['GET'])
def admin_page():
''' Authentication required page '''
user = users.get_current_user()
return render_template('admin.html', email=user.email())
|
joejcollins/CaptainScarlet
|
web_app/main.py
|
Python
|
mit
| 2,641
|
# -*- coding: utf-8 -*-
import os
from fuel import config
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
class SVHN(H5PYDataset):
"""The Street View House Numbers (SVHN) dataset.
SVHN [SVHN] is a real-world image dataset for developing machine
learning and object recognition algorithms with minimal requirement
on data preprocessing and formatting. It can be seen as similar in
flavor to MNIST [LBBH] (e.g., the images are of small cropped
digits), but incorporates an order of magnitude more labeled data
(over 600,000 digit images) and comes from a significantly harder,
unsolved, real world problem (recognizing digits and numbers in
natural scene images). SVHN is obtained from house numbers in
Google Street View images.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
.. [LBBH] Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner,
*Gradient-based learning applied to document recognition*,
Proceedings of the IEEE, November 1998, 86(11):2278-2324.
Parameters
----------
which_format : {1, 2}
SVHN format 1 contains the full numbers, whereas SVHN format 2
contains cropped digits.
which_set : {'train', 'test', 'extra'}
Whether to load the training set (73,257 examples), the test
set (26,032 examples) or the extra set (531,131 examples).
Note that SVHN does not have a validation set; usually you
will create your own training/validation split
using the `subset` argument.
"""
filename = 'svhn_format_{}.hdf5'
default_transformers = uint8_pixels_to_floatX(('features',))
def __init__(self, which_format, which_set, **kwargs):
self.which_format = which_format
super(SVHN, self).__init__(self.data_path, which_set, **kwargs)
@property
def data_path(self):
return os.path.join(
config.data_path, self.filename.format(self.which_format))
|
EderSantana/fuel
|
fuel/datasets/svhn.py
|
Python
|
mit
| 2,213
|
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeDataBufferFeature(SeaBreezeFeature):
identifier = "data_buffer"
def clear(self) -> None:
raise NotImplementedError("implement in derived class")
def remove_oldest_spectra(self, number_of_spectra: int) -> None:
raise NotImplementedError("implement in derived class")
def get_number_of_elements(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity(self) -> int:
raise NotImplementedError("implement in derived class")
def set_buffer_capacity(self, capacity: int) -> None:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_maximum(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_minimum(self) -> int:
raise NotImplementedError("implement in derived class")
|
ap--/python-seabreeze
|
src/seabreeze/pyseabreeze/features/databuffer.py
|
Python
|
mit
| 1,040
|
"""
Basic pulsar-related functions and statistics.
"""
import functools
from collections.abc import Iterable
import warnings
from scipy.optimize import minimize, basinhopping
import numpy as np
import matplotlib.pyplot as plt
from .fftfit import fftfit as taylor_fftfit
from ..utils import simon, jit
from . import HAS_PINT, get_model, toa
__all__ = ['pulse_phase', 'phase_exposure', 'fold_events', 'profile_stat',
'z_n', 'fftfit', 'get_TOA', 'z_n_binned_events', 'z_n_gauss',
'z_n_events', 'htest', 'z_n_binned_events_all', 'z_n_gauss_all',
'z_n_events_all', 'get_orbital_correction_from_ephemeris_file']
def _default_value_if_no_key(dictionary, key, default):
try:
return dictionary[key]
    except KeyError:
return default
def p_to_f(*period_derivatives):
"""Convert periods into frequencies, and vice versa.
    For now, limited to the third derivative. Derivatives above the
    third are set to zero in the output, and a warning is issued.
Parameters
----------
p, pdot, pddot, ... : floats
period derivatives, starting from zeroth and in
increasing order
Examples
--------
>>> p_to_f() == []
True
>>> np.allclose(p_to_f(1), [1])
True
>>> np.allclose(p_to_f(1, 2), [1, -2])
True
>>> np.allclose(p_to_f(1, 2, 3), [1, -2, 5])
True
>>> np.allclose(p_to_f(1, 2, 3, 4), [1, -2, 5, -16])
True
>>> np.allclose(p_to_f(1, 2, 3, 4, 32, 22), [1, -2, 5, -16, 0, 0])
True
"""
nder = len(period_derivatives)
if nder == 0:
return []
fder = np.zeros_like(period_derivatives)
p = period_derivatives[0]
fder[0] = 1 / p
if nder > 1:
pd = period_derivatives[1]
fder[1] = -1 / p**2 * pd
if nder > 2:
pdd = period_derivatives[2]
fder[2] = 2 / p**3 * pd**2 - 1 / p**2 * pdd
if nder > 3:
pddd = period_derivatives[3]
fder[3] = - 6 / p**4 * pd ** 3 + 6 / p**3 * pd * pdd - \
1 / p**2 * pddd
if nder > 4:
warnings.warn("Derivatives above third are not supported")
return fder
def pulse_phase(times, *frequency_derivatives, **opts):
"""
Calculate pulse phase from the frequency and its derivatives.
Parameters
----------
times : array of floats
The times at which the phase is calculated
*frequency_derivatives: floats
List of derivatives in increasing order, starting from zero.
Other Parameters
----------------
ph0 : float
The starting phase
to_1 : bool, default True
Only return the fractional part of the phase, normalized from 0 to 1
Returns
-------
phases : array of floats
The absolute pulse phase
"""
ph0 = _default_value_if_no_key(opts, "ph0", 0)
to_1 = _default_value_if_no_key(opts, "to_1", True)
ph = ph0
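    # Taylor expansion of the phase: ph = ph0 + f*t + fdot*t**2/2 + fddot*t**3/6 + ...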
for i_f, f in enumerate(frequency_derivatives):
ph += 1 / np.math.factorial(i_f + 1) * times**(i_f + 1) * f
if to_1:
ph -= np.floor(ph)
return ph
def phase_exposure(start_time, stop_time, period, nbin=16, gtis=None):
"""Calculate the exposure on each phase of a pulse profile.
Parameters
----------
start_time, stop_time : float
Starting and stopping time (or phase if ``period==1``)
period : float
The pulse period (if 1, equivalent to phases)
Other parameters
----------------
nbin : int, optional, default 16
The number of bins in the profile
gtis : [[gti00, gti01], [gti10, gti11], ...], optional, default None
Good Time Intervals
Returns
-------
expo : array of floats
The normalized exposure of each bin in the pulse profile (1 is the
highest exposure, 0 the lowest)
"""
if gtis is None:
gtis = np.array([[start_time, stop_time]])
# Use precise floating points -------------
start_time = np.longdouble(start_time)
stop_time = np.longdouble(stop_time)
period = np.longdouble(period)
gtis = np.array(gtis, dtype=np.longdouble)
# -----------------------------------------
expo = np.zeros(nbin)
phs = np.linspace(0, 1, nbin + 1)
phs = np.array(list(zip(phs[0:-1], phs[1:])))
# Discard gtis outside [start, stop]
good = np.logical_and(gtis[:, 0] < stop_time, gtis[:, 1] > start_time)
gtis = gtis[good]
for g in gtis:
g0 = g[0]
g1 = g[1]
if g0 < start_time:
# If the start of the fold is inside a gti, start from there
g0 = start_time
if g1 > stop_time:
# If the end of the fold is inside a gti, end there
g1 = stop_time
length = g1 - g0
# How many periods inside this length?
nraw = length / period
# How many integer periods?
nper = nraw.astype(int)
# First raw exposure: the number of periods
expo += nper / nbin
# FRACTIONAL PART =================
# What remains is additional exposure for part of the profile.
start_phase = np.fmod(g0 / period, 1)
end_phase = nraw - nper + start_phase
limits = [[start_phase, end_phase]]
# start_phase is always < 1. end_phase not always. In this case...
if end_phase > 1:
limits = [[0, end_phase - 1], [start_phase, 1]]
for l in limits:
l0 = l[0]
l1 = l[1]
# Discards bins untouched by these limits
goodbins = np.logical_and(phs[:, 0] <= l1, phs[:, 1] >= l0)
idxs = np.arange(len(phs), dtype=int)[goodbins]
for i in idxs:
start = np.max([phs[i, 0], l0])
stop = np.min([phs[i, 1], l1])
w = stop - start
expo[i] += w
return expo / np.max(expo)
def fold_events(times, *frequency_derivatives, **opts):
'''Epoch folding with exposure correction.
Parameters
----------
times : array of floats
f, fdot, fddot... : float
The frequency and any number of derivatives.
Other Parameters
----------------
nbin : int, optional, default 16
The number of bins in the pulse profile
weights : float or array of floats, optional
The weights of the data. It can either be specified as a single value
for all points, or an array with the same length as ``time``
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...], optional
Good time intervals
ref_time : float, optional, default 0
Reference time for the timing solution
expocorr : bool, default False
Correct each bin for exposure (use when the period of the pulsar is
comparable to that of GTIs)
Returns
-------
phase_bins : array of floats
The phases corresponding to the pulse profile
profile : array of floats
The pulse profile
profile_err : array of floats
The uncertainties on the pulse profile
'''
nbin = _default_value_if_no_key(opts, "nbin", 16)
weights = _default_value_if_no_key(opts, "weights", 1)
gtis = _default_value_if_no_key(opts, "gtis",
np.array([[times[0], times[-1]]]))
ref_time = _default_value_if_no_key(opts, "ref_time", 0)
expocorr = _default_value_if_no_key(opts, "expocorr", False)
if not isinstance(weights, Iterable):
weights *= np.ones(len(times))
gtis = gtis - ref_time
times = times - ref_time
# This dt has not the same meaning as in the Lightcurve case.
# it's just to define stop_time as a meaningful value after
# the last event.
dt = np.abs(times[1] - times[0])
start_time = times[0]
stop_time = times[-1] + dt
phases = pulse_phase(times, *frequency_derivatives, to_1=True)
gti_phases = pulse_phase(gtis, *frequency_derivatives, to_1=False)
start_phase, stop_phase = pulse_phase(np.array([start_time, stop_time]),
*frequency_derivatives,
to_1=False)
raw_profile, bins = np.histogram(phases,
bins=np.linspace(0, 1, nbin + 1),
weights=weights)
if expocorr:
expo_norm = phase_exposure(start_phase, stop_phase, 1, nbin,
gtis=gti_phases)
simon("For exposure != 1, the uncertainty might be incorrect")
else:
expo_norm = 1
# TODO: this is wrong. Need to extend this to non-1 weights
raw_profile_err = np.sqrt(raw_profile)
return bins[:-1] + np.diff(bins) / 2, raw_profile / expo_norm, \
raw_profile_err / expo_norm
def profile_stat(profile, err=None):
"""Calculate the epoch folding statistics \'a la Leahy et al. (1983).
Parameters
----------
profile : array
The pulse profile
Other Parameters
----------------
err : float or array
The uncertainties on the pulse profile
Returns
-------
stat : float
The epoch folding statistics
"""
mean = np.mean(profile)
if err is None:
err = np.sqrt(mean)
return np.sum((profile - mean) ** 2 / err ** 2)
@functools.lru_cache(maxsize=128)
def _cached_sin_harmonics(nbin, z_n_n):
"""Cached sine values corresponding to each of the nbin bins.
Parameters
----------
nbin : int
Number of bins
z_n_n : int
The number of harmonics (n) in the Z^2_n search
"""
dph = 1.0 / nbin
twopiphases = np.pi * 2 * np.arange(dph / 2, 1, dph)
cached_sin = np.zeros(z_n_n * nbin)
for i in range(z_n_n):
cached_sin[i * nbin: (i + 1) * nbin] = np.sin(twopiphases)
return cached_sin
@functools.lru_cache(maxsize=128)
def _cached_cos_harmonics(nbin, z_n_n):
"""Cached cosine values corresponding to each of the nbin bins.
Parameters
----------
nbin : int
Number of bins
z_n_n : int
The number of harmonics (n) in the Z^2_n search
"""
dph = 1.0 / nbin
twopiphases = np.pi * 2 * np.arange(dph / 2, 1, dph)
cached_cos = np.zeros(z_n_n * nbin)
for i in range(z_n_n):
cached_cos[i * nbin: (i + 1) * nbin] = np.cos(twopiphases)
return cached_cos
@jit(nopython=True)
def _z_n_fast_cached_sums_unnorm(prof, ks, cached_sin, cached_cos):
'''Calculate the unnormalized Z^2_k, for (k=1,.. n), of a pulsed profile.
Parameters
----------
prof : :class:`numpy.array`
The pulsed profile
ks : :class:`numpy.array` of int
The harmonic numbers, from 1 to n
cached_sin : :class:`numpy.array`
Cached sine values for each phase bin in the profile
cached_cos : :class:`numpy.array`
Cached cosine values for each phase bin in the profile
'''
all_zs = np.zeros(ks.size)
N = prof.size
total_sum = 0
for k in ks:
local_z = (
np.sum(cached_cos[: N * k: k] * prof) ** 2
+ np.sum(cached_sin[: N * k: k] * prof) ** 2
)
total_sum += local_z
all_zs[k - 1] = total_sum
return all_zs
def z_n_binned_events_all(profile, nmax=20):
'''Z^2_n statistic for multiple harmonics and binned events
See Bachetti+2021, arXiv:2012.11397
Parameters
----------
profile : array of floats
The folded pulse profile (containing the number of
photons falling in each pulse bin)
    nmax : int, default 20
        Maximum number of harmonics (including the fundamental) to compute
Returns
-------
ks : list of ints
Harmonic numbers, from 1 to nmax (included)
    z2_n : array of floats
The value of the statistic for all ks
'''
cached_sin = _cached_sin_harmonics(profile.size, nmax)
cached_cos = _cached_cos_harmonics(profile.size, nmax)
ks = np.arange(1, nmax + 1, dtype=int)
total = np.sum(profile)
if total == 0:
return ks, np.zeros(nmax)
all_zs = _z_n_fast_cached_sums_unnorm(profile, ks, cached_sin, cached_cos)
return ks, all_zs * 2 / total
def z_n_gauss_all(profile, err, nmax=20):
'''Z^2_n statistic for n harmonics and normally-distributed profiles
See Bachetti+2021, arXiv:2012.11397
Parameters
----------
profile : array of floats
The folded pulse profile
err : float
The (assumed constant) uncertainty on the flux in each bin.
nmax : int
Maximum number of harmonics, including the fundamental
Returns
-------
ks : list of ints
Harmonic numbers, from 1 to nmax (included)
z2_n : list of floats
The value of the statistic for all ks
'''
cached_sin = _cached_sin_harmonics(profile.size, nmax)
cached_cos = _cached_cos_harmonics(profile.size, nmax)
ks = np.arange(1, nmax + 1, dtype=int)
all_zs = _z_n_fast_cached_sums_unnorm(profile, ks, cached_sin, cached_cos)
return ks, all_zs * (2 / profile.size / err**2)
@jit(nopython=True)
def z_n_events_all(phase, nmax=20):
'''Z^2_n statistics, a` la Buccheri+83, A&A, 128, 245, eq. 2.
Parameters
----------
phase : array of floats
The phases of the events
    nmax : int, default 20
        Maximum number of harmonics (including the fundamental) to compute
Returns
-------
ks : list of ints
Harmonic numbers, from 1 to nmax (included)
    z2_n : array of floats
The Z^2_n statistic for all ks
'''
all_zs = np.zeros(nmax)
ks = np.arange(1, nmax + 1)
nphot = phase.size
total_sum = 0
phase = phase * 2 * np.pi
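    # Z^2_n = (2 / nphot) * sum_{k=1..n} [ (sum_j cos(k*phi_j))^2 + (sum_j sin(k*phi_j))^2 ]  (Buccheri+83, eq. 2)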
for k in ks:
local_z = (
np.sum(np.cos(k * phase)) ** 2
+ np.sum(np.sin(k * phase)) ** 2
)
total_sum += local_z
all_zs[k - 1] = total_sum
return ks, 2 / nphot * all_zs
def z_n_binned_events(profile, n):
'''Z^2_n statistic for pulse profiles from binned events
See Bachetti+2021, arXiv:2012.11397
Parameters
----------
profile : array of floats
The folded pulse profile (containing the number of
photons falling in each pulse bin)
n : int
Number of harmonics, including the fundamental
Returns
-------
z2_n : float
The value of the statistic
'''
_, all_zs = z_n_binned_events_all(profile, nmax=n)
return all_zs[-1]
def z_n_gauss(profile, err, n):
'''Z^2_n statistic for normally-distributed profiles
See Bachetti+2021, arXiv:2012.11397
Parameters
----------
profile : array of floats
The folded pulse profile
err : float
The (assumed constant) uncertainty on the flux in each bin.
n : int
Number of harmonics, including the fundamental
Returns
-------
z2_n : float
The value of the statistic
'''
_, all_zs = z_n_gauss_all(profile, err, nmax=n)
return all_zs[-1]
def z_n_events(phase, n):
'''Z^2_n statistics, a` la Buccheri+83, A&A, 128, 245, eq. 2.
Parameters
----------
phase : array of floats
The phases of the events
    n : int
Number of harmonics, including the fundamental
Returns
-------
z2_n : float
The Z^2_n statistic
'''
ks, all_zs = z_n_events_all(phase, nmax=n)
return all_zs[-1]
def z_n(data, n, datatype="events", err=None, norm=None):
'''Z^2_n statistics, a` la Buccheri+83, A&A, 128, 245, eq. 2.
If datatype is "binned" or "gauss", uses the formulation from
Bachetti+2021, ApJ, arxiv:2012.11397
Parameters
----------
data : array of floats
Phase values or binned flux values
    n : int
Number of harmonics, including the fundamental
Other Parameters
----------------
datatype : str
The data type: "events" if phase values between 0 and 1,
"binned" if folded pulse profile from photons, "gauss" if
folded pulse profile with normally-distributed fluxes
err : float
The uncertainty on the pulse profile fluxes (required for
datatype="gauss", ignored otherwise)
norm : float
For backwards compatibility; if norm is not None, it is
substituted to ``data``, and data is ignored. This raises
a DeprecationWarning
Returns
-------
z2_n : float
The Z^2_n statistics of the events.
'''
data = np.asarray(data)
if norm is not None:
warnings.warn("The use of ``z_n(phase, norm=profile)`` is deprecated. Use "
"``z_n(profile, datatype='binned')`` instead",
DeprecationWarning)
if isinstance(norm, Iterable):
data = norm
datatype = "binned"
else:
datatype = "events"
if data.size == 0:
return 0
if datatype == "binned":
return z_n_binned_events(data, n)
elif datatype == "events":
return z_n_events(data, n)
elif datatype == "gauss":
if err is None:
raise ValueError(
"If datatype='gauss', you need to specify an uncertainty (err)")
return z_n_gauss(data, n=n, err=err)
raise ValueError(f"Unknown datatype requested for Z_n ({datatype})")
def htest(data, nmax=20, datatype="binned", err=None):
    '''H-test statistic, a` la De Jager+89, A&A, 221, 180D, eq. 2.
If datatype is "binned" or "gauss", uses the formulation from
Bachetti+2021, ApJ, arxiv:2012.11397
Parameters
----------
data : array of floats
Phase values or binned flux values
nmax : int, default 20
Maximum of harmonics for Z^2_n
Other Parameters
----------------
datatype : str
The datatype of data: "events" if phase values between 0 and 1,
"binned" if folded pulse profile from photons, "gauss" if
folded pulse profile with normally-distributed fluxes
err : float
The uncertainty on the pulse profile fluxes (required for
datatype="gauss", ignored otherwise)
Returns
-------
M : int
The best number of harmonics that describe the signal.
htest : float
The htest statistics of the events.
'''
if datatype == "binned":
ks, zs = z_n_binned_events_all(data, nmax)
elif datatype == "events":
ks, zs = z_n_events_all(data, nmax)
elif datatype == "gauss":
if err is None:
raise ValueError(
"If datatype='gauss', you need to specify an uncertainty (err)")
ks, zs = z_n_gauss_all(data, nmax=nmax, err=err)
else:
raise ValueError(f"Unknown datatype requested for htest ({datatype})")
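    # De Jager+89 H statistic: H = max over m of (Z^2_m - 4*m + 4)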
Hs = zs - 4 * ks + 4
bestidx = np.argmax(Hs)
return ks[bestidx], Hs[bestidx]
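# Hedged usage sketch (illustrative only): htest returns the best number of
# harmonics M and the corresponding H statistic; the profile values are made up.
#
#     profile = np.array([10, 12, 25, 13, 9, 11])
#     best_m, h_value = htest(profile, nmax=20, datatype="binned")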
def fftfit_fun(profile, template, amplitude, phase):
'''Function to be minimized for the FFTFIT method.'''
pass
def fftfit(prof, template=None, quick=False, sigma=None, use_bootstrap=False,
**fftfit_kwargs):
"""Align a template to a pulse profile.
Parameters
----------
prof : array
The pulse profile
template : array, default None
The template of the pulse used to perform the TOA calculation. If None,
a simple sinusoid is used
Other parameters
----------------
sigma : array
error on profile bins (currently has no effect)
use_bootstrap : bool
Calculate errors using a bootstrap method, with `fftfit_error`
**fftfit_kwargs : additional arguments for `fftfit_error`
Returns
-------
mean_amp, std_amp : floats
Mean and standard deviation of the amplitude
mean_phase, std_phase : floats
Mean and standard deviation of the phase
"""
prof = prof - np.mean(prof)
template = template - np.mean(template)
return taylor_fftfit(prof, template)
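# Hedged usage sketch: as in get_TOA below, the return value of fftfit is
# unpacked into amplitude and phase estimates with their uncertainties. The
# sinusoidal template here is only an example.
#
#     nbin = len(profile)
#     ph = np.arange(0, 1, 1 / nbin)
#     template = np.cos(2 * np.pi * ph)
#     mean_amp, std_amp, phase, phase_err = fftfit(profile, template=template)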
def _plot_TOA_fit(profile, template, toa, mod=None, toaerr=None,
additional_phase=0., show=True, period=1):
"""Plot diagnostic information on the TOA."""
from scipy.interpolate import interp1d
import time
phases = np.arange(0, 2, 1 / len(profile))
profile = np.concatenate((profile, profile))
template = np.concatenate((template, template))
if mod is None:
mod = interp1d(phases, template, fill_value='extrapolate')
fig = plt.figure()
plt.plot(phases, profile, drawstyle='steps-mid')
fine_phases = np.linspace(0, 1, 1000)
fine_phases_shifted = fine_phases - toa / period + additional_phase
model = mod(fine_phases_shifted - np.floor(fine_phases_shifted))
model = np.concatenate((model, model))
plt.plot(np.linspace(0, 2, 2000), model)
if toaerr is not None:
plt.axvline((toa - toaerr) / period)
plt.axvline((toa + toaerr) / period)
plt.axvline(toa / period - 0.5 / len(profile), ls='--')
plt.axvline(toa / period + 0.5 / len(profile), ls='--')
timestamp = int(time.time())
plt.savefig('{}.png'.format(timestamp))
if not show:
plt.close(fig)
def get_TOA(prof, period, tstart, template=None, additional_phase=0,
quick=False, debug=False, use_bootstrap=False,
**fftfit_kwargs):
"""Calculate the Time-Of-Arrival of a pulse.
Parameters
----------
prof : array
The pulse profile
template : array, default None
The template of the pulse used to perform the TOA calculation, if any.
Otherwise use the default of fftfit
tstart : float
The time at the start of the pulse profile
Other parameters
----------------
nstep : int, optional, default 100
Number of steps for the bootstrap method
Returns
-------
toa, toastd : floats
Mean and standard deviation of the TOA
"""
nbin = len(prof)
ph = np.arange(0, 1, 1 / nbin)
if template is None:
template = np.cos(2 * np.pi * ph)
mean_amp, std_amp, phase_res, phase_res_err = \
fftfit(prof, template=template, quick=quick,
use_bootstrap=use_bootstrap, **fftfit_kwargs)
phase_res = phase_res + additional_phase
phase_res = phase_res - np.floor(phase_res)
toa = tstart + phase_res * period
toaerr = phase_res_err * period
if debug:
_plot_TOA_fit(prof, template, toa - tstart, toaerr=toaerr,
additional_phase=additional_phase,
period=period)
return toa, toaerr
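# Hedged usage sketch: computing a TOA from a folded profile. The period and
# start time below are invented numbers, purely for illustration.
#
#     toa, toa_err = get_TOA(profile, period=0.0333, tstart=58000.0)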
def _load_and_prepare_TOAs(mjds, ephem="DE405"):
toalist = [None] * len(mjds)
for i, m in enumerate(mjds):
toalist[i] = toa.TOA(m, obs='Barycenter', scale='tdb')
toalist = toa.TOAs(toalist=toalist)
if 'tdb' not in toalist.table.colnames:
toalist.compute_TDBs(ephem=ephem)
if 'ssb_obs_pos' not in toalist.table.colnames:
toalist.compute_posvels(ephem, False)
return toalist
def get_orbital_correction_from_ephemeris_file(mjdstart, mjdstop, parfile,
ntimes=1000, ephem="DE405",
return_pint_model=False):
"""Get a correction for orbital motion from pulsar parameter file.
Parameters
----------
mjdstart, mjdstop : float
Start and end of the time interval where we want the orbital solution
parfile : str
Any parameter file understood by PINT (Tempo or Tempo2 format)
Other parameters
----------------
ntimes : int
Number of time intervals to use for interpolation. Default 1000
Returns
-------
correction_sec : function
Function that accepts in input an array of times in seconds and a
floating-point MJDref value, and returns the deorbited times
correction_mjd : function
Function that accepts times in MJDs and returns the deorbited times.
"""
from scipy.interpolate import interp1d
from astropy import units
simon("Assuming events are already referred to the solar system "
"barycenter (timescale is TDB)")
if not HAS_PINT:
raise ImportError("You need the optional dependency PINT to use this "
"functionality: github.com/nanograv/pint")
mjds = np.linspace(mjdstart, mjdstop, ntimes)
toalist = _load_and_prepare_TOAs(mjds, ephem=ephem)
m = get_model(parfile)
delays = m.delay(toalist)
correction_mjd_rough = \
interp1d(mjds,
(toalist.table['tdbld'] * units.d - delays).to(units.d).value,
fill_value="extrapolate")
def correction_mjd(mjds):
"""Get the orbital correction.
Parameters
----------
mjds : array-like
The input times in MJD
Returns
-------
mjds: Corrected times in MJD
"""
xvals = correction_mjd_rough.x
# Maybe this will be fixed if scipy/scipy#9602 is accepted
        bad = (mjds < xvals[0]) | (mjds > xvals[-1])
if np.any(bad):
warnings.warn("Some points are outside the interpolation range:"
" {}".format(mjds[bad]))
return correction_mjd_rough(mjds)
def correction_sec(times, mjdref):
"""Get the orbital correction.
Parameters
----------
times : array-like
The input times in seconds of Mission Elapsed Time (MET)
mjdref : float
MJDREF, reference MJD for the mission
Returns
-------
mets: array-like
Corrected times in MET seconds
"""
deorb_mjds = correction_mjd(times / 86400 + mjdref)
return np.array((deorb_mjds - mjdref) * 86400)
retvals = [correction_sec, correction_mjd]
if return_pint_model:
retvals.append(m)
return retvals
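# Hedged usage sketch (requires the optional PINT dependency; "pulsar.par" and
# the MJD range are placeholders, not real files or data):
#
#     correction_sec, correction_mjd = \
#         get_orbital_correction_from_ephemeris_file(58000, 58010, "pulsar.par")
#     deorbited_mjds = correction_mjd(np.array([58001.0, 58002.5]))
#     deorbited_mets = correction_sec(times_met, mjdref=55197.0)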
|
abigailStev/stingray
|
stingray/pulse/pulsar.py
|
Python
|
mit
| 25,604
|
from Crypto import Random
from src.aes import encrypt_message, decrypt_message
def test_integrity():
plaintext = 'Test Text'
key = Random.new().read(16)
# Ensure that D(k, E(k, p)) == p
assert decrypt_message(key, encrypt_message(key, plaintext)) == plaintext
def test_privacy():
plaintext = 'Test Text'
prng = Random.new()
key1 = prng.read(16)
key2 = prng.read(16)
# Ensure decrypting with a different key does not reveal the plaintext
assert decrypt_message(key1, encrypt_message(key2, plaintext)) != plaintext
|
MichaelAquilina/CryptoTools
|
src/tests/aes_test.py
|
Python
|
mit
| 561
|
import shutil
import tempfile
import time
import os
import random
import subprocess
import unittest
from basefs.keys import Key
from basefs.logs import Log
from . import utils
class MountTests(unittest.TestCase):
def setUp(self):
__, self.logpath = tempfile.mkstemp()
__, self.logpath_b = tempfile.mkstemp()
self.addCleanup(os.remove, self.logpath)
self.addCleanup(os.remove, self.logpath_b)
__, self.keypath = tempfile.mkstemp()
self.addCleanup(os.remove, self.keypath)
self.port = random.randint(40000, 50000-1)
self.port_b = random.randint(50000, 60000)
log = Log(self.logpath)
root_key = Key.generate()
log.bootstrap([root_key], ['127.0.0.1:%i' % self.port])
root_key.save(self.keypath)
shutil.copy2(self.logpath, self.logpath_b)
self.hostname = utils.random_ascii(10)
self.hostname_b = utils.random_ascii(10)
self.mountpath = tempfile.mkdtemp()
self.mountpath_b = tempfile.mkdtemp()
context = {
'mountpath': self.mountpath,
'logpath': self.logpath,
'keypath': self.keypath,
'port': self.port,
'hostname': self.hostname,
}
cmd = 'basefs mount %(logpath)s %(mountpath)s -k %(keypath)s -p %(port)s -n %(hostname)s'
proc = subprocess.Popen(cmd % context, shell=True)
self.addCleanup(proc.kill)
time.sleep(1)
self.addCleanup(proc.kill)
context.update({
'mountpath': self.mountpath_b,
'logpath': self.logpath_b,
'port': self.port_b,
'hostname': self.hostname_b,
})
proc = subprocess.Popen(cmd % context, shell=True)
self.addCleanup(proc.kill)
self.addCleanup(time.sleep, 1)
self.addCleanup(proc.kill)
self.addCleanup(shutil.rmtree, self.mountpath)
self.addCleanup(shutil.rmtree, self.mountpath_b)
time.sleep(1)
def test_mount(self):
pass
|
glic3rinu/basefs
|
basefs/tests/test_mount.py
|
Python
|
mit
| 2,043
|
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import datetime
from Bio.Seq import Seq
if __name__ == '__main__':
from needleman_wunsch import needleman_wunsch
else:
from .needleman_wunsch import needleman_wunsch
#-------------------------------
def plot_nw(seq_alpha_col,seq_beta_row,p_penalty):
if not seq_alpha_col or not seq_beta_row:
print("Alguna de las secuencias está vacía.")
return
plt.rcParams["figure.figsize"] = 20, 20
param = {"grid.linewidth": 1.6,
"grid.color": "lightgray",
"axes.linewidth": 1.6,
"axes.edgecolor": "lightgray",
"font.size": 8}
plt.rcParams.update(param)
# Data
headh = seq_alpha_col
headv = seq_beta_row
score_matrix, pt_mat, arrows = needleman_wunsch(seq_alpha_col,seq_beta_row,p_penalty,score_only=False)
# Plot
fig, ax = plt.subplots()
ax.set_xlim(-1.5, score_matrix.shape[1] - .5)
ax.set_ylim(-1.5, score_matrix.shape[0] - .5)
ax.invert_yaxis()
for i in range(score_matrix.shape[0]):
for j in range(score_matrix.shape[1]):
ax.text(j, i, score_matrix[i, j], ha="center", va="center")
for i, l in enumerate(headh):
ax.text(i + 1, -1, l, ha="center", va="center", fontweight="semibold")
for i, l in enumerate(headv):
ax.text(-1, i + 1, l, ha="center", va="center", fontweight="semibold")
    ax.xaxis.set_minor_locator(ticker.FixedLocator(
        np.arange(-1.5, score_matrix.shape[1] - .5, 1)))
    ax.yaxis.set_minor_locator(ticker.FixedLocator(
        np.arange(-1.5, score_matrix.shape[0] - .5, 1)))
    plt.tick_params(axis='both', which='both', bottom=False, top=False,
                    left=False, right=False, labelbottom=False, labelleft=False)
#-----------ax.set_aspect('auto')
ax.grid(True, which='minor')
arrowprops = dict(facecolor='blue', alpha=0.5, lw=0,
shrink=0.2, width=2, headwidth=7, headlength=7)
# all path
for i in range(1,pt_mat.shape[0]):
for j in range(1,pt_mat.shape[1]):
if(pt_mat[i][j]['left'] != ''):
ax.annotate("", xy=(j-1,i),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['diagonal'] != ''):
ax.annotate("", xy=(j-1,i-1),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['up'] != ''):
ax.annotate("", xy=(j,i-1),
xytext=(j,i), arrowprops=arrowprops)
# optimal path
arrowprops.update(facecolor='crimson')
for i in range(arrows.shape[0]):
ax.annotate("", xy=arrows[i, 2:], # origin
xytext=arrows[i, :2], arrowprops=arrowprops)
#------------
plt.gca().set_aspect('auto')
time = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
plt.savefig("output/needleman_wunsch/output-nw_"+time+".pdf", dpi=600)
#plt.show()
if __name__ == '__main__':
alpha = Seq("ACTCA")
beta = Seq("TTCAT")
penalty = {'MATCH': 1, 'MISMATCH': -1, 'GAP': -2}
plot_nw(alpha,beta,penalty)
|
kevinah95/bmc-sequence-alignment
|
algorithms/needleman_wunsch/plot_nw.py
|
Python
|
mit
| 3,189
|
# http://stackoverflow.com/questions/1477294/generate-random-utf-8-string-in-python
import random
def get_random_unicode(length):
try:
get_char = unichr
except NameError:
get_char = chr
# Update this to include code point ranges to be sampled
include_ranges = [
(0x0021, 0x0021),
(0x0023, 0x0026),
(0x0028, 0x007E),
(0x00A1, 0x00AC),
(0x00AE, 0x00FF),
(0x0100, 0x017F),
(0x0180, 0x024F),
(0x2C60, 0x2C7F),
(0x16A0, 0x16F0),
(0x0370, 0x0377),
(0x037A, 0x037E),
(0x0384, 0x038A),
(0x038C, 0x038C),
]
alphabet = [
get_char(code_point) for current_range in include_ranges
for code_point in range(current_range[0], current_range[1] + 1)
]
return ''.join(random.choice(alphabet) for i in range(length))
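# Hedged usage sketch: generate a short random Unicode string, e.g. for test data.
#
#     title = get_random_unicode(12)
#     assert len(title) == 12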
|
gouthambs/Flask-Blogging
|
test/utils.py
|
Python
|
mit
| 948
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Alexander David Leech
@date: 03/06/2016
@rev: 1
@lang: Python 2.7
@deps: YAML
@desc: Class to use as an interface to import YAML files
"""
import yaml
class yamlImport():
@staticmethod
def importYAML(pathToFile):
try:
with open(pathToFile, "r") as f:
                config = yaml.safe_load(f)
except IOError:
print("Failed to read " + pathToFile)
raise SystemExit()
return config
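# Hedged usage sketch ("config.yml" is a placeholder path):
#
#     config = yamlImport.importYAML("config.yml")
#     # `config` holds whatever structure (usually a dict) the YAML file defines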
|
FlaminMad/RPiProcessRig
|
RPiProcessRig/src/yamlImport.py
|
Python
|
mit
| 521
|
"""
Example of module documentation which can be
multiple-lined
"""
from sqlalchemy import Column, Integer, String
from wopmars.Base import Base
class FooBaseH(Base):
"""
Documentation for the class
"""
__tablename__ = "FooBaseH"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
state = Column(String)
__mapper_args__ = {
'polymorphic_on': state,
'polymorphic_identity': "1"
}
|
aitgon/wopmars
|
wopmars/tests/resource/model/FooBaseH.py
|
Python
|
mit
| 472
|
"""
Derivation and Elementary Trees live here.
"""
from __future__ import print_function
from baal.structures import Entry, ConstituencyTree, consts
from baal.semantics import Predicate, Expression
from collections import deque
from copy import copy, deepcopy
from math import floor, ceil
try:
input = raw_input
except NameError:
pass
def prn_pairs(phead, thead):
pairs = [("-LRB-", "-RRB-"), ("-RSB-", "-RSB-"), ("-LCB-", "-RCB-"),
("--", "--"), (",", ",")]
return any([left.lower()==phead.lower() and right.lower()==thead.lower() for left,right in pairs])
class AttachmentPoint(object):
def __init__(self, free, pos_symbol, gorn, type, seq_index):
self.free = free
self.pos_symbol = pos_symbol
self.gorn = gorn
self.type = type
self.seq_index = seq_index
self.hlf_symbol = None
self.frontier_increment = 0.01
self.frontier = (-1,0)
def __repr__(self):
return "{}@{}".format(self.pos_symbol,self.gorn)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
@classmethod
def from_tree(cls, tree, address, seq_index, tree_type):
new_point = cls(True, tree.symbol, address, tree_type, seq_index)
if tree.spine_index >= 0:
new_point.frontier = (tree.spine_index, tree.spine_index)
return new_point
@property
def left_frontier(self):
l, r = self.frontier
self.frontier = (l-self.frontier_increment, r)
assert self.frontier[0] > floor(self.frontier[0])
return self.frontier[0]
@property
def right_frontier(self):
l, r = self.frontier
self.frontier = (l, r+self.frontier_increment)
assert self.frontier[1] < ceil(self.frontier[1])
return self.frontier[1]
def sibling_increment(self, left=True):
l, r = self.frontier
if left:
self.frontier = (ceil(l) - 1.0, r)
else:
self.frontier = (l, floor(r) + 1.0)
def match(self, op):
pos_match = self.pos_symbol == op.target['pos_symbol']
gorn_match = ((self.gorn == op.target['target_gorn'])
or op.target['target_gorn'] is None)
hlf_match = self.hlf_symbol == op.target['target_hlf']
type_match = self.type == op.type
fail = []
if not pos_match:
f = "failure because pos:"
f += "self: {}; op: {}".format(str(self.pos_symbol),
str(op.target['pos_symbol']))
fail.append(f)
if not gorn_match:
f = "failure because gorn:"
f += "self: {}; op: {}".format(str(self.gorn),
str(op.target['target_gorn']))
fail.append(f)
if not hlf_match:
f = "failure because hlf:"
f += "self: {}; op: {}".format(str(self.hlf_symbol),
str(op.target['target_hlf']))
fail.append(f)
#if len(fail) > 0:
# print(" & \n".join(fail))
#else:
# print("Success!")
return self.free and pos_match and gorn_match and hlf_match and type_match
def set_path_features(self, hlf_symbol):
self.hlf_symbol = hlf_symbol
def clone(self):
ret = AttachmentPoint(self.free, self.pos_symbol, self.gorn,
self.type, self.seq_index)
ret.hlf_symbol = self.hlf_symbol
ret.frontier = self.frontier
return ret
class AttachmentOperation(object):
"""Represents an elementary tree operation
Used by DerivationTrees when trying to find where an elementary tree should attach
There are two modes to the operation:
1. Use it as a general attachment. In this case it needs to know
the permissable attachments via the pos_symbol (and direction if insertion)
2. Use it in specific attachment. In this case it needs to know
identifying information about the tree it should be attaching to.
Current ideas: hlf_symbol, tree_id, argument_number, gorn_address
Thoughts: gorn_address won't work (for obvious reasons as the tree grows)
tree_id won't work because there might be duplicates
hlf_symbol could work, as long as this semantic form remains
argument_number requires planning, which CSG and others might handle
"""
def __init__(self, target, type):
"""Pass in the already made parameters to make the operation.
Args:
target: dict with keys 'pos_symbol' and 'parameter'
'pos_symbol' is the part of speech this operation looks for
'parameter' is direction for insertions, and argument number
for substitutions
type: the type of operation this is: consts.INSERTION or consts.SUBSTITUTION
Notes:
insertion direction: left means it inserts on the left side
e.g. (NP* (DT a)) inserts left.
the asterisk denotes the attachment point
right means it inserts on the right side
e.g. (*S (. .)) inserts right
the asterisk denotes the attachment point
"""
self.target = target
self.type = type
@property
def is_insertion(self):
return self.type == consts.INSERTION
@property
def direction(self):
if not self.is_insertion:
raise Exception("Not an insertion tree")
else:
return self.target['attach_direction']
def clone(self):
return AttachmentOperation(self.target, self.type)
def set_path_features(self, target_gorn, target_hlf):
if target_hlf is not None:
self.target['target_hlf'] = target_hlf
if target_gorn is not None:
self.target['target_gorn'] = tuple(target_gorn)
@classmethod
def from_tree(cls, tree):
"""Calculate the parameters for the operation from a parse tree
Args:
tree: A ConstituencyParse instance
"""
if tree.adjunct:
target = {'pos_symbol': tree.symbol, 'attach_direction': tree.direction,
'target_gorn': None, 'target_hlf': None}
type = consts.INSERTION
else:
target = {'pos_symbol': tree.symbol, 'attach_direction': "up",
'target_gorn': None, 'target_hlf': None}
type = consts.SUBSTITUTION
return cls(target, type)
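# Hedged sketch (illustrative, mirroring from_tree above): building a
# left-inserting operation that targets NP nodes, then pinning it to a
# specific target tree and gorn address. The gorn/hlf values are made up.
#
#     op = AttachmentOperation(
#         {'pos_symbol': 'NP', 'attach_direction': 'left',
#          'target_gorn': None, 'target_hlf': None},
#         consts.INSERTION)
#     op.set_path_features(target_gorn=(0, 1), target_hlf='g0')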
class ElementaryTree(object):
"""represent a tree fragment, its operations, and its internal addresses
"""
def __init__(self, op, head, head_address, head_symbol, bracketed_string,
substitution_points, insertion_points,
hlf_symbol=None, tree_id=None, last_type=None, last_index=-1):
self.tree_operation = op
self.head = head
self.head_address = head_address
self.substitution_points = substitution_points
self.insertion_points = insertion_points
self.address = (0,)
self.last_type = last_type
self.last_index = last_index
self.hlf_symbol = hlf_symbol
self.bracketed_string = bracketed_string
self.tree_id = tree_id
self.head_symbol = head_symbol
@classmethod
def from_full_parse_tree(cls, parse_tree):
if parse_tree.symbol == "" and len(parse_tree.children) == 1:
parse_tree.symbol = "ROOT"
_, addressbook = parse_tree.clone()
@classmethod
def from_single_parse_tree(cls, parse_tree):
if parse_tree.save_str().upper() == "(ROOT ROOT)":
return cls.root_tree()
_, addressbook = parse_tree.clone()
head = None
head_address = None
substitution_points = list()
insertion_points = list()
sorted_book = sorted(addressbook.items())
_, root = sorted_book[0]
root_sym = root.symbol
for address, tree in sorted_book:
#if tree.symbol == "ROOT":
# head = "ROOT"
# new_point = AttachmentPoint.from_tree(tree, address, 0, consts.SUBSTITUTION)
# substitution_points.append(new_point)
if tree.lexical:
if head is None:
head = tree.symbol
head_address = address
head_parent = tree.parent
else:
assert prn_pairs(head, tree.symbol)
elif tree.complement:
new_point = AttachmentPoint.from_tree(tree,
address,
len(substitution_points),
consts.SUBSTITUTION)
substitution_points.append(new_point)
elif tree.spine_index >= 0:
new_point = AttachmentPoint.from_tree(tree,
address,
len(insertion_points),
consts.INSERTION)
insertion_points.append(new_point)
else:
print(address, tree)
print("Then what is it?")
op = AttachmentOperation.from_tree(parse_tree)
        assert (head is not None and head_address is not None) or head == "ROOT"
return cls(op, head, head_address, head_parent, parse_tree.save_str(),
substitution_points, insertion_points)
@classmethod
def from_bracketed_string(cls, bracketed_string):
parse_tree, _ = ConstituencyTree.make(bracketed_string=bracketed_string)
return cls.from_single_parse_tree(parse_tree)
@classmethod
def root_tree(cls):
root_op = AttachmentOperation({'pos_symbol': 'ROOT', 'attach_direction': None,
'target_gorn': None, 'target_hlf':None},
consts.SUBSTITUTION)
root_subpoint = AttachmentPoint(True, 'ROOT', (0,), consts.SUBSTITUTION, 0)
root_subpoint.hlf_symbol = "g-1"
return cls(root_op, "", (0,), None, "(ROOT)",
[root_subpoint], [], hlf_symbol="g-1")
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
################### INSERTION OPERATION
########################################
def insert(self, op_tree):
new_tree = deepcopy(self)#.clone()
address = new_tree.mark_insertion(op_tree.tree_operation)
op_tree = deepcopy(op_tree)#.clone()
op_tree.address = address
return new_tree, op_tree
def mark_insertion(self, op):
assert self.last_match is not None
assert self.last_match.match(op)
if op.target['attach_direction'] == "left":
op_index = self.last_match.left_frontier
else:
op_index = self.last_match.right_frontier
return self.last_match.gorn + (op_index,)
def matches_inspoint(self, op):
self.last_type = None
self.last_index = -1
for index, point in enumerate(self.insertion_points):
if point.match(op):
self.last_index = index
self.last_type = consts.INSERTION
return True
return False
################### SUBSTITUTION OPERATION
###########################################
def substitute(self, op_tree):
"""update open substitution spots.
Args:
op_tree: an ElementaryTree instance
Notes:
accepts an op_tree that needs to substitute here.
raises an Exception if it can't
"""
new_tree = deepcopy(self)#self.clone()
address = new_tree.mark_substituted(op_tree.tree_operation)
op_tree = deepcopy(op_tree)#.clone()
op_tree.address = address
return new_tree, op_tree
def mark_substituted(self, op):
assert self.last_match is not None
assert self.last_match.match(op)
self.last_match.free = False
match_gorn = self.last_match.gorn
if self.hlf_symbol == 'g-1':
return match_gorn
is_left = match_gorn < self.head_address
for point in self.insertion_points:
if point.gorn == match_gorn[:-1]:
point.sibling_increment(is_left)
return match_gorn
def matches_subpoint(self, op):
"""check to see if operation matches anything on this tree
Args:
op: AttachmentOperation instance
Returns:
True, False
"""
self.last_type = None
self.last_index = -1
for index, point in enumerate(self.substitution_points):
if point.match(op):
self.last_type = consts.SUBSTITUTION
self.last_index = index
return True
return False
##################### UTILITY METHODS
#####################################
def point_iterator(self, ignore_taken=False):
for pt_type, points in zip(['SUB', 'INS'], [self.sub_points, self.ins_points]):
for point in points:
if ignore_taken and not point.free:
continue
yield pt_type, point
@property
def ins_points(self):
return self.insertion_points
@property
def sub_points(self):
return self.substitution_points
@property
def root_pos(self):
return self.tree_operation.target['pos_symbol']
@property
def last_match(self):
if self.last_index < 0:
return None
elif self.last_type == consts.SUBSTITUTION:
return self.substitution_points[self.last_index]
else:
return self.insertion_points[self.last_index]
@property
def is_insertion(self):
return self.tree_operation.is_insertion
@property
def pos_symbol(self):
return self.tree_operation.target['pos_symbol']
def set_path_features(self, target_gorn=None, target_hlf=None,
self_hlf=None, tree_id=None):
"""Set the variables needed to reconstruct paths.
Args
target_gorn: the gorn address of the target operation node
target_hlf: the target hlf symbol of the target operation tree
self_hlf: this tree's hlf symbol
Notes:
The gorn address will identify where in the target tree
The target_hlf will identify which tree; especially important for duplicates
"""
if self_hlf:
for point in self.substitution_points + self.insertion_points:
point.set_path_features(self_hlf)
self.hlf_symbol = self_hlf
if target_gorn or target_hlf:
self.tree_operation.set_path_features(target_gorn, target_hlf)
if tree_id:
self.tree_id = tree_id
def expand_address(self, incoming):
self.expanded_address = incoming
for _, point in self.point_iterator():
point.expanded_address = incoming + point.gorn[1:]
""" a soft deletion to see if i can get rid of this code
def refresh_points(self):
self.tree_operation = self.tree_operation.clone()
self.substitution_points = [sub.clone() for sub in self.substitution_points]
self.insertion_points = [ins.clone() for ins in self.insertion_points]
def clone(self):
new_tree = ElementaryTree(self.tree_operation, self.head,
self.head_address, self.bracketed_string,
self.substitution_points,
self.insertion_points)
new_tree.refresh_points()
if self.last_match:
new_tree.last_type = self.last_type
new_tree.last_index = self.last_index
if self.hlf_symbol:
new_tree.hlf_symbol = self.hlf_symbol
new_tree.address = self.address
new_tree.tree_id = self.tree_id
return new_tree
"""
def __str__(self):
return self.bracketed_string
def __repr__(self):
substr = ", ".join("{}{}@{}".format(sub.pos_symbol,
"-FREE" if sub.free else "-FILLED",
sub.gorn)
for sub in sorted(self.substitution_points,
key=lambda x: x.gorn))
instr = ", ".join("{}@{}".format(ins.pos_symbol, ins.gorn)
for ins in sorted(self.insertion_points,
key=lambda x: x.gorn))
if self.tree_operation.is_insertion:
typestr = "{}*" if self.tree_operation.direction == "left" else "*{}"
else:
typestr = "^{}^"
typestr = typestr.format(self.head)
return "<{}; sub=[{}], ins=[{}]>".format(typestr, substr, instr)
class DerivationTree(object):
"""represent a tree of ElementaryTrees and their attachment addresses.
"""
def __init__(self, elem_tree, children, predicate=None, suppress_predicate=False):
self.elem_tree = elem_tree
self.children = children
self.predicate = predicate
if not suppress_predicate and predicate is None:
self.predicate = self.instantiate_semantics()
@classmethod
def root_tree(cls):
E = ElementaryTree.root_tree()
P = Predicate(name='ROOT', valence=1, hlf_symbol='g-1')
return cls(E, [], P)
@classmethod
def from_single_parse_tree(cls, tree):
elem_tree = ElementaryTree.from_single_parse_tree(tree)
return cls(elem_tree, [])
@classmethod
def from_bracketed(cls, bracketed_string, **kwargs):
elem_tree = ElementaryTree.from_bracketed_string(bracketed_string)
#parse_tree, _ = ConstituencyTree.make(bracketed_string=bracketed_string)
return cls(elem_tree, [], **kwargs)
@property
def E(self):
""" shortcut alias for shorter lines """
return self.elem_tree
@property
def is_insertion(self):
return self.elem_tree.is_insertion
@property
def direction(self):
if self.is_insertion:
return self.E.tree_operation.target['attach_direction']
else:
return "up"
@property
def tree_op(self):
return self.E.tree_operation
@property
def bracketed(self):
return self.E.bracketed_string
@property
def head(self):
return self.E.head
@property
def supertag(self):
return (self.E.root_pos, self.E.head_symbol, self.direction)
@property
def superindex(self):
return (self.head, self.supertag)
@property
def is_root(self):
return "ROOT" in self.E.bracketed_string
@property
def num_children(self):
return sum([child.num_children+1 for child in self.children])
@property
def lexical(self):
out = [self.E.head]
for child in self.children:
out.extend(child.lexical)
return out
def target_gorn(self, adjust_insertion=True):
gorn = self.tree_op.target['target_gorn']
direction = self.tree_op.target['attach_direction']
if self.is_insertion and adjust_insertion:
gorn += ((-100 if direction == "left" else 100), )
return gorn
def accepts_op(self, other_tree):
other_target = other_tree.E.tree_operation.target['pos_symbol']
if other_tree.is_insertion:
points = self.E.insertion_points
else:
points = self.E.substitution_points
for point in points:
if point.pos_symbol == other_target:
return True
return False
def expand_address(self, incoming=None):
incoming = incoming or (0,)
self.E.expand_address(incoming)
self.expanded_address = incoming
for child in self.children:
child_address = incoming + child.E.address[1:]
child.expand_address(child_address)
def all_points(self):
points = list(self.E.point_iterator())
for child in self.children:
points.extend(child.all_points)
return points
def get_spine(self):
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
return spine
def roll_features(self, parent_head="ROOT"):
"""assumes 1 head.. more thought needed for other forms"""
spine = self.get_spine()
out_ch = [child.head for child in self.children]
out = [(self.head, parent_head, self.bracketed, spine, out_ch)]
for child in self.children:
out.extend(child.roll_features(self.head))
return out
def modo_roll_features(self, parent_head="ROOT", parent_spine=None):
"""v2. mother-daughter roll features
roll up the tree; get the mother-daughter quadruples
"""
parent_spine = parent_spine or ((("ROOT", "SUB"),),)
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
safety = 0
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
filter_ch = lambda c: c.E.head_symbol in [",", ":", ".", "``","''", "--"]
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
safety += 1
if safety == 100:
raise Exception("loop issue")
out = [(self.head, parent_head, self.bracketed, spine, parent_spine)]
for child in self.children:
out.extend(child.modo_roll_features(self.head, spine))
return out
def dcontext_roll_features(self):
"""v3. mother-daughter roll features
roll up the trees; get the node+daughter head context
"""
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
filter_ch = lambda c: c.E.head_symbol in [",", ":", ".", "``","''", "--"]
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
hlf_info = (self.E.hlf_symbol, self.E.tree_operation.target['target_hlf'])
child_heads = [child.head for child in self.children]
out = [(self.head, spine, child_heads, self.bracketed, hlf_info)]
for child in self.children:
out.extend(child.dcontext_roll_features())
return out
def learning_features_july2016(self):
'''sequential choice model with a horizon and RTTN
'''
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
return self.head, spine
def to_constituency(self):
raise Exception("dont use this yet")
import pdb
#pdb.set_trace()
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
for child in sorted(self.children, key=lambda c: c.E.address):
print("*******\n**********")
print("starting child {}".format(child.supertag))
ct = child.to_constituency()
print("----------------------------")
print("finished to constituency for ct")
print("tree is currently {}".format(tree))
print("child's ct: {}".format(ct))
print("-------------------")
print(self.bracketed)
print(child.E.address)
print(str(child))
print("attaching {} to {}".format(child.bracketed, self.bracketed))
self.attach_at(tree, ct, list(child.E.address)[1:])
return tree
def attach_at(self, node, op, address):
raise Exception("dont use this yet")
while len(address) > 1:
node = node.children[address.pop(0)]
if not hasattr(node, "bookkeeper"):
node.bookkeeper = {}
opid = address.pop(0)
assert len(address) == 0
if isinstance(opid, int):
node.children[opid].__dict__.update(op.__dict__)
elif isinstance(opid, float):
if opid > 0:
node.children.extend(op.children)
else:
node.children = op.children + node.children
node.spine_index += len(op.children)
else:
raise Exception("sanity check")
def __str__(self):
if self.E.bracketed_string == "(ROOT)" and len(self.children) == 0:
return "<empty root>"
lexical = self.in_order_lexical()
return " ".join(lexical)
def __repr__(self):
if self.E.bracketed_string == "(ROOT)" and len(self.children) == 0:
return "<empty root>"
descs = self.in_order_descriptive()
return " ".join(descs)
def _check_heads(self, child_prep, next_word, stk_idx, sf_stk, avail_pos):
for (head,hlf), child in child_prep.items():
if head == next_word:
import pdb
#pdb.set_trace()
w_size = child.num_children + 1
low,high = stk_idx, stk_idx+w_size
while high >= stk_idx and low >= 0:
possible = sf_stk[low:high]
if sorted(possible) == sorted(child.lexical):
child_prep.pop((head, hlf))
pos = avail_pos.pop()
return child, pos, low
else:
low -= 1
high -= 1
return None, None, None
def _sort_by_surface_form(self, sf_list, children, positions, left=True):
"""assign spine-out indices that agrees with surface form list (sf_list)
positions start from 0 and go negative when left, positive when right
we want to associate things closer to 0 with words closer to head
"""
#my_possible_positions = [i for i,x in enumerate(sf_list) if x==self.E.head]
#if "down" in [c.E.head for c in children]:
# import pdb
# pdb.set_trace()
#for possible_position in my_possible_positions:
#print("===")
child_prep = {(child.E.head,child.E.hlf_symbol):child for child in children}
pairing = []
avail_pos = sorted(positions)
sf_stk = sf_list[:]
if not left:
avail_pos = avail_pos[::-1]
sf_stk = sf_stk[::-1]
# if the position is so bad that it cuts off the words, just skip it
if not all([(word in sf_stk) for c in children for word in c.lexical]):
raise Exception()
stk_idx = len(sf_stk) - 1
#print("xxx")
domain = set([w for child in children for w in child.lexical])
import pdb
#pdb.set_trace()
while len(avail_pos) > 0 and stk_idx >= 0:
#while len(sf_stk) > 0 and len(pairing)<len(children):
#print("---", possible_position, child_prep.keys(), sf_stk, stk_idx)
next_word = sf_stk[stk_idx]
if next_word not in domain:
#print("trashpop", next_word)
sf_stk.pop()
else:
child, pos, low = self._check_heads(child_prep, next_word, stk_idx, sf_stk, avail_pos)
if child is not None:
stk_idx = low
sf_stk = sf_stk[:low]
pairing.append((child,pos))
stk_idx -= 1
try:
assert len(avail_pos) == 0
yield pairing
except:
raise Exception()
#try:
# assert len(my_possible_positions) > 1
#except:
print("available positions weren't exausted. why?")
print("I thought i had it figured out; multiple of this head word")
print("it partitions string too much.. but i was wrong?")
print("debugging. inspect now.")
import pdb
pdb.set_trace()
def sort_by_surface_form(self, sf_list, children, positions, left=True):
#import pdb
#pdb.set_trace()
#try:
#if self.E.head == "iii":
# import pdb
# pdb.set_trace()
all_pairings = list(self._sort_by_surface_form(sf_list, children, positions, left))
#except IndexError as e:
# print("tried to pop from an empty list... what should I do")
# import pdb
# pdb.set_trace()
if len(all_pairings) == 1:
return all_pairings[0]
else:
#try:
key = lambda item: (item[1], (item[0].E.head, item[0].E.hlf_symbol))
same = lambda p1, p2: tuple(map(key,p1))==tuple(map(key,p2))
if all([same(p1,p2) for p1 in all_pairings for p2 in all_pairings]):
#print("all same anyway, returning")
return all_pairings[0]
else:
dt_check = lambda diffs: any([item[0].E.head_symbol == "DT" for pair in diffs for item in pair])
dt_key = lambda pairing: sum([abs(p) for c,p in pairing if c.E.head_symbol=="DT"])
differences = [(p1,p2) for i,p1 in enumerate(all_pairings)
for j,p2 in enumerate(all_pairings)
if not same(p1,p2) and i<j]
differences = [(x,y) for diff_item in differences for x,y in zip(*diff_item) if x!=y]
if len(differences) == 2 and dt_check(differences):
#print("shortcutting")
out_pairing = max(all_pairings, key=dt_key)
#print("hopefully works: ", out_pairing)
return out_pairing
#return all_pairings[0]
print("Not sure what to do. not all pairings are the same. inspect please")
import pdb
pdb.set_trace()
#except Exception as e:
# print("not exactly sure what is breaking")
# import pdb
# pdb.set_trace()
def surface_index(self, sf_list, num_left):
for i,w in enumerate(sf_list):
if w == self.E.head and i >= num_left:
return i
return -1
def align_gorn_to_surface(self, surface_form):
if len(self.children) == 0:
return
sf_list = surface_form.split(" ")
if self.E.head == "as" and "much" in sf_list:
import pdb
#pdb.set_trace()
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
left_children = [child for child in self.children if left_of(child, self)]
organizer = {}
num_left = sum([child.num_children+1 for child in left_children])
boundary = max(num_left, self.surface_index(sf_list, num_left))
left_form = " ".join(sf_list[:boundary])
right_form = " ".join(sf_list[boundary+1:])
#### LEFT CHILDREN
for child in left_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
children, positions = [x[0] for x in items], [x[1] for x in items]
pairing = self.sort_by_surface_form(sf_list[:boundary], children, positions, True)
for child,position in pairing:
assert child.E.address[:-1] == level
child.E.address = child.E.address[:-1] + (position,)
#### RIGHT CHILDREN
organizer = {}
right_children = [child for child in self.children if not left_of(child, self)]
for child in right_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
children, positions = [x[0] for x in items], [x[1] for x in items]
pairing = self.sort_by_surface_form(sf_list[boundary+1:], children, positions, False)
for child,position in pairing:
assert child.E.address[:-1] == level
child.E.address = child.E.address[:-1] + (position,)
for child in left_children:
child.align_gorn_to_surface(left_form)
for child in right_children:
child.align_gorn_to_surface(right_form)
def align_gorn_to_surface_deprecated_march30(self, surface_form):
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
surface_index = lambda child: surface_form.find(child.elem_tree.head)
left_children = [child for child in self.children if left_of(child, self)]
organizer = {}
#### LEFT CHILDREN
for child in left_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
child_list = sorted([c for c,p in items], key=surface_index)
pop_q = deque(sorted([p for c,p in items]))
assert [x!=y for x in pop_q for y in pop_q]
for child in child_list:
addr = child.elem_tree.address
child.elem_tree.address = addr[:-1] + (pop_q.popleft(), )
#### RIGHT CHILDREN
organizer = {}
right_children = [child for child in self.children if not left_of(child, self)]
for child in right_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
child_list = sorted([c for c,p in items], key=surface_index)
pop_q = deque(sorted([p for c,p in items]))
for child in child_list:
addr = child.elem_tree.address
child.elem_tree.address = addr[:-1] + (pop_q.popleft(), )
for child in self.children:
child.align_gorn_to_surface(surface_form)
def align_gorn_to_surface_old(self, surface_form):
ins_children = [child for child in self.children if child.is_insertion]
sub_children = [child for child in self.children if not child.is_insertion]
surface_index = lambda child: surface_form.find(child.elem_tree.head)
organizer = {}
for child in ins_children:
addr = child.elem_tree.address
new_addr = addr[:-1] + ((1,) if addr[-1] > 0 else (-1,))
organizer.setdefault(addr, []).append(child)
for proxy_addr, child_list in organizer.items():
if len(child_list) == 1:
continue
offset = min([c.elem_tree.address[-1] for c in child_list])
for i, child in enumerate(sorted(child_list, key=surface_index),0):
last_bit = i+offset
child.elem_tree.address = proxy_addr[:-1] +(last_bit,)
for child in self.children:
child.align_gorn_to_surface(surface_form)
#left_ins = [child for child in ins_children if child.elem_tree.address[-1]<0]
#right_ins = [child for child in ins_children if child.elem_tree.address[-1]>0]
#surface_index = lambda child: surface_form.find(child.elem_tree.head)
#sort_key = lambda ch: ch.elem_tree.address[:-1]+()
def gorn_in_order(self, include_empty=False):
items = [(child.elem_tree.address, child) for child in self.children]
if len(self.E.head) > 0:
items.append((self.elem_tree.head_address, self))
if include_empty:
for point in self.elem_tree.substitution_points:
if all([addr!=point.gorn for addr, _ in items]):
items.append((point.gorn, None))
sorted_items = sorted(items)
return sorted_items
def gorn_pre_order(self, merged=True):
"""Return children sorted by gorn. Use for pre-order walks.
Will also return from inside out.
"""
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
left_children = [child for child in self.children if left_of(child, self)]
right_children = [child for child in self.children if not left_of(child, self)]
sorted_left = sorted(left_children, key=lambda x: x.elem_tree.address, reverse=True)
#for i,left in enumerate(sorted_left):
# print(i,left.elem_tree.bracketed_string)
# print(i,left.elem_tree.address)
sorted_right = sorted(right_children, key=lambda x: x.elem_tree.address)
#for i,right in enumerate(sorted_right):
# print(i,right.elem_tree.bracketed_string)
# print(i,right.elem_tree.address)
#sorted_children = sorted(self.children, key=lambda x: x.elem_tree.address)
if merged:
return sorted_left + sorted_right
else:
return sorted_left, sorted_right
def learning_features(self, *args):
"""make learning features. currently for dual attender model.
output: features and annotations for pairs (parent, child)
"""
feature_output = []
f1 = "head={}".format(self.E.head)
f2 = "template={}".format(self.E.bracketed_string.replace(self.E.head, ""))
if self.is_root:
my_feats = (f2,)
else:
my_feats = (f1, f2)
for child_type, side in zip(self.gorn_pre_order(False), ("left", "right")):
for i, child in enumerate(child_type):
anno = []
anno.append("dist-from-spine: {}".format(i))
anno.append("dist-from-frontier: {}".format(len(child_type)-i-1))
anno.append("spine-side: {}".format(side))
if child.is_insertion:
anno.append("type=ins")
else:
anno.append("type=sub")
for j, pt in enumerate(self.E.substitution_points):
if pt.gorn == child.E.address:
anno.append("argument-{}".format(j))
child_feats, pairs_below = child.learning_features()
feature_output.extend(pairs_below)
feature_output.append((my_feats, child_feats, tuple(anno)))
return my_feats, feature_output
def _old_learning_features(self, flat=False):
raise Exception("don't use this function anymore")
f1 = "head={}".format(self.elem_tree.head)
f2 = "template={}".format(self.elem_tree.bracketed_string.replace(self.elem_tree.head, ""))
#f4 = "surface=[{}]".format(str(self))
#fulllex = self.in_order_lexical(True)
#f5 = "surface_with_empties=[{}]".format(fulllex)
myfeats = {"f1":f1,"f2":f2,"f3": []}
#"f4":f4,"f5":f5}
allfeats = [myfeats]
first_ins = lambda child: (child.E.address < self.E.head_address and
all([child.E.address < other_child.E.address
for other_child in self.children
if other_child.E.address != child.E.address]))
last_ins = lambda child: (child.E.address > self.E.head_address and
all([child.E.address > other_child.E.address
for other_child in self.children
if other_child.E.address != child.E.address]))
for child in self.children:
# if child is insertion, find out whether it's furthest left or furthest right
# if child is substitution, find out which of the substitution poitns it corresponds to
if first_ins(child):
pass
arrow = "<-" if child.is_insertion else "->"
f3 = "{}{}{}".format(self.elem_tree.head, arrow, child.elem_tree.head)
myfeats['f3'].append(f3)
allfeats.extend(child.learning_features())
if flat:
final_list = []
for featset in allfeats:
for featval in featset.values():
if isinstance(featval, list):
final_list.extend(featval)
else:
final_list.append(featval)
return final_list
return allfeats
def path_reconstruction_features(self):
return (self.E.bracketed_string, self.E.hlf_symbol,
self.E.tree_operation.target['target_hlf'],
self.E.tree_operation.target['target_gorn'])
#return (self.elem_tree.tree_id, self.elem_tree.head)
def pre_order_features(self):
feat_list = [self.path_reconstruction_features()]# for now, just id
for child in self.gorn_pre_order():
feat_list.extend(child.pre_order_features())
return tuple(feat_list)
def pre_order_descriptive(self):
descs = [str(self.elem_tree)]
sorted_children = sorted(self.children, key=lambda x: x.elem_tree.address)
for tree in sorted_children:
descs.extend(tree.pre_order_descriptive())
return descs
def in_order_descriptive(self):
descs = []
for address, tree in self.gorn_in_order():
if tree == self:
descs.append(str(self.elem_tree))
else:
descs.extend(tree.in_order_descriptive())
return descs
def in_order_treeids(self):
treeids = []
for address, tree in self.gorn_in_order():
if tree == self:
treeids.append(tree.elem_tree.tree_id)
else:
treeids.extend(tree.in_order_treeids())
return treeids
def pre_order_lexical(self):
pass
def in_order_lexical(self, include_empties=False):
lexical = []
for address, tree in self.gorn_in_order(include_empties):
if include_empties and tree is None:
lexical.append("<open-sub-point>")
elif tree.elem_tree.head is None:
continue
elif tree == self:
lexical.append(self.elem_tree.head)
else:
lexical.extend(tree.in_order_lexical())
return lexical
def expanded_by_hlf(self, book=None):
if book is None:
self.expand_address()
book = {}
book[self.E.hlf_symbol] = self.expanded_address
for child in self.children:
book = child.expanded_by_hlf(book)
return book
def make_expression(self, top=True):
expr = []
for i, (address, tree) in enumerate(self.gorn_in_order()):
if tree == self:
expr.append(self.predicate)
else:
expr.extend(tree.make_expression(False))
if top:
return Expression.from_iter(expr)
return expr
def lookup_insert(self, index):
return self.elem_tree.insertion_points[index].gorn
def lookup_sub(self, index):
return self.elem_tree.substitution_points[index].gorn
def set_path_features(self, instantiate_semantics=True, *args, **kwargs):
self.elem_tree.set_path_features(*args, **kwargs)
if instantiate_semantics:
self.predicate = self.instantiate_semantics()
def set_insertion_argument(self, arg):
if not self.is_insertion:
raise Exception("Don't call this if it's not insertion..")
self.predicate.substitute(arg, 0)
def instantiate_semantics(self):
num_arguments = len(self.elem_tree.substitution_points)
if self.is_insertion:
num_arguments += 1
predicate = Predicate(self.elem_tree.head,
num_arguments,
self.elem_tree.hlf_symbol)
if self.elem_tree.hlf_symbol is None:
self.elem_tree.set_path_features(self_hlf=predicate.hlf_symbol)
return predicate
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
def clone(self):
children = [child.clone() for child in self.children]
pred = self.predicate.clone()
return self.__class__(self.elem_tree.clone(), children)
def handle_insertion(self, operative, in_place):
"""Check if my elementary tree is the insertion point; if not, recurse
Args:
op_tree: ElementaryTree instance
"""
ThisClass = self.__class__
op_tree = operative.elem_tree
op = op_tree.tree_operation
if self.elem_tree.matches_inspoint(op):
# do the insertting; making new elem tree copies; updating addresses
new_elem_tree, new_op_tree = self.elem_tree.insert(op_tree)
# start making the new composed tree
# create a new clone of the op dtree
if in_place:
new_operative = operative
new_operative.elem_tree = new_op_tree
new_children = self.children
else:
#new_children = [child.clone() for child in self.children]
new_children = deepcopy(self.children)
new_operative = ThisClass.replicate(operative, new_op_tree)
# since it's an insertion, this pred is an argument to the op
new_pred = deepcopy(self.predicate)
# put the predicate into the op
new_operative.set_insertion_argument(new_pred)
# finish off the children
new_children.append(new_operative)
else:
new_elem_tree = deepcopy(self.elem_tree)
new_children = [child.operate(operative, in_place) for child in self.children]
new_pred = deepcopy(self.predicate)
if in_place:
self.elem_tree = new_elem_tree
self.children = new_children
self.predicate = new_pred
return self
else:
return ThisClass(new_elem_tree, new_children)
def handle_substitution(self, operative, in_place=False):
"""Check if my elementary tree is the subpoint; if not, recurse on children
Args:
op_tree: ElementaryTree instance
"""
ThisClass = self.__class__
op_tree = operative.elem_tree
op = op_tree.tree_operation
if self.elem_tree.matches_subpoint(op):
# the purpose of the substitute is to give the op_tree an address
# that adddress is the location of its substituion
# this is important for when we want to order our derived children via gorn
new_elem_tree, new_op_tree = self.elem_tree.substitute(op_tree)
##### HANDLE IN-PLACE-TYPE VS FACTORY-TYPE OPERATION
# the thing coming in is copied
if in_place:
new_operative = operative
new_operative.elem_tree = new_op_tree
new_children = self.children
else:
new_children = deepcopy(self.children)#[child.clone() for child in self.children]
new_operative = ThisClass.replicate(operative, new_op_tree)
new_children.append(new_operative)
##### HANDLE LOGIC STUFF
new_pred = deepcopy(self.predicate)#.clone()
# we put it into its correct spot
if self.is_insertion:
pred_arg_index = new_elem_tree.last_index + 1
else:
pred_arg_index = new_elem_tree.last_index
# abusing terms. substitute here is not a tree substitute, but a logic substitute
# find a better term....................
new_pred.substitute(new_operative.predicate, pred_arg_index)
else:
new_elem_tree = deepcopy(self.elem_tree)#.clone()
new_pred = deepcopy(self.predicate)#.clone()
new_children = [child.operate(operative, in_place) for child in self.children]
if in_place:
self.elem_tree = new_elem_tree
self.children = new_children
self.predicate = new_pred
return self
else:
return ThisClass(new_elem_tree, new_children)
def operate(self, operative, in_place=False):
"""handle the possible operations incoming to this derived tree.
Args:
operative: a DerivationTree instance
Returns:
a new DerivationTree that results from operation
Notes:
An intended operation would know what tree it wants to operate on
and where it wants to do it.
E.G:
(NP* (DT a)) knows it wants to attach to the tree (NP (NN dog))
which is substituted into (S (NP) (VP finds) (NP))
The DerivationTree should know that (NP (NN dog)) was substituted into
the first substitution spot.
Temp QUD:
what is the best way to represent this intended operation?
we could have the DT tree know it wants to attach to tree id X
but that tree id X could be in the tree twice (either NP)
it could know the predicate then?
"""
if operative.elem_tree.tree_operation.type == consts.INSERTION:
return self.handle_insertion(operative, in_place)
elif operative.elem_tree.tree_operation.type == consts.SUBSTITUTION:
return self.handle_substitution(operative, in_place)
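    # Hedged sketch of the composition described in the docstring above; the
    # bracketed strings are only illustrative (compare the test() function at
    # the bottom of this module).
    #
    #     host = DerivationTree.from_bracketed("(NP (NN dog))")
    #     det = DerivationTree.from_bracketed("(NP* (DT a))")
    #     combined = host.operate(det)   # returns a new DerivationTree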
@classmethod
def replicate(cls, old_inst, new_elem_tree=None, new_children=None, new_pred=None):
""" this is basically clone but allows etrees, childre, and preds rather than just straight cloning """
new_elem_tree = new_elem_tree or deepcopy(old_inst.elem_tree)#.clone()
new_children = new_children or deepcopy(old_inst.children) #[child.clone() for child in old_inst.children]
new_pred = new_pred or deepcopy(old_inst.predicate)#.clone()
return cls(new_elem_tree, new_children)
def test():
parse = """(ROOT(S(NP(NP (DT The) (NN boy))(VP (VBG laying)(S(VP (VB face)(PRT (RP down))(PP (IN on)(NP (DT a) (NN skateboard)))))))(VP (VBZ is)(VP (VBG being)(VP (VBN pushed)(PP (IN along)(NP (DT the) (NN ground)))(PP (IN by)(NP (DT another) (NN boy))))))(. .)))"""
tree_cuts = tree_enrichment.string2cuts(parse)
tree_strings = [cut.save_str() for cut in tree_cuts]
derived_trees = [DerivationTree.from_bracketed(tree_string) for tree_string in tree_strings]
derived_trees[2].elem_tree.insertion_points[0].hlf_symbol = 'g0'
derived_trees[1].elem_tree.tree_operation.target['target_hlf'] = 'g0'
derived_trees[1].elem_tree.tree_operation.target['target_gorn'] = (0,)
#derived_two = [DerivationTree.from_parse_tree(tree) for tree in tree_cuts]
return derived_trees
if __name__ == "__main__":
test()
|
braingineer/baal
|
baal/structures/gist_trees.py
|
Python
|
mit
| 54,149
|
import guess_language
import threading
from job_queue import JobQueue
from multiprocessing import cpu_count
from app_config import *
from html_parser_by_tag import HTMLParserByTag
from event_analysis import EventAnalysis
from events.models import Event, Feature, EventFeature, Weight
from tree_tagger import TreeTagger
from website_link_arborescence import *
from tf_idf import TypeFeature
from django.core.validators import URLValidator
def is_nb_word_website_enough(x):
"""
Return the number of words to take from the website
"""
return K_MOST_IMPORTANT_KEYWORD
def event_analysis():
"""
    Event analysis process. It fetches all the events in the database, analyses the description & website, and
    then creates all the related features.
"""
event_analysis = EventAnalysis()
# Store all available website and avoid parsing a website several times
websites = dict(dict())
# Contains the list of key-word with tree tagger
description_tree_tagger = dict()
website_tree_tagger = dict()
events = Event.objects.all()
if len(events) == 0:
return
nb_core = cpu_count()
nb_events = len(events)
nb_events_thread = nb_events/nb_core
events_thread = []
for i in range(nb_core-1):
events_thread.append(events[i*nb_events_thread:(i+1)*nb_events_thread])
events_thread.append(events[(nb_core-1)*nb_events_thread:])
# Fulfill the corpus
start_threads(nb_core, event_analysis_fulfill_corpus,
events_thread, event_analysis, websites, description_tree_tagger, website_tree_tagger)
#Monothread - event_analysis_fulfill_corpus(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)
event_analysis.set_corpus_complete()
# We compute the tf-idf of the key word in the description and in the website if exists
start_threads(nb_core, event_analysis_compute_tf_idf,
events_thread, event_analysis, websites, description_tree_tagger, website_tree_tagger)
#Monothread - event_analysis_compute_tf_idf(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)
# We fetch the k most important tags by event
job_queue = JobQueue()
job_queue.start()
start_threads(nb_core, event_analysis_fetch_k_most_important_features_and_push_database,
events_thread, job_queue, event_analysis, websites)
job_queue.finish()
#Monothread - event_analysis_fetch_k_most_important_features_and_push_database(None, event_analysis, websites, events)
compute_statistics(events, description_tree_tagger, website_tree_tagger)
def compute_statistics(events, description_tree_tagger, website_tree_tagger):
"""
Compute useful statistics
"""
nb_event = len(events)
avg_nb_keyword_description = 0
avg_nb_keyword_description_website = 0
nb_description_fr = float(len(description_tree_tagger))/float(len(events))
sum = 0
for k, v in description_tree_tagger.items():
sum += len(v)
avg_nb_keyword_description = float(sum)/float(len(events))
sum = 0
for k, v in description_tree_tagger.items():
sum += len(v)
if k in website_tree_tagger.keys():
sum += len(website_tree_tagger[k])
avg_nb_keyword_description_website = float(sum)/float(len(events))
validator = URLValidator(verify_exists=True)
nb_events_with_valid_website = 0
for e in events:
if e.website != '':
try:
validator(e.website)
nb_events_with_valid_website += 1
except:
pass
nb_website_with_keyword = 0
for v in website_tree_tagger.values():
if len(v) > 0:
nb_website_with_keyword += 1
nb_website_fr = float(nb_website_with_keyword)/float(nb_events_with_valid_website)
nb_event_website_fr = 0
for k, v in description_tree_tagger.items():
if len(v) > 0 and k in website_tree_tagger.keys():
if len(website_tree_tagger[k]) > 0:
nb_event_website_fr += 1
print 'Number of events : ', nb_event
print 'Average number of keywords in description : ', avg_nb_keyword_description
print 'Average number of keywords in description + website (rec :', DEFAULT_RECURSION_WEBSITE, ') : ', avg_nb_keyword_description_website
print '% descriptions in french : ', nb_description_fr*100.0, ' %'
print '% websites have some French content : ', nb_website_fr*100.0, ' %'
print '% events with French description & website : ', nb_event_website_fr*100.0/nb_event, ' %'
def start_threads(nb_core, fct, tab, *args):
"""
    Starts as many threads as there are cores on the machine
"""
threads = []
for i in range(nb_core):
thread = threading.Thread(target=fct, args=args + (tab[i],))
threads.append(thread)
thread.start()
for t in threads:
t.join()
def event_analysis_fulfill_corpus(event_analysis, websites, description_tree_tagger, website_tree_tagger, events):
"""
    Part 1 of the event analysis: fills the corpus with the plain text of each event
"""
tagger = TreeTagger()
    # We complete the corpus with the plain text of the description & of the website if it exists
for e in events:
len_description = 0
if e.description != '' and guess_language.guessLanguage(e.description.encode('utf-8')) == LANGUAGE_FOR_TEXT_ANALYSIS:
event_analysis.add_document_in_corpus(e.description, EventAnalysis.get_id_website(e.id, False))
description_tree_tagger[e.id] = tagger.tag_text(e.description, FILTER_TREE_TAGGER)
len_description = len(description_tree_tagger[e.id])
if e.website != '' and len_description < is_nb_word_website_enough(len_description):
try:
unique_urls = HashTableUrl()
TreeNode(e.website.encode('utf-8'), DEFAULT_RECURSION_WEBSITE, unique_urls)
websites[e.website] = ''
for w in unique_urls.get_urls():
websites[e.website] += event_website_parser(w) + ' '
event_analysis.add_document_in_corpus(websites[e.website], EventAnalysis.get_id_website(e.id, True))
website_tree_tagger[e.id] = tagger.tag_text(websites[e.website], FILTER_TREE_TAGGER)
                # We empty the buffer to save memory; past this point we only need to remember the url was processed
websites[e.website] = ' '
            # Some websites:
            # - return a 403 error, e.g. complexe3d.com,
            # - are nonexistent, like http://www.biblio.morges.ch
            # - are not web urls at all, like galerie@edouardroch.ch,
            #   thhp://www.vitromusee.ch (the typo is on purpose!), www,chateaudeprangins.ch, http://
            except (HTTPError, URLError, ValueError):  # Other error types (e.g. conversion problems) should still surface
                pass
def event_analysis_compute_tf_idf(event_analysis, websites, description_tree_tagger, website_tree_tagger, events):
"""
    Part 2 of the event analysis: computes the tf_idf of each feature in the related document
"""
for e in events:
if e.description != '' and e.id in description_tree_tagger.keys():
for k in description_tree_tagger[e.id]:
event_analysis.compute_tf_idf(k, EventAnalysis.get_id_website(e.id, False))
if e.website in websites.keys() and e.id in website_tree_tagger.keys():
for k in website_tree_tagger[e.id]:
event_analysis.compute_tf_idf(k, EventAnalysis.get_id_website(e.id, True))
def event_analysis_fetch_k_most_important_features_and_push_database(job_queue, event_analysis, websites, events):
"""
    Part 3 of the event analysis: fetches the k most important features of each event and pushes them into the database
"""
from collections import OrderedDict
from itertools import islice
for e in events:
key_words_description = OrderedDict()
if e.description != '':
key_words_description = event_analysis.get_tf_idf_the_k_most_important(K_MOST_IMPORTANT_KEYWORD,
EventAnalysis.get_id_website(e.id, False))
key_words_website = OrderedDict()
if e.website in websites.keys():
key_words_website = event_analysis.get_tf_idf_the_k_most_important(K_MOST_IMPORTANT_KEYWORD,
EventAnalysis.get_id_website(e.id, True))
key_words_description_keys = key_words_description.keys()
key_words_website_keys = key_words_website.keys()
        # Input  => 2 OrderedDicts to merge, each mapping tag -> (frequency, tf_idf, type)
        # Output => key_words: OrderedDict mapping tag -> (tf_idf, type), len <= K_MOST_IMPORTANT_KEYWORD
        # Mix the keywords of the description and of the website to keep the k most important terms.
        # If a key appears in both dicts, we take the max frequency
        # and we MUST re-sort the merged dictionary by frequency to keep only the k most important
key_words = OrderedDict(
(x[0], (x[1][1], x[1][2])) for x in(islice(OrderedDict(sorted(
dict({(k,
(max(key_words_description.get(k)[0] if k in key_words_description_keys else 0.0, key_words_website.get(k)[0] if k in key_words_website_keys else 0.0),
                # If the key exists in both the description & the website, take the tf_idf associated with the larger frequency
key_words_description.get(k)[1] if k in key_words_description_keys and k in key_words_website_keys and key_words_description.get(k)[0] >= key_words_website.get(k)[0]
else
(key_words_website.get(k)[1] if k in key_words_description_keys and k in key_words_website_keys
else (key_words_description.get(k)[1] if k in key_words_description_keys else key_words_website.get(k)[1])),
TypeFeature.Description if k in key_words_description_keys and k in key_words_website_keys and key_words_description.get(k)[0] >= key_words_website.get(k)[0]
else
(TypeFeature.Website if k in key_words_description_keys and k in key_words_website_keys
else TypeFeature.Description if k in key_words_description_keys else TypeFeature.Website))
)
                # Finally, we sort the merged dict by frequency and keep at most K_MOST_IMPORTANT_KEYWORD values
for k in (key_words_description_keys + key_words_website_keys)}).iteritems(), key=lambda x: x[1][0])).items(), 0, K_MOST_IMPORTANT_KEYWORD)))
# Django ORM database is not thread safe, so we have to use a job queue
job_queue.put([update_database_event_tags, e, key_words])
#Monothread - update_database_event_tags(e, key_words)
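# Illustrative sketch, not part of the original module: a step-by-step
# re-expression of the one-line merge built in the function above. Both inputs
# map tag -> (frequency, tf_idf, ...); only the first two fields are read, just
# as in the original expression, and the helper name below is hypothetical.
def _merge_keyword_dicts_example(from_description, from_website, k_most):
    from collections import OrderedDict
    merged = {}
    for tag in set(from_description) | set(from_website):
        desc = from_description.get(tag)
        web = from_website.get(tag)
        if desc is not None and web is not None:
            # The entry with the larger frequency wins; the description wins ties.
            if desc[0] >= web[0]:
                merged[tag] = (desc[0], desc[1], TypeFeature.Description)
            else:
                merged[tag] = (web[0], web[1], TypeFeature.Website)
        elif desc is not None:
            merged[tag] = (desc[0], desc[1], TypeFeature.Description)
        else:
            merged[tag] = (web[0], web[1], TypeFeature.Website)
    # Same ordering as the original code: sort by frequency and keep the first k_most.
    ranked = sorted(merged.items(), key=lambda item: item[1][0])[:k_most]
    return OrderedDict((tag, (tf_idf, kind)) for tag, (freq, tf_idf, kind) in ranked)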
def event_website_parser(url):
"""
    Parses the website of an event
"""
if url == '':
raise Exception("The event doesn't have any website")
try:
parser = HTMLParserByTag()
html = parser.unescape(urllib2.urlopen(url.encode('utf-8')).read().decode('utf-8'))
parsed_text = ''
for t in FILTER_TAGS_WEBSITE:
parser.initialize(t)
parser.feed(html)
parsed_text += parser.get_data() + ' '
return parsed_text if guess_language.guessLanguage(parsed_text.encode('utf-8')) == LANGUAGE_FOR_TEXT_ANALYSIS else ''
except:
return ''
def update_database_event_tags(event, key_words):
"""
    Update all the necessary feature information for an event
"""
for fe in EventFeature.objects.filter(event=event):
fe.delete()
feature_name = [f.name for f in Feature.objects.all()]
for k, v in key_words.items():
k = k.strip()
# We insert the new feature or fetch it
feature = Feature.objects.get(name__exact=k) if k in feature_name else Feature(name=k)
feature.save()
EventFeature(event=event,
feature=feature,
tf_idf=v[0],
weight=Weight.objects.get(name__exact=WEIGHT_DESCRIPTION_NAME if v[1] == TypeFeature.Description else WEIGHT_WEBSITE_NAME)
).save()
weight = Weight.objects.get(name__exact=WEIGHT_CATEGORY_NAME)
if len(EventFeature.objects.filter(event=event, weight=weight)) == 0:
words = event.category.name.split('/')
if len(words) == 3:
words = [words[0], words[1]]
for w in words:
w = w.strip().lower()
feature = Feature.objects.get(name__exact=w) if w in feature_name else Feature(name=w)
feature.save()
ef = None
if len(EventFeature.objects.filter(event=event, feature=feature)) > 0:
ef = EventFeature.objects.get(event=event, feature=feature)
ef.weight = weight
else:
ef = EventFeature(event=event, feature=feature, tf_idf=WEIGHT_CATEGORY, weight=weight)
ef.save()
def get_list_event_features():
"""
Return the list of all events with related features
"""
events = Event.objects.all()
out = dict()
for e in events:
out[e] = [(ef.feature.name, ef.tf_idf*ef.weight.weight, ef.weight.weight, ef.weight.name)
for ef in EventFeature.objects.filter(event__exact=e).order_by('-tf_idf')]
return out
|
Diego999/Social-Recommendation-System
|
event_analyse/functions.py
|
Python
|
mit
| 13,838
|
# ######## KADEMLIA CONSTANTS ###########
BIT_NODE_ID_LEN = 160
HEX_NODE_ID_LEN = BIT_NODE_ID_LEN // 4
# Small number representing the degree of
# parallelism in network calls
ALPHA = 3
# Maximum number of contacts stored in a bucket
# NOTE: Should be an even number.
K = 8 # pylint: disable=invalid-name
# Maximum number of contacts stored in the
# replacement cache of a bucket
# NOTE: Should be an even number.
CACHE_K = 32
# Timeout for network operations
# [seconds]
RPC_TIMEOUT = 0.1
# Delay between iterations of iterative node lookups
# (for loose parallelism)
# [seconds]
ITERATIVE_LOOKUP_DELAY = RPC_TIMEOUT / 2
# If a KBucket has not been used for this amount of time, refresh it.
# [seconds]
REFRESH_TIMEOUT = 60 * 60 * 1000 # 1 hour
# The interval at which nodes replicate (republish/refresh)
# the data they hold
# [seconds]
REPLICATE_INTERVAL = REFRESH_TIMEOUT
# The time it takes for data to expire in the network;
# the original publisher of the data will also republish
# the data at this time if it is still valid
# [seconds]
DATE_EXPIRE_TIMEOUT = 86400 # 24 hours
# ####### IMPLEMENTATION-SPECIFIC CONSTANTS ###########
# The interval in which the node should check whether any buckets
# need refreshing or whether any data needs to be republished
# [seconds]
CHECK_REFRESH_INTERVAL = REFRESH_TIMEOUT / 5
# Max size of a single UDP datagram.
# Any larger message will be spread across several UDP packets.
# [bytes]
UDP_DATAGRAM_MAX_SIZE = 8192 # 8 KB
DB_PATH = "db/ob.db"
VERSION = "0.3.1"
SATOSHIS_IN_BITCOIN = 100000000
# The IP of the default DNSChain Server used to validate namecoin addresses
DNSCHAIN_SERVER_IP = "192.184.93.146"
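# Illustrative sketch, not part of the original module; the helper names are
# hypothetical. It shows how a couple of these constants are typically consumed:
# a node id is BIT_NODE_ID_LEN random bits, i.e. HEX_NODE_ID_LEN (160 // 4 = 40)
# hexadecimal digits, and a serialized message should fit in UDP_DATAGRAM_MAX_SIZE.
def _random_node_id_example():
    """Return a random node id of HEX_NODE_ID_LEN hexadecimal characters."""
    import binascii
    import os
    return binascii.hexlify(os.urandom(BIT_NODE_ID_LEN // 8))
def _fits_in_single_datagram_example(payload):
    """True when the payload does not need to be split across UDP packets."""
    return len(payload) <= UDP_DATAGRAM_MAX_SIZE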
|
im0rtel/OpenBazaar
|
node/constants.py
|
Python
|
mit
| 1,678
|
"""Tests for distutils.archive_util."""
__revision__ = "$Id: test_archive_util.py 86596 2010-11-20 19:04:17Z ezio.melotti $"
import unittest
import os
import tarfile
from os.path import splitdrive
import warnings
from distutils.archive_util import (check_archive_formats, make_tarball,
make_zipfile, make_archive,
ARCHIVE_FORMATS)
from distutils.spawn import find_executable, spawn
from distutils.tests import support
from test.support import check_warnings, run_unittest
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class ArchiveUtilTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
            self.skipTest("Source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(find_executable('compress'),
'The compress program is required')
def test_compress_deprecated(self):
tmpdir, tmpdir2, base_name = self._create_files()
# using compress and testing the PendingDeprecationWarning
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress')
finally:
os.chdir(old_dir)
tarball = base_name + '.tar.Z'
self.assertTrue(os.path.exists(tarball))
self.assertEqual(len(w.warnings), 1)
# same test with dry_run
os.remove(tarball)
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress',
dry_run=True)
finally:
os.chdir(old_dir)
self.assertTrue(not os.path.exists(tarball))
self.assertEqual(len(w.warnings), 1)
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
make_zipfile(base_name, tmpdir)
        # check if the zip file was created
        tarball = base_name + '.zip'
        self.assertTrue(os.path.exists(tarball))
def test_check_archive_formats(self):
self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']),
'xxx')
self.assertEqual(check_archive_formats(['gztar', 'zip']), None)
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
del ARCHIVE_FORMATS['xxx']
def test_suite():
return unittest.makeSuite(ArchiveUtilTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/distutils/tests/test_archive_util.py
|
Python
|
mit
| 7,249
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=30)
active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
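# Illustrative sketch, not part of the original file: typical ORM usage for the
# Category model above, e.g. from a Django shell; the category name is made up.
def _example_usage():
    """Hypothetical helper showing how Category is created and queried."""
    category = Category.objects.create(name="Workshops")
    active_names = list(Category.objects.filter(active=True).values_list("name", flat=True))
    return str(category), active_names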
|
bootcamptropa/django
|
categories/models.py
|
Python
|
mit
| 362
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import math
import warnings
from random import shuffle
from unittest import TestCase
from matrixprofile.exceptions import NoSolutionPossible
from tsfresh.examples.driftbif_simulation import velocity
from tsfresh.feature_extraction.feature_calculators import *
from tsfresh.feature_extraction.feature_calculators import (
_aggregate_on_chunks,
_estimate_friedrich_coefficients,
_get_length_sequences_where,
_into_subchunks,
_roll,
)
class FeatureCalculationTestCase(TestCase):
def setUp(self):
# There will be a lot of warnings in the feature calculators.
# Just ignore all of them in these tests
warnings.simplefilter("ignore")
def tearDown(self):
warnings.resetwarnings()
def assertIsNaN(self, result):
        self.assertTrue(np.isnan(result), msg="{} is not np.NaN".format(result))
def assertEqualOnAllArrayTypes(self, f, input_to_f, result, *args, **kwargs):
expected_result = f(input_to_f, *args, **kwargs)
self.assertEqual(
expected_result,
result,
msg="Not equal for lists: {} != {}".format(expected_result, result),
)
expected_result = f(np.array(input_to_f), *args, **kwargs)
self.assertEqual(
expected_result,
result,
msg="Not equal for numpy.arrays: {} != {}".format(expected_result, result),
)
expected_result = f(pd.Series(input_to_f, dtype="float64"), *args, **kwargs)
self.assertEqual(
expected_result,
result,
msg="Not equal for pandas.Series: {} != {}".format(expected_result, result),
)
def assertTrueOnAllArrayTypes(self, f, input_to_f, *args, **kwargs):
self.assertTrue(f(input_to_f, *args, **kwargs), msg="Not true for lists")
self.assertTrue(
f(np.array(input_to_f), *args, **kwargs), msg="Not true for numpy.arrays"
)
self.assertTrue(
f(pd.Series(input_to_f), *args, **kwargs), msg="Not true for pandas.Series"
)
def assertAllTrueOnAllArrayTypes(self, f, input_to_f, *args, **kwargs):
self.assertTrue(
all(dict(f(input_to_f, *args, **kwargs)).values()), msg="Not true for lists"
)
self.assertTrue(
all(dict(f(np.array(input_to_f), *args, **kwargs)).values()),
msg="Not true for numpy.arrays",
)
self.assertTrue(
all(dict(f(pd.Series(input_to_f), *args, **kwargs)).values()),
msg="Not true for pandas.Series",
)
def assertFalseOnAllArrayTypes(self, f, input_to_f, *args, **kwargs):
self.assertFalse(f(input_to_f, *args, **kwargs), msg="Not false for lists")
self.assertFalse(
f(np.array(input_to_f), *args, **kwargs), msg="Not false for numpy.arrays"
)
self.assertFalse(
f(pd.Series(input_to_f), *args, **kwargs), msg="Not false for pandas.Series"
)
def assertAllFalseOnAllArrayTypes(self, f, input_to_f, *args, **kwargs):
self.assertFalse(
any(dict(f(input_to_f, *args, **kwargs)).values()),
msg="Not false for lists",
)
self.assertFalse(
any(dict(f(np.array(input_to_f), *args, **kwargs)).values()),
msg="Not false for numpy.arrays",
)
self.assertFalse(
any(dict(f(pd.Series(input_to_f), *args, **kwargs)).values()),
msg="Not false for pandas.Series",
)
def assertAlmostEqualOnAllArrayTypes(self, f, input_to_f, result, *args, **kwargs):
expected_result = f(input_to_f, *args, **kwargs)
self.assertAlmostEqual(
expected_result,
result,
msg="Not almost equal for lists: {} != {}".format(expected_result, result),
)
expected_result = f(np.array(input_to_f), *args, **kwargs)
self.assertAlmostEqual(
expected_result,
result,
msg="Not almost equal for numpy.arrays: {} != {}".format(
expected_result, result
),
)
expected_result = f(pd.Series(input_to_f, dtype="float64"), *args, **kwargs)
self.assertAlmostEqual(
expected_result,
result,
msg="Not almost equal for pandas.Series: {} != {}".format(
expected_result, result
),
)
def assertIsNanOnAllArrayTypes(self, f, input_to_f, *args, **kwargs):
self.assertTrue(
np.isnan(f(input_to_f, *args, **kwargs)), msg="Not NaN for lists"
)
self.assertTrue(
np.isnan(f(np.array(input_to_f), *args, **kwargs)),
msg="Not NaN for numpy.arrays",
)
self.assertTrue(
np.isnan(f(pd.Series(input_to_f, dtype="float64"), *args, **kwargs)),
msg="Not NaN for pandas.Series",
)
def assertEqualPandasSeriesWrapper(self, f, input_to_f, result, *args, **kwargs):
self.assertEqual(
f(pd.Series(input_to_f), *args, **kwargs),
result,
msg="Not equal for pandas.Series: {} != {}".format(
f(pd.Series(input_to_f), *args, **kwargs), result
),
)
def test__roll(self):
x = np.random.normal(size=30)
for shift in [0, 1, 10, 11, 30, 31, 50, 51, 150, 151]:
np.testing.assert_array_equal(_roll(x, shift), np.roll(x, shift))
np.testing.assert_array_equal(_roll(x, -shift), np.roll(x, -shift))
def test___get_length_sequences_where(self):
self.assertEqualOnAllArrayTypes(
_get_length_sequences_where,
[0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
[1, 3, 1, 2],
)
self.assertEqualOnAllArrayTypes(
_get_length_sequences_where,
[0, True, 0, 0, True, True, True, 0, 0, True, 0, True, True],
[1, 3, 1, 2],
)
self.assertEqualOnAllArrayTypes(
_get_length_sequences_where,
[0, True, 0, 0, 1, True, 1, 0, 0, True, 0, 1, True],
[1, 3, 1, 2],
)
self.assertEqualOnAllArrayTypes(_get_length_sequences_where, [0] * 10, [0])
self.assertEqualOnAllArrayTypes(_get_length_sequences_where, [], [0])
def test__into_subchunks(self):
np.testing.assert_array_equal(
_into_subchunks(range(7), 3, 2), np.array([[0, 1, 2], [2, 3, 4], [4, 5, 6]])
)
np.testing.assert_array_equal(
_into_subchunks(range(5), 3), np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])
)
def test_variance_larger_than_standard_deviation(self):
self.assertFalseOnAllArrayTypes(
variance_larger_than_standard_deviation, [-1, -1, 1, 1, 1]
)
self.assertTrueOnAllArrayTypes(
variance_larger_than_standard_deviation, [-1, -1, 1, 1, 2]
)
def test_large_standard_deviation(self):
self.assertFalseOnAllArrayTypes(large_standard_deviation, [1, 1, 1, 1], r=0)
self.assertFalseOnAllArrayTypes(large_standard_deviation, [1, 1, 1, 1], r=0)
self.assertTrueOnAllArrayTypes(large_standard_deviation, [-1, -1, 1, 1], r=0)
self.assertTrueOnAllArrayTypes(large_standard_deviation, [-1, -1, 1, 1], r=0.25)
self.assertTrueOnAllArrayTypes(large_standard_deviation, [-1, -1, 1, 1], r=0.3)
self.assertFalseOnAllArrayTypes(large_standard_deviation, [-1, -1, 1, 1], r=0.5)
def test_symmetry_looking(self):
self.assertAllTrueOnAllArrayTypes(
symmetry_looking, [-1, -1, 1, 1], [dict(r=0.05), dict(r=0.75)]
)
self.assertAllFalseOnAllArrayTypes(
symmetry_looking, [-1, -1, 1, 1], [dict(r=0)]
)
self.assertAllFalseOnAllArrayTypes(
symmetry_looking, [-1, -1, -1, -1, 1], [dict(r=0.05)]
)
self.assertAllTrueOnAllArrayTypes(
symmetry_looking, [-2, -2, -2, -1, -1, -1], [dict(r=0.05)]
)
self.assertAllTrueOnAllArrayTypes(
symmetry_looking, [-0.9, -0.900001], [dict(r=0.05)]
)
def test_has_duplicate_max(self):
self.assertTrueOnAllArrayTypes(has_duplicate_max, [2.1, 0, 0, 2.1, 1.1])
self.assertFalseOnAllArrayTypes(
has_duplicate_max, np.array([2.1, 0, 0, 2, 1.1])
)
self.assertTrueOnAllArrayTypes(has_duplicate_max, [1, 1, 1, 1])
self.assertFalseOnAllArrayTypes(has_duplicate_max, np.array([0]))
self.assertTrueOnAllArrayTypes(has_duplicate_max, np.array([1, 1]))
def test_has_duplicate_min(self):
self.assertTrueOnAllArrayTypes(has_duplicate_min, [-2.1, 0, 0, -2.1, 1.1])
self.assertFalseOnAllArrayTypes(has_duplicate_min, [2.1, 0, -1, 2, 1.1])
self.assertTrueOnAllArrayTypes(has_duplicate_min, np.array([1, 1, 1, 1]))
self.assertFalseOnAllArrayTypes(has_duplicate_min, np.array([0]))
self.assertTrueOnAllArrayTypes(has_duplicate_min, np.array([1, 1]))
def test_has_duplicate(self):
self.assertTrueOnAllArrayTypes(has_duplicate, np.array([-2.1, 0, 0, -2.1]))
self.assertTrueOnAllArrayTypes(has_duplicate, [-2.1, 2.1, 2.1, 2.1])
self.assertFalseOnAllArrayTypes(has_duplicate, [1.1, 1.2, 1.3, 1.4])
self.assertFalseOnAllArrayTypes(has_duplicate, [1])
self.assertFalseOnAllArrayTypes(has_duplicate, [])
def test_sum(self):
self.assertEqualOnAllArrayTypes(sum_values, [1, 2, 3, 4.1], 10.1)
self.assertEqualOnAllArrayTypes(sum_values, [-1.2, -2, -3, -4], -10.2)
self.assertEqualOnAllArrayTypes(sum_values, [], 0)
def test_agg_autocorrelation_returns_correct_values(self):
param = [{"f_agg": "mean", "maxlag": 10}]
x = [1, 1, 1, 1, 1, 1, 1]
expected_res = 0
res = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_10']
self.assertAlmostEqual(res, expected_res, places=4)
x = [1, 2, -3]
expected_res = 1 / np.var(x) * (((1 * 2 + 2 * (-3)) / 2 + (1 * -3)) / 2)
res = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_10']
self.assertAlmostEqual(res, expected_res, places=4)
np.random.seed(42)
x = np.random.normal(size=3000)
expected_res = 0
res = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_10']
self.assertAlmostEqual(res, expected_res, places=2)
param = [{"f_agg": "median", "maxlag": 10}]
x = [1, 1, 1, 1, 1, 1, 1]
expected_res = 0
res = dict(agg_autocorrelation(x, param=param))['f_agg_"median"__maxlag_10']
self.assertAlmostEqual(res, expected_res, places=4)
x = [1, 2, -3]
expected_res = 1 / np.var(x) * (((1 * 2 + 2 * (-3)) / 2 + (1 * -3)) / 2)
res = dict(agg_autocorrelation(x, param=param))['f_agg_"median"__maxlag_10']
self.assertAlmostEqual(res, expected_res, places=4)
def test_agg_autocorrelation_returns_max_lag_does_not_affect_other_results(self):
param = [{"f_agg": "mean", "maxlag": 1}, {"f_agg": "mean", "maxlag": 10}]
x = range(10)
res1 = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_1']
res10 = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_10']
self.assertAlmostEqual(res1, 0.77777777, places=4)
self.assertAlmostEqual(res10, -0.64983164983165, places=4)
param = [{"f_agg": "mean", "maxlag": 1}]
x = range(10)
res1 = dict(agg_autocorrelation(x, param=param))['f_agg_"mean"__maxlag_1']
self.assertAlmostEqual(res1, 0.77777777, places=4)
def test_partial_autocorrelation(self):
        # Test for alternating time series
# len(x) < max_lag
param = [{"lag": lag} for lag in range(10)]
x = [1, 2, 1, 2, 1, 2]
expected_res = [("lag_0", 1.0), ("lag_1", -1.0), ("lag_2", np.nan)]
res = partial_autocorrelation(x, param=param)
self.assertAlmostEqual(res[0][1], expected_res[0][1], places=4)
self.assertAlmostEqual(res[1][1], expected_res[1][1], places=4)
self.assertIsNaN(res[2][1])
# Linear signal
param = [{"lag": lag} for lag in range(10)]
x = np.linspace(0, 1, 3000)
expected_res = [("lag_0", 1.0), ("lag_1", 1.0), ("lag_2", 0)]
res = partial_autocorrelation(x, param=param)
self.assertAlmostEqual(res[0][1], expected_res[0][1], places=2)
self.assertAlmostEqual(res[1][1], expected_res[1][1], places=2)
self.assertAlmostEqual(res[2][1], expected_res[2][1], places=2)
# Random noise
np.random.seed(42)
x = np.random.normal(size=3000)
param = [{"lag": lag} for lag in range(10)]
expected_res = [("lag_0", 1.0), ("lag_1", 0), ("lag_2", 0)]
res = partial_autocorrelation(x, param=param)
self.assertAlmostEqual(res[0][1], expected_res[0][1], places=1)
self.assertAlmostEqual(res[1][1], expected_res[1][1], places=1)
self.assertAlmostEqual(res[2][1], expected_res[2][1], places=1)
# On a simulated AR process
np.random.seed(42)
param = [{"lag": lag} for lag in range(10)]
# Simulate AR process
T = 3000
epsilon = np.random.randn(T)
x = np.repeat(1.0, T)
for t in range(T - 1):
x[t + 1] = 0.5 * x[t] + 2 + epsilon[t]
expected_res = [("lag_0", 1.0), ("lag_1", 0.5), ("lag_2", 0)]
res = partial_autocorrelation(x, param=param)
self.assertAlmostEqual(res[0][1], expected_res[0][1], places=1)
self.assertAlmostEqual(res[1][1], expected_res[1][1], places=1)
self.assertAlmostEqual(res[2][1], expected_res[2][1], places=1)
# Some pathological cases
param = [{"lag": lag} for lag in range(10)]
# List of length 1
res = partial_autocorrelation([1], param=param)
for lag_no, lag_val in res:
self.assertIsNaN(lag_val)
# Empty list
res = partial_autocorrelation([], param=param)
for lag_no, lag_val in res:
self.assertIsNaN(lag_val)
# List contains only zeros
res = partial_autocorrelation(np.zeros(100), param=param)
for lag_no, lag_val in res:
if lag_no == "lag_0":
self.assertEqual(lag_val, 1.0)
else:
self.assertIsNaN(lag_val)
def test_augmented_dickey_fuller(self):
# todo: add unit test for the values of the test statistic
# the adf hypothesis test checks for unit roots,
# so H_0 = {random drift} vs H_1 = {AR(1) model}
# H0 is true
np.random.seed(seed=42)
x = np.cumsum(np.random.uniform(size=100))
param = [
{"autolag": "BIC", "attr": "teststat"},
{"autolag": "BIC", "attr": "pvalue"},
{"autolag": "BIC", "attr": "usedlag"},
]
expected_index = [
'attr_"teststat"__autolag_"BIC"',
'attr_"pvalue"__autolag_"BIC"',
'attr_"usedlag"__autolag_"BIC"',
]
res = augmented_dickey_fuller(x=x, param=param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertGreater(res['attr_"pvalue"__autolag_"BIC"'], 0.10)
self.assertEqual(res['attr_"usedlag"__autolag_"BIC"'], 0)
# H0 should be rejected for AR(1) model with x_{t} = 1/2 x_{t-1} + e_{t}
np.random.seed(seed=42)
e = np.random.normal(0.1, 0.1, size=100)
m = 50
x = [0] * m
x[0] = 100
for i in range(1, m):
x[i] = x[i - 1] * 0.5 + e[i]
param = [
{"autolag": "AIC", "attr": "teststat"},
{"autolag": "AIC", "attr": "pvalue"},
{"autolag": "AIC", "attr": "usedlag"},
]
expected_index = [
'attr_"teststat"__autolag_"AIC"',
'attr_"pvalue"__autolag_"AIC"',
'attr_"usedlag"__autolag_"AIC"',
]
res = augmented_dickey_fuller(x=x, param=param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertLessEqual(res['attr_"pvalue"__autolag_"AIC"'], 0.05)
self.assertEqual(res['attr_"usedlag"__autolag_"AIC"'], 0)
        # Check if LinAlgError and ValueError are caught
res_linalg_error = augmented_dickey_fuller(
x=np.repeat(np.nan, 100), param=param
)
res_value_error = augmented_dickey_fuller(x=[], param=param)
for index, val in res_linalg_error:
self.assertIsNaN(val)
for index, val in res_value_error:
self.assertIsNaN(val)
# Should return NaN if "attr" is unknown
res_attr_error = augmented_dickey_fuller(
x=x, param=[{"autolag": "AIC", "attr": ""}]
)
for index, val in res_attr_error:
self.assertIsNaN(val)
def test_abs_energy(self):
self.assertEqualOnAllArrayTypes(abs_energy, [1, 1, 1], 3)
self.assertEqualOnAllArrayTypes(abs_energy, [1, 2, 3], 14)
self.assertEqualOnAllArrayTypes(abs_energy, [-1, 2, -3], 14)
self.assertAlmostEqualOnAllArrayTypes(abs_energy, [-1, 1.3], 2.69)
self.assertEqualOnAllArrayTypes(abs_energy, [1], 1)
def test_cid_ce(self):
self.assertEqualOnAllArrayTypes(cid_ce, [1, 1, 1], 0, normalize=True)
self.assertEqualOnAllArrayTypes(cid_ce, [0, 4], 2, normalize=True)
self.assertEqualOnAllArrayTypes(cid_ce, [100, 104], 2, normalize=True)
self.assertEqualOnAllArrayTypes(cid_ce, [1, 1, 1], 0, normalize=False)
self.assertEqualOnAllArrayTypes(cid_ce, [0.5, 3.5, 7.5], 5, normalize=False)
self.assertEqualOnAllArrayTypes(
cid_ce, [-4.33, -1.33, 2.67], 5, normalize=False
)
def test_lempel_ziv_complexity(self):
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [1, 1, 1], 2.0 / 3, bins=2
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [1, 1, 1], 2.0 / 3, bins=5
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [1, 1, 1, 1, 1, 1, 1], 0.4285714285, bins=2
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [1, 1, 1, 2, 1, 1, 1], 0.5714285714, bins=2
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [-1, 4.3, 5, 1, -4.5, 1, 5, 7, -3.4, 6], 0.8, bins=10
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity,
[-1, np.nan, 5, 1, -4.5, 1, 5, 7, -3.4, 6],
0.4,
bins=10,
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, np.linspace(0, 1, 10), 0.6, bins=3
)
self.assertAlmostEqualOnAllArrayTypes(
lempel_ziv_complexity, [1, 1, 2, 3, 4, 5, 6, 0, 7, 8], 0.6, bins=3
)
def test_fourier_entropy(self):
self.assertAlmostEqualOnAllArrayTypes(
fourier_entropy, [1, 2, 1], 0.693147180, bins=2
)
self.assertAlmostEqualOnAllArrayTypes(
fourier_entropy, [1, 2, 1], 0.693147180, bins=5
)
self.assertAlmostEqualOnAllArrayTypes(
fourier_entropy, [1, 1, 2, 1, 1, 1, 1], 0.5623351446188083, bins=5
)
self.assertAlmostEqualOnAllArrayTypes(
fourier_entropy, [1, 1, 1, 1, 2, 1, 1], 1.0397207708399179, bins=5
)
self.assertAlmostEqualOnAllArrayTypes(
fourier_entropy,
[-1, 4.3, 5, 1, -4.5, 1, 5, 7, -3.4, 6],
1.5607104090414063,
bins=10,
)
self.assertIsNanOnAllArrayTypes(
fourier_entropy, [-1, np.nan, 5, 1, -4.5, 1, 5, 7, -3.4, 6], bins=10
)
def test_permutation_entropy(self):
self.assertAlmostEqualOnAllArrayTypes(
permutation_entropy,
[4, 7, 9, 10, 6, 11, 3],
1.054920167,
dimension=3,
tau=1,
)
# should grow
self.assertAlmostEqualOnAllArrayTypes(
permutation_entropy,
[1, -1, 1, -1, 1, -1, 1, -1],
0.6931471805599453,
dimension=3,
tau=1,
)
self.assertAlmostEqualOnAllArrayTypes(
permutation_entropy,
[1, -1, 1, -1, 1, 1, 1, -1],
1.3296613488547582,
dimension=3,
tau=1,
)
self.assertAlmostEqualOnAllArrayTypes(
permutation_entropy,
[-1, 4.3, 5, 1, -4.5, 1, 5, 7, -3.4, 6],
1.0397207708399179,
dimension=3,
tau=2,
)
# nan is treated like any other number
self.assertAlmostEqualOnAllArrayTypes(
permutation_entropy,
[-1, 4.3, 5, 1, -4.5, 1, 5, np.nan, -3.4, 6],
1.0397207708399179,
dimension=3,
tau=2,
)
# if too short, return nan
self.assertIsNanOnAllArrayTypes(
permutation_entropy, [1, -1], dimension=3, tau=1
)
def test_ratio_beyond_r_sigma(self):
        x = [0, 1] * 10 + [10, 20, -30]  # std of x is ~7.8, mean ~0.43
self.assertEqualOnAllArrayTypes(ratio_beyond_r_sigma, x, 3.0 / len(x), r=1)
self.assertEqualOnAllArrayTypes(ratio_beyond_r_sigma, x, 2.0 / len(x), r=2)
self.assertEqualOnAllArrayTypes(ratio_beyond_r_sigma, x, 1.0 / len(x), r=3)
self.assertEqualOnAllArrayTypes(ratio_beyond_r_sigma, x, 0, r=20)
def test_mean_abs_change(self):
self.assertEqualOnAllArrayTypes(mean_abs_change, [-2, 2, 5], 3.5)
self.assertEqualOnAllArrayTypes(mean_abs_change, [1, 2, -1], 2)
def test_mean_change(self):
self.assertEqualOnAllArrayTypes(mean_change, [-2, 2, 5], 3.5)
self.assertEqualOnAllArrayTypes(mean_change, [1, 2, -1], -1)
self.assertEqualOnAllArrayTypes(mean_change, [10, 20], 10)
self.assertIsNanOnAllArrayTypes(mean_change, [1])
self.assertIsNanOnAllArrayTypes(mean_change, [])
def test_mean_second_derivate_central(self):
self.assertEqualOnAllArrayTypes(
mean_second_derivative_central, list(range(10)), 0
)
self.assertEqualOnAllArrayTypes(mean_second_derivative_central, [1, 3, 5], 0)
self.assertEqualOnAllArrayTypes(
mean_second_derivative_central, [1, 3, 7, -3], -3
)
def test_median(self):
self.assertEqualOnAllArrayTypes(median, [1, 1, 2, 2], 1.5)
self.assertEqualOnAllArrayTypes(median, [0.5, 0.5, 2, 3.5, 10], 2)
self.assertEqualOnAllArrayTypes(median, [0.5], 0.5)
self.assertIsNanOnAllArrayTypes(median, [])
def test_mean(self):
self.assertEqualOnAllArrayTypes(mean, [1, 1, 2, 2], 1.5)
self.assertEqualOnAllArrayTypes(mean, [0.5, 0.5, 2, 3.5, 10], 3.3)
self.assertEqualOnAllArrayTypes(mean, [0.5], 0.5)
self.assertIsNanOnAllArrayTypes(mean, [])
def test_length(self):
self.assertEqualOnAllArrayTypes(length, [1, 2, 3, 4], 4)
self.assertEqualOnAllArrayTypes(length, [1, 2, 3], 3)
self.assertEqualOnAllArrayTypes(length, [1, 2], 2)
self.assertEqualOnAllArrayTypes(length, [1, 2, 3, np.NaN], 4)
self.assertEqualOnAllArrayTypes(length, [], 0)
def test_standard_deviation(self):
self.assertAlmostEqualOnAllArrayTypes(standard_deviation, [1, 1, -1, -1], 1)
self.assertAlmostEqualOnAllArrayTypes(
standard_deviation, [1, 2, -2, -1], 1.58113883008
)
self.assertIsNanOnAllArrayTypes(standard_deviation, [])
def test_variation_coefficient(self):
self.assertIsNanOnAllArrayTypes(
variation_coefficient, [1, 1, -1, -1],
)
self.assertAlmostEqualOnAllArrayTypes(
variation_coefficient, [1, 2, -3, -1], -7.681145747868608
)
self.assertAlmostEqualOnAllArrayTypes(
variation_coefficient, [1, 2, 4, -1], 1.2018504251546631
)
self.assertIsNanOnAllArrayTypes(variation_coefficient, [])
def test_variance(self):
self.assertAlmostEqualOnAllArrayTypes(variance, [1, 1, -1, -1], 1)
self.assertAlmostEqualOnAllArrayTypes(variance, [1, 2, -2, -1], 2.5)
self.assertIsNanOnAllArrayTypes(variance, [])
def test_skewness(self):
self.assertEqualOnAllArrayTypes(skewness, [1, 1, 1, 2, 2, 2], 0)
self.assertAlmostEqualOnAllArrayTypes(
skewness, [1, 1, 1, 2, 2], 0.6085806194501855
)
self.assertEqualOnAllArrayTypes(skewness, [1, 1, 1], 0)
self.assertIsNanOnAllArrayTypes(skewness, [1, 1])
def test_kurtosis(self):
self.assertAlmostEqualOnAllArrayTypes(
kurtosis, [1, 1, 1, 2, 2], -3.333333333333333
)
self.assertAlmostEqualOnAllArrayTypes(kurtosis, [1, 1, 1, 1], 0)
self.assertIsNanOnAllArrayTypes(kurtosis, [1, 1, 1])
def test_root_mean_square(self):
self.assertAlmostEqualOnAllArrayTypes(
root_mean_square, [1, 1, 1, 2, 2], 1.4832396974191
)
self.assertAlmostEqualOnAllArrayTypes(root_mean_square, [0], 0)
self.assertIsNanOnAllArrayTypes(root_mean_square, [])
self.assertAlmostEqualOnAllArrayTypes(root_mean_square, [1], 1)
self.assertAlmostEqualOnAllArrayTypes(root_mean_square, [-1], 1)
def test_mean_n_absolute_max(self):
self.assertIsNanOnAllArrayTypes(mean_n_absolute_max, [], number_of_maxima=1)
self.assertIsNanOnAllArrayTypes(
mean_n_absolute_max, [12, 3], number_of_maxima=10
)
self.assertRaises(
AssertionError, mean_n_absolute_max, [12, 3], number_of_maxima=0
)
self.assertRaises(
AssertionError, mean_n_absolute_max, [12, 3], number_of_maxima=-1
)
self.assertAlmostEqualOnAllArrayTypes(
mean_n_absolute_max, [-1, -5, 4, 10], 6.33333333333, number_of_maxima=3
)
self.assertAlmostEqualOnAllArrayTypes(
mean_n_absolute_max, [0, -5, -9], 7.000000, number_of_maxima=2
)
self.assertAlmostEqualOnAllArrayTypes(
mean_n_absolute_max, [0, 0, 0], 0, number_of_maxima=1
)
def test_absolute_sum_of_changes(self):
self.assertEqualOnAllArrayTypes(absolute_sum_of_changes, [1, 1, 1, 1, 2, 1], 2)
self.assertEqualOnAllArrayTypes(absolute_sum_of_changes, [1, -1, 1, -1], 6)
self.assertEqualOnAllArrayTypes(absolute_sum_of_changes, [1], 0)
self.assertEqualOnAllArrayTypes(absolute_sum_of_changes, [], 0)
def test_longest_strike_below_mean(self):
self.assertEqualOnAllArrayTypes(
longest_strike_below_mean, [1, 2, 1, 1, 1, 2, 2, 2], 3
)
self.assertEqualOnAllArrayTypes(
longest_strike_below_mean, [1, 2, 3, 4, 5, 6], 3
)
self.assertEqualOnAllArrayTypes(longest_strike_below_mean, [1, 2, 3, 4, 5], 2)
self.assertEqualOnAllArrayTypes(longest_strike_below_mean, [1, 2, 1], 1)
self.assertEqualOnAllArrayTypes(longest_strike_below_mean, [], 0)
def test_longest_strike_above_mean(self):
self.assertEqualOnAllArrayTypes(
longest_strike_above_mean, [1, 2, 1, 2, 1, 2, 2, 1], 2
)
self.assertEqualOnAllArrayTypes(
longest_strike_above_mean, [1, 2, 3, 4, 5, 6], 3
)
self.assertEqualOnAllArrayTypes(longest_strike_above_mean, [1, 2, 3, 4, 5], 2)
self.assertEqualOnAllArrayTypes(longest_strike_above_mean, [1, 2, 1], 1)
self.assertEqualOnAllArrayTypes(longest_strike_above_mean, [], 0)
def test_count_above_mean(self):
self.assertEqualOnAllArrayTypes(count_above_mean, [1, 2, 1, 2, 1, 2], 3)
self.assertEqualOnAllArrayTypes(count_above_mean, [1, 1, 1, 1, 1, 2], 1)
self.assertEqualOnAllArrayTypes(count_above_mean, [1, 1, 1, 1, 1], 0)
self.assertEqualOnAllArrayTypes(count_above_mean, [], 0)
def test_count_below_mean(self):
self.assertEqualOnAllArrayTypes(count_below_mean, [1, 2, 1, 2, 1, 2], 3)
self.assertEqualOnAllArrayTypes(count_below_mean, [1, 1, 1, 1, 1, 2], 5)
self.assertEqualOnAllArrayTypes(count_below_mean, [1, 1, 1, 1, 1], 0)
self.assertEqualOnAllArrayTypes(count_below_mean, [], 0)
def test_last_location_maximum(self):
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_maximum, [1, 2, 1, 2, 1], 0.8
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_maximum, [1, 2, 1, 1, 2], 1.0
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_maximum, [2, 1, 1, 1, 1], 0.2
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_maximum, [1, 1, 1, 1, 1], 1.0
)
self.assertAlmostEqualOnAllArrayTypes(last_location_of_maximum, [1], 1.0)
self.assertIsNanOnAllArrayTypes(last_location_of_maximum, [])
def test_first_location_of_maximum(self):
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_maximum, [1, 2, 1, 2, 1], 0.2
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_maximum, [1, 2, 1, 1, 2], 0.2
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_maximum, [2, 1, 1, 1, 1], 0.0
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_maximum, [1, 1, 1, 1, 1], 0.0
)
self.assertAlmostEqualOnAllArrayTypes(first_location_of_maximum, [1], 0.0)
self.assertIsNanOnAllArrayTypes(first_location_of_maximum, [])
def test_last_location_of_minimum(self):
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_minimum, [1, 2, 1, 2, 1], 1.0
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_minimum, [1, 2, 1, 2, 2], 0.6
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_minimum, [2, 1, 1, 1, 2], 0.8
)
self.assertAlmostEqualOnAllArrayTypes(
last_location_of_minimum, [1, 1, 1, 1, 1], 1.0
)
self.assertAlmostEqualOnAllArrayTypes(last_location_of_minimum, [1], 1.0)
self.assertIsNanOnAllArrayTypes(last_location_of_minimum, [])
def test_first_location_of_minimum(self):
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_minimum, [1, 2, 1, 2, 1], 0.0
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_minimum, [2, 2, 1, 2, 2], 0.4
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_minimum, [2, 1, 1, 1, 2], 0.2
)
self.assertAlmostEqualOnAllArrayTypes(
first_location_of_minimum, [1, 1, 1, 1, 1], 0.0
)
self.assertAlmostEqualOnAllArrayTypes(first_location_of_minimum, [1], 0.0)
self.assertIsNanOnAllArrayTypes(first_location_of_minimum, [])
def test_percentage_of_doubled_datapoints(self):
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_datapoints_to_all_datapoints, [1, 1, 2, 3, 4], 0.4
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_datapoints_to_all_datapoints, [1, 1.5, 2, 3], 0
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_datapoints_to_all_datapoints, [1], 0
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_datapoints_to_all_datapoints,
[1.111, -2.45, 1.111, 2.45],
0.5,
)
self.assertIsNanOnAllArrayTypes(
percentage_of_reoccurring_datapoints_to_all_datapoints, []
)
def test_ratio_of_doubled_values(self):
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_values_to_all_values, [1, 1, 2, 3, 4], 0.25
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_values_to_all_values, [1, 1.5, 2, 3], 0
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_values_to_all_values, [1], 0
)
self.assertAlmostEqualOnAllArrayTypes(
percentage_of_reoccurring_values_to_all_values,
[1.111, -2.45, 1.111, 2.45],
1.0 / 3.0,
)
self.assertIsNanOnAllArrayTypes(
percentage_of_reoccurring_values_to_all_values, []
)
def test_sum_of_reoccurring_values(self):
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_values, [1, 1, 2, 3, 4, 4], 5
)
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_values, [1, 1.5, 2, 3], 0
)
self.assertAlmostEqualOnAllArrayTypes(sum_of_reoccurring_values, [1], 0)
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_values, [1.111, -2.45, 1.111, 2.45], 1.111
)
self.assertAlmostEqualOnAllArrayTypes(sum_of_reoccurring_values, [], 0)
def test_sum_of_reoccurring_data_points(self):
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_data_points, [1, 1, 2, 3, 4, 4], 10
)
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_data_points, [1, 1.5, 2, 3], 0
)
self.assertAlmostEqualOnAllArrayTypes(sum_of_reoccurring_data_points, [1], 0)
self.assertAlmostEqualOnAllArrayTypes(
sum_of_reoccurring_data_points, [1.111, -2.45, 1.111, 2.45], 2.222
)
self.assertAlmostEqualOnAllArrayTypes(sum_of_reoccurring_data_points, [], 0)
def test_uniqueness_factor(self):
self.assertAlmostEqualOnAllArrayTypes(
ratio_value_number_to_time_series_length, [1, 1, 2, 3, 4], 0.8
)
self.assertAlmostEqualOnAllArrayTypes(
ratio_value_number_to_time_series_length, [1, 1.5, 2, 3], 1
)
self.assertAlmostEqualOnAllArrayTypes(
ratio_value_number_to_time_series_length, [1], 1
)
self.assertAlmostEqualOnAllArrayTypes(
ratio_value_number_to_time_series_length, [1.111, -2.45, 1.111, 2.45], 0.75
)
self.assertIsNanOnAllArrayTypes(ratio_value_number_to_time_series_length, [])
def test_fft_coefficient(self):
x = range(10)
param = [
{"coeff": 0, "attr": "real"},
{"coeff": 1, "attr": "real"},
{"coeff": 2, "attr": "real"},
{"coeff": 0, "attr": "imag"},
{"coeff": 1, "attr": "imag"},
{"coeff": 2, "attr": "imag"},
{"coeff": 0, "attr": "angle"},
{"coeff": 1, "attr": "angle"},
{"coeff": 2, "attr": "angle"},
{"coeff": 0, "attr": "abs"},
{"coeff": 1, "attr": "abs"},
{"coeff": 2, "attr": "abs"},
]
expected_index = [
'attr_"real"__coeff_0',
'attr_"real"__coeff_1',
'attr_"real"__coeff_2',
'attr_"imag"__coeff_0',
'attr_"imag"__coeff_1',
'attr_"imag"__coeff_2',
'attr_"angle"__coeff_0',
'attr_"angle"__coeff_1',
'attr_"angle"__coeff_2',
'attr_"abs"__coeff_0',
'attr_"abs"__coeff_1',
'attr_"abs"__coeff_2',
]
res = pd.Series(dict(fft_coefficient(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['attr_"imag"__coeff_0'], 0, places=6)
self.assertAlmostEqual(res['attr_"real"__coeff_0'], sum(x), places=6)
self.assertAlmostEqual(res['attr_"angle"__coeff_0'], 0, places=6)
self.assertAlmostEqual(res['attr_"abs"__coeff_0'], sum(x), places=6)
x = [0, 1, 0, 0]
res = pd.Series(dict(fft_coefficient(x, param)))
# see documentation of fft in numpy
# should return array([1. + 0.j, 0. - 1.j, -1. + 0.j])
self.assertAlmostEqual(res['attr_"imag"__coeff_0'], 0, places=6)
self.assertAlmostEqual(res['attr_"real"__coeff_0'], 1, places=6)
self.assertAlmostEqual(res['attr_"imag"__coeff_1'], -1, places=6)
self.assertAlmostEqual(res['attr_"angle"__coeff_1'], -90, places=6)
self.assertAlmostEqual(res['attr_"real"__coeff_1'], 0, places=6)
self.assertAlmostEqual(res['attr_"imag"__coeff_2'], 0, places=6)
self.assertAlmostEqual(res['attr_"real"__coeff_2'], -1, places=6)
        # test what happens if coeff is bigger than the time series length
x = range(5)
param = [{"coeff": 10, "attr": "real"}]
expected_index = ['attr_"real"__coeff_10']
res = pd.Series(dict(fft_coefficient(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertIsNaN(res['attr_"real"__coeff_10'])
def test_fft_aggregated(self):
param = [
{"aggtype": "centroid"},
{"aggtype": "variance"},
{"aggtype": "skew"},
{"aggtype": "kurtosis"},
]
expected_index = [
'aggtype_"centroid"',
'aggtype_"variance"',
'aggtype_"skew"',
'aggtype_"kurtosis"',
]
x = np.arange(10)
res = pd.Series(dict(fft_aggregated(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['aggtype_"centroid"'], 1.135, places=3)
self.assertAlmostEqual(res['aggtype_"variance"'], 2.368, places=3)
self.assertAlmostEqual(res['aggtype_"skew"'], 1.249, places=3)
self.assertAlmostEqual(res['aggtype_"kurtosis"'], 3.643, places=3)
# Scalar multiplying the distribution should not change the results:
x = 10 * x
res = pd.Series(dict(fft_aggregated(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['aggtype_"centroid"'], 1.135, places=3)
self.assertAlmostEqual(res['aggtype_"variance"'], 2.368, places=3)
self.assertAlmostEqual(res['aggtype_"skew"'], 1.249, places=3)
self.assertAlmostEqual(res['aggtype_"kurtosis"'], 3.643, places=3)
        # The fft of a sine wave is a dirac delta: variance and skew should be near zero, kurtosis should be near 3.
        # However, in the discrete limit, skew and kurtosis blow up in a manner that is noise dependent and are
        # therefore bad features, so a nan should be returned for these values
x = np.sin(2 * np.pi / 10 * np.arange(30))
res = pd.Series(dict(fft_aggregated(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['aggtype_"centroid"'], 3.0, places=5)
self.assertAlmostEqual(res['aggtype_"variance"'], 0.0, places=5)
self.assertIsNaN(res['aggtype_"skew"'])
self.assertIsNaN(res['aggtype_"kurtosis"'])
# Gaussian test:
def normal(y, mean_, sigma_):
return (
1
/ (2 * np.pi * sigma_ ** 2)
* np.exp(-((y - mean_) ** 2) / (2 * sigma_ ** 2))
)
mean_ = 500.0
sigma_ = 1.0
range_ = int(2 * mean_)
x = list(map(lambda x: normal(x, mean_, sigma_), range(range_)))
# The fourier transform of a Normal dist in the positive halfspace is a half normal,
# Hand calculated values of centroid and variance based for the half-normal dist:
# (Ref: https://en.wikipedia.org/wiki/Half-normal_distribution)
expected_fft_centroid = (range_ / (2 * np.pi * sigma_)) * np.sqrt(2 / np.pi)
expected_fft_var = (range_ / (2 * np.pi * sigma_)) ** 2 * (1 - 2 / np.pi)
# Calculate values for unit test:
res = pd.Series(dict(fft_aggregated(x, param)))
self.assertCountEqual(list(res.index), expected_index)
# Compare against hand calculated values:
rel_diff_allowed = 0.02
self.assertAlmostEqual(
res['aggtype_"centroid"'],
expected_fft_centroid,
delta=rel_diff_allowed * expected_fft_centroid,
)
self.assertAlmostEqual(
res['aggtype_"variance"'],
expected_fft_var,
delta=rel_diff_allowed * expected_fft_var,
)
def test_number_peaks(self):
x = np.array([0, 1, 2, 1, 0, 1, 2, 3, 4, 5, 4, 3, 2, 1])
self.assertEqualOnAllArrayTypes(number_peaks, x, 2, 1)
self.assertEqualOnAllArrayTypes(number_peaks, x, 2, 2)
self.assertEqualOnAllArrayTypes(number_peaks, x, 1, 3)
self.assertEqualOnAllArrayTypes(number_peaks, x, 1, 4)
self.assertEqualOnAllArrayTypes(number_peaks, x, 0, 5)
self.assertEqualOnAllArrayTypes(number_peaks, x, 0, 6)
def test_mass_quantile(self):
x = [1] * 101
param = [{"q": 0.5}]
expected_index = ["q_0.5"]
res = index_mass_quantile(x, param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["q_0.5"], 0.5, places=1)
# Test for parts of pandas series
x = pd.Series([0] * 55 + [1] * 101)
param = [{"q": 0.5}]
expected_index = ["q_0.5"]
res = index_mass_quantile(x[x > 0], param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["q_0.5"], 0.5, places=1)
x = [0] * 1000 + [1]
param = [{"q": 0.5}, {"q": 0.99}]
expected_index = ["q_0.5", "q_0.99"]
res = index_mass_quantile(x, param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["q_0.5"], 1, places=1)
self.assertAlmostEqual(res["q_0.99"], 1, places=1)
x = [0, 1, 1, 0, 0, 1, 0, 0]
param = [{"q": 0.30}, {"q": 0.60}, {"q": 0.90}]
expected_index = ["q_0.3", "q_0.6", "q_0.9"]
res = index_mass_quantile(x, param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["q_0.3"], 0.25, places=1)
self.assertAlmostEqual(res["q_0.6"], 0.375, places=1)
self.assertAlmostEqual(res["q_0.9"], 0.75, places=1)
x = [0, 0, 0]
param = [{"q": 0.5}]
expected_index = ["q_0.5"]
res = index_mass_quantile(x, param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertTrue(np.isnan(res["q_0.5"]))
x = []
param = [{"q": 0.5}]
expected_index = ["q_0.5"]
res = index_mass_quantile(x, param)
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertTrue(np.isnan(res["q_0.5"]))
def test_number_cwt_peaks(self):
x = [1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1]
self.assertEqualOnAllArrayTypes(number_cwt_peaks, x, 2, 2)
def test_spkt_welch_density(self):
# todo: improve tests
x = range(10)
param = [{"coeff": 1}, {"coeff": 10}]
expected_index = ["coeff_1", "coeff_10"]
res = pd.Series(dict(spkt_welch_density(x, param)))
self.assertCountEqual(list(res.index), expected_index)
self.assertIsNaN(res["coeff_10"])
def test_cwt_coefficients(self):
x = [0.1, 0.2, 0.3]
param = [
{"widths": (1, 2, 3), "coeff": 2, "w": 1},
{"widths": (1, 3), "coeff": 2, "w": 3},
{"widths": (1, 3), "coeff": 5, "w": 3},
]
shuffle(param)
expected_index = [
"coeff_2__w_1__widths_(1, 2, 3)",
"coeff_2__w_3__widths_(1, 3)",
"coeff_5__w_3__widths_(1, 3)",
]
res = cwt_coefficients(x, param)
res = pd.Series(dict(res))
# todo: add unit test for the values
self.assertCountEqual(list(res.index), expected_index)
self.assertTrue(math.isnan(res["coeff_5__w_3__widths_(1, 3)"]))
def test_ar_coefficient(self):
# Test for X_i = 2.5 * X_{i-1} + 1
param = [{"k": 1, "coeff": 0}, {"k": 1, "coeff": 1}]
shuffle(param)
x = [1] + 9 * [0]
for i in range(1, len(x)):
x[i] = 2.5 * x[i - 1] + 1
res = ar_coefficient(x, param)
expected_index = ["coeff_0__k_1", "coeff_1__k_1"]
res = pd.Series(dict(res))
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["coeff_0__k_1"], 1, places=2)
self.assertAlmostEqual(res["coeff_1__k_1"], 2.5, places=2)
        # Test for X_i = 3.5 * X_{i-1} - 2 * X_{i-2} + 1
param = [
{"k": 1, "coeff": 0},
{"k": 1, "coeff": 1},
{"k": 2, "coeff": 0},
{"k": 2, "coeff": 1},
{"k": 2, "coeff": 2},
{"k": 2, "coeff": 3},
]
shuffle(param)
x = [1, 1] + 5 * [0]
for i in range(2, len(x)):
x[i] = (-2) * x[i - 2] + 3.5 * x[i - 1] + 1
res = ar_coefficient(x, param)
expected_index = [
"coeff_0__k_1",
"coeff_1__k_1",
"coeff_0__k_2",
"coeff_1__k_2",
"coeff_2__k_2",
"coeff_3__k_2",
]
res = pd.Series(dict(res))
self.assertIsInstance(res, pd.Series)
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res["coeff_0__k_2"], 1, places=2)
self.assertAlmostEqual(res["coeff_1__k_2"], 3.5, places=2)
self.assertAlmostEqual(res["coeff_2__k_2"], -2, places=2)
self.assertTrue(np.isnan(res["coeff_3__k_2"]))
def test_time_reversal_asymmetry_statistic(self):
x = [1] * 10
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 0
)
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 1
)
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 2
)
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 3
)
x = [1, 2, -3, 4]
        # 1/2 * ( (4^2 * -3 + 3 * 2^2) + (3^2*2) - (2*1^2) ) = 1/2 * (-48+12+18-2) = -20/2 = -10
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, -10, 1
)
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 2
)
self.assertAlmostEqualOnAllArrayTypes(
time_reversal_asymmetry_statistic, x, 0, 3
)
def test_number_crossing_m(self):
x = [10, -10, 10, -10]
self.assertEqualOnAllArrayTypes(number_crossing_m, x, 3, 0)
self.assertEqualOnAllArrayTypes(number_crossing_m, x, 0, 10)
x = [10, 20, 20, 30]
self.assertEqualOnAllArrayTypes(number_crossing_m, x, 0, 0)
self.assertEqualOnAllArrayTypes(number_crossing_m, x, 1, 15)
def test_c3(self):
x = [1] * 10
self.assertAlmostEqualOnAllArrayTypes(c3, x, 1, 0)
self.assertAlmostEqualOnAllArrayTypes(c3, x, 1, 1)
self.assertAlmostEqualOnAllArrayTypes(c3, x, 1, 2)
self.assertAlmostEqualOnAllArrayTypes(c3, x, 1, 3)
x = [1, 2, -3, 4]
# 1/2 *(1*2*(-3)+2*(-3)*4) = 1/2 *(-6-24) = -30/2
self.assertAlmostEqualOnAllArrayTypes(c3, x, -15, 1)
self.assertAlmostEqualOnAllArrayTypes(c3, x, 0, 2)
self.assertAlmostEqualOnAllArrayTypes(c3, x, 0, 3)
def test_binned_entropy(self):
self.assertAlmostEqualOnAllArrayTypes(binned_entropy, [10] * 100, 0, 10)
self.assertAlmostEqualOnAllArrayTypes(
binned_entropy,
[10] * 10 + [1],
-(10 / 11 * np.math.log(10 / 11) + 1 / 11 * np.math.log(1 / 11)),
10,
)
self.assertAlmostEqualOnAllArrayTypes(
binned_entropy,
[10] * 10 + [1],
-(10 / 11 * np.math.log(10 / 11) + 1 / 11 * np.math.log(1 / 11)),
10,
)
self.assertAlmostEqualOnAllArrayTypes(
binned_entropy,
[10] * 10 + [1],
-(10 / 11 * np.math.log(10 / 11) + 1 / 11 * np.math.log(1 / 11)),
100,
)
self.assertAlmostEqualOnAllArrayTypes(
binned_entropy, list(range(10)), -np.math.log(1 / 10), 100
)
self.assertAlmostEqualOnAllArrayTypes(
binned_entropy, list(range(100)), -np.math.log(1 / 2), 2
)
def test_sample_entropy(self):
# "random" list -> large entropy
ts = [
1,
4,
5,
1,
7,
3,
1,
2,
5,
8,
9,
7,
3,
7,
9,
5,
4,
3,
9,
1,
2,
3,
4,
2,
9,
6,
7,
4,
9,
2,
9,
9,
6,
5,
1,
3,
8,
1,
5,
3,
8,
4,
1,
2,
2,
1,
6,
5,
3,
6,
5,
4,
8,
9,
6,
7,
5,
3,
2,
5,
4,
2,
5,
1,
6,
5,
3,
5,
6,
7,
8,
5,
2,
8,
6,
3,
8,
2,
7,
1,
7,
3,
5,
6,
2,
1,
3,
7,
3,
5,
3,
7,
6,
7,
7,
2,
3,
1,
7,
8,
]
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 2.38262780)
# This is not very complex, so it gives a small value
ts = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 0.25131442)
# however adding a 2 increases complexity
ts = [1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 0.74193734)
# and it does not matter where
ts = [1, 1, 1, 2, 1, 1, 1, 1, 1, 1]
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 0.74193734)
# negative numbers also work
ts = [1, -1, 1, -1, 1, -1]
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 0.69314718)
# nan gives nan
ts = [1, -1, 1, np.nan, 1, -1]
self.assertIsNanOnAllArrayTypes(sample_entropy, ts)
# this is not a very "random" list, so it should give a small entropy
ts = list(range(1000))
self.assertAlmostEqualOnAllArrayTypes(sample_entropy, ts, 0.0010314596066622707)
def test_autocorrelation(self):
self.assertAlmostEqualOnAllArrayTypes(
autocorrelation, [1, 2, 1, 2, 1, 2], -1, 1
)
self.assertAlmostEqualOnAllArrayTypes(autocorrelation, [1, 2, 1, 2, 1, 2], 1, 2)
self.assertAlmostEqualOnAllArrayTypes(
autocorrelation, [1, 2, 1, 2, 1, 2], -1, 3
)
self.assertAlmostEqualOnAllArrayTypes(autocorrelation, [1, 2, 1, 2, 1, 2], 1, 4)
self.assertAlmostEqualOnAllArrayTypes(
autocorrelation, pd.Series([0, 1, 2, 0, 1, 2]), -0.75, 2
)
# Autocorrelation lag is larger than length of the time series
self.assertIsNanOnAllArrayTypes(autocorrelation, [1, 2, 1, 2, 1, 2], 200)
self.assertIsNanOnAllArrayTypes(autocorrelation, [np.nan], 0)
self.assertIsNanOnAllArrayTypes(autocorrelation, [], 0)
# time series with length 1 has no variance, therefore no result for autocorrelation at lag 0
self.assertIsNanOnAllArrayTypes(autocorrelation, [1], 0)
def test_quantile(self):
self.assertAlmostEqualOnAllArrayTypes(
quantile, [1, 1, 1, 3, 4, 7, 9, 11, 13, 13], 1.0, 0.2
)
self.assertAlmostEqualOnAllArrayTypes(
quantile, [1, 1, 1, 3, 4, 7, 9, 11, 13, 13], 13, 0.9
)
self.assertAlmostEqualOnAllArrayTypes(
quantile, [1, 1, 1, 3, 4, 7, 9, 11, 13, 13], 13, 1.0
)
self.assertAlmostEqualOnAllArrayTypes(quantile, [1], 1, 0.5)
self.assertIsNanOnAllArrayTypes(quantile, [], 0.5)
def test_mean_abs_change_quantiles(self):
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
1,
ql=0.1,
qh=0.9,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
0,
ql=0.15,
qh=0.18,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, 0, 0, 0], 0.5, ql=0, qh=1, isabs=True, f_agg="mean"
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, 0, 0, 0],
0.5,
ql=0.1,
qh=1,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, 0, 0, 0],
0,
ql=0.1,
qh=0.6,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, -9, 0, 0], 5, ql=0, qh=1, isabs=True, f_agg="mean"
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, -9, 0, 0],
0.5,
ql=0.1,
qh=1,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, -9, 0, 0, 1, 0],
0.75,
ql=0.1,
qh=1,
isabs=True,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
1,
ql=0.1,
qh=0.9,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
0,
ql=0.15,
qh=0.18,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, 0, 0, 0], 0, ql=0, qh=1, isabs=False, f_agg="mean"
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, 0, 0, 0],
0,
ql=0.1,
qh=1,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, 0, 0, 0],
0,
ql=0.1,
qh=0.6,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, -9, 0, 0], 0, ql=0, qh=1, isabs=False, f_agg="mean"
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, -9, 0, 0],
0.5,
ql=0.1,
qh=1,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
[0, 1, -9, 0, 0, 1, 0],
0.25,
ql=0.1,
qh=1,
isabs=False,
f_agg="mean",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
0,
ql=0.1,
qh=0.9,
isabs=True,
f_agg="std",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, 0, 0, 0], 0.5, ql=0, qh=1, isabs=True, f_agg="std"
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles,
list(range(10)),
0,
ql=0.1,
qh=0.9,
isabs=False,
f_agg="std",
)
self.assertAlmostEqualOnAllArrayTypes(
change_quantiles, [0, 1, 0, 1, 0], 1, ql=0, qh=1, isabs=False, f_agg="std"
)
def test_value_count(self):
self.assertEqualPandasSeriesWrapper(value_count, [1] * 10, 10, value=1)
self.assertEqualPandasSeriesWrapper(value_count, list(range(10)), 1, value=0)
self.assertEqualPandasSeriesWrapper(value_count, [1] * 10, 0, value=0)
self.assertEqualPandasSeriesWrapper(value_count, [np.NaN, 0, 1] * 3, 3, value=0)
self.assertEqualPandasSeriesWrapper(
value_count, [np.NINF, 0, 1] * 3, 3, value=0
)
self.assertEqualPandasSeriesWrapper(
value_count, [np.PINF, 0, 1] * 3, 3, value=0
)
self.assertEqualPandasSeriesWrapper(
value_count, [0.1, 0.2, 0.3] * 3, 3, value=0.2
)
self.assertEqualPandasSeriesWrapper(
value_count, [np.NaN, 0, 1] * 3, 3, value=np.NaN
)
self.assertEqualPandasSeriesWrapper(
value_count, [np.NINF, 0, 1] * 3, 3, value=np.NINF
)
self.assertEqualPandasSeriesWrapper(
value_count, [np.PINF, 0, 1] * 3, 3, value=np.PINF
)
def test_range_count(self):
self.assertEqualPandasSeriesWrapper(range_count, [1] * 10, 0, min=1, max=1)
self.assertEqualPandasSeriesWrapper(range_count, [1] * 10, 0, min=0.9, max=1)
self.assertEqualPandasSeriesWrapper(range_count, [1] * 10, 10, min=1, max=1.1)
self.assertEqualPandasSeriesWrapper(
range_count, list(range(10)), 9, min=0, max=9
)
self.assertEqualPandasSeriesWrapper(
range_count, list(range(10)), 10, min=0, max=10
)
self.assertEqualPandasSeriesWrapper(
range_count, list(range(0, -10, -1)), 9, min=-10, max=0
)
self.assertEqualPandasSeriesWrapper(
range_count, [np.NaN, np.PINF, np.NINF] + list(range(10)), 10, min=0, max=10
)
def test_approximate_entropy(self):
self.assertEqualOnAllArrayTypes(approximate_entropy, [1], 0, m=2, r=0.5)
self.assertEqualOnAllArrayTypes(approximate_entropy, [1, 2], 0, m=2, r=0.5)
self.assertEqualOnAllArrayTypes(approximate_entropy, [1, 2, 3], 0, m=2, r=0.5)
self.assertEqualOnAllArrayTypes(approximate_entropy, [1, 2, 3], 0, m=2, r=0.5)
self.assertAlmostEqualOnAllArrayTypes(
approximate_entropy, [12, 13, 15, 16, 17] * 10, 0.282456191, m=2, r=0.9
)
self.assertRaises(
ValueError, approximate_entropy, x=[12, 13, 15, 16, 17] * 10, m=2, r=-0.5
)
def test_absolute_maximum(self):
self.assertEqualOnAllArrayTypes(absolute_maximum, [-5, 0, 1], 5)
self.assertEqualOnAllArrayTypes(absolute_maximum, [0], 0)
self.assertIsNanOnAllArrayTypes(absolute_maximum, [])
def test_max_langevin_fixed_point(self):
"""
Estimating the intrinsic velocity of a dissipative soliton
"""
default_params = {"m": 3, "r": 30}
# active Brownian motion
ds = velocity(tau=3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(100000, v0=np.zeros(1))
v0 = max_langevin_fixed_point(v[:, 0], **default_params)
self.assertLess(abs(ds.deterministic - v0), 0.001)
# Brownian motion
ds = velocity(tau=2.0 / 0.3 - 3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(10000, v0=np.zeros(1))
v0 = max_langevin_fixed_point(v[:, 0], **default_params)
self.assertLess(v0, 0.001)
def test_linear_trend(self):
# check linear up trend
x = range(10)
param = [
{"attr": "pvalue"},
{"attr": "rvalue"},
{"attr": "intercept"},
{"attr": "slope"},
{"attr": "stderr"},
]
res = linear_trend(x, param)
res = pd.Series(dict(res))
expected_index = [
'attr_"pvalue"',
'attr_"intercept"',
'attr_"rvalue"',
'attr_"slope"',
'attr_"stderr"',
]
self.assertEqual(len(res), 5)
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['attr_"pvalue"'], 0)
self.assertAlmostEqual(res['attr_"stderr"'], 0)
self.assertAlmostEqual(res['attr_"intercept"'], 0)
self.assertAlmostEqual(res['attr_"slope"'], 1.0)
# check p value for random trend
np.random.seed(42)
x = np.random.uniform(size=100)
param = [{"attr": "rvalue"}]
res = linear_trend(x, param)
res = pd.Series(dict(res))
self.assertLess(abs(res['attr_"rvalue"']), 0.1)
# check slope and intercept decreasing trend with intercept
x = [42 - 2 * x for x in range(10)]
param = [{"attr": "intercept"}, {"attr": "slope"}]
res = linear_trend(x, param)
res = pd.Series(dict(res))
self.assertAlmostEqual(res['attr_"intercept"'], 42)
self.assertAlmostEqual(res['attr_"slope"'], -2)
def test__aggregate_on_chunks(self):
self.assertListEqual(
_aggregate_on_chunks(x=pd.Series([0, 1, 2, 3]), f_agg="max", chunk_len=2),
[1, 3],
)
self.assertListEqual(
_aggregate_on_chunks(x=pd.Series([1, 1, 3, 3]), f_agg="max", chunk_len=2),
[1, 3],
)
self.assertListEqual(
_aggregate_on_chunks(x=pd.Series([0, 1, 2, 3]), f_agg="min", chunk_len=2),
[0, 2],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([0, 1, 2, 3, 5]), f_agg="min", chunk_len=2
),
[0, 2, 5],
)
self.assertListEqual(
_aggregate_on_chunks(x=pd.Series([0, 1, 2, 3]), f_agg="mean", chunk_len=2),
[0.5, 2.5],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([0, 1, 0, 4, 5]), f_agg="mean", chunk_len=2
),
[0.5, 2, 5],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([0, 1, 0, 4, 5]), f_agg="mean", chunk_len=3
),
[1 / 3, 4.5],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([0, 1, 2, 3, 5, -2]), f_agg="median", chunk_len=2
),
[0.5, 2.5, 1.5],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([-10, 5, 3, -3, 4, -6]), f_agg="median", chunk_len=3
),
[3, -3],
)
self.assertListEqual(
_aggregate_on_chunks(
x=pd.Series([0, 1, 2, np.NaN, 5]), f_agg="median", chunk_len=2
),
[0.5, 2, 5],
)
def test_agg_linear_trend(self):
x = pd.Series(range(9), index=range(9))
param = [
{"attr": "intercept", "chunk_len": 3, "f_agg": "max"},
{"attr": "slope", "chunk_len": 3, "f_agg": "max"},
{"attr": "intercept", "chunk_len": 3, "f_agg": "min"},
{"attr": "slope", "chunk_len": 3, "f_agg": "min"},
{"attr": "intercept", "chunk_len": 3, "f_agg": "mean"},
{"attr": "slope", "chunk_len": 3, "f_agg": "mean"},
{"attr": "intercept", "chunk_len": 3, "f_agg": "median"},
{"attr": "slope", "chunk_len": 3, "f_agg": "median"},
]
expected_index = [
'attr_"intercept"__chunk_len_3__f_agg_"max"',
'attr_"slope"__chunk_len_3__f_agg_"max"',
'attr_"intercept"__chunk_len_3__f_agg_"min"',
'attr_"slope"__chunk_len_3__f_agg_"min"',
'attr_"intercept"__chunk_len_3__f_agg_"mean"',
'attr_"slope"__chunk_len_3__f_agg_"mean"',
'attr_"intercept"__chunk_len_3__f_agg_"median"',
'attr_"slope"__chunk_len_3__f_agg_"median"',
]
res = agg_linear_trend(x=x, param=param)
res = pd.Series(dict(res))
self.assertEqual(len(res), 8)
self.maxDiff = 2000
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"max"'], 2)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"max"'], 3)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"min"'], 0)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"min"'], 3)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"mean"'], 1)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"mean"'], 3)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"median"'], 1)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"median"'], 3)
x = pd.Series([np.NaN, np.NaN, np.NaN, -3, -3, -3])
res = agg_linear_trend(x=x, param=param)
res = pd.Series(dict(res))
self.assertIsNaN(res['attr_"intercept"__chunk_len_3__f_agg_"max"'])
self.assertIsNaN(res['attr_"slope"__chunk_len_3__f_agg_"max"'])
self.assertIsNaN(res['attr_"intercept"__chunk_len_3__f_agg_"min"'])
self.assertIsNaN(res['attr_"slope"__chunk_len_3__f_agg_"min"'])
self.assertIsNaN(res['attr_"intercept"__chunk_len_3__f_agg_"mean"'])
self.assertIsNaN(res['attr_"slope"__chunk_len_3__f_agg_"mean"'])
self.assertIsNaN(res['attr_"intercept"__chunk_len_3__f_agg_"median"'])
self.assertIsNaN(res['attr_"slope"__chunk_len_3__f_agg_"median"'])
x = pd.Series([np.NaN, np.NaN, -3, -3, -3, -3])
res = agg_linear_trend(x=x, param=param)
res = pd.Series(dict(res))
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"max"'], -3)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"max"'], 0)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"min"'], -3)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"min"'], 0)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"mean"'], -3)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"mean"'], 0)
self.assertAlmostEqual(res['attr_"intercept"__chunk_len_3__f_agg_"median"'], -3)
self.assertAlmostEqual(res['attr_"slope"__chunk_len_3__f_agg_"median"'], 0)
def test_energy_ratio_by_chunks(self):
x = pd.Series(range(90), index=range(90))
param = [{"num_segments": 6, "segment_focus": i} for i in range(6)]
output = energy_ratio_by_chunks(x=x, param=param)
self.assertAlmostEqual(output[0][1], 0.0043, places=3)
self.assertAlmostEqual(output[1][1], 0.0316, places=3)
self.assertAlmostEqual(output[2][1], 0.0871, places=3)
self.assertAlmostEqual(output[3][1], 0.1709, places=3)
self.assertAlmostEqual(output[4][1], 0.2829, places=3)
self.assertAlmostEqual(output[5][1], 0.4232, places=3)
# Sum of the ratios should be 1.0
        total = 0.0
        for _, dat in output:
            total += dat
        self.assertAlmostEqual(total, 1.0)
x = pd.Series(1, index=range(10))
param = [{"num_segments": 3, "segment_focus": i} for i in range(3)]
output = energy_ratio_by_chunks(x=x, param=param)
self.assertAlmostEqual(output[0][1], 0.4, places=3)
self.assertAlmostEqual(output[1][1], 0.3, places=3)
self.assertAlmostEqual(output[2][1], 0.3, places=3)
# Sum of the ratios should be 1.0
        total = 0.0
        for _, dat in output:
            total += dat
        self.assertAlmostEqual(total, 1.0)
x = pd.Series(0, index=range(10))
param = [{"num_segments": 3, "segment_focus": i} for i in range(3)]
output = energy_ratio_by_chunks(x=x, param=param)
self.assertIsNaN(output[0][1])
self.assertIsNaN(output[1][1])
self.assertIsNaN(output[2][1])
def test_linear_trend_timewise_hours(self):
"""Test linear_trend_timewise function with hour intervals."""
x = pd.Series(
[0, 1, 3, 6],
index=pd.DatetimeIndex(
[
"2018-01-01 04:00:00",
"2018-01-01 05:00:00",
"2018-01-01 07:00:00",
"2018-01-01 10:00:00",
]
),
)
param = [
{"attr": "pvalue"},
{"attr": "rvalue"},
{"attr": "intercept"},
{"attr": "slope"},
{"attr": "stderr"},
]
res = linear_trend_timewise(x, param)
res = pd.Series(dict(res))
expected_index = [
'attr_"pvalue"',
'attr_"intercept"',
'attr_"rvalue"',
'attr_"slope"',
'attr_"stderr"',
]
self.assertEqual(len(res), 5)
self.assertCountEqual(list(res.index), expected_index)
self.assertAlmostEqual(res['attr_"pvalue"'], 0, places=3)
self.assertAlmostEqual(res['attr_"stderr"'], 0, places=3)
self.assertAlmostEqual(res['attr_"intercept"'], 0, places=3)
self.assertAlmostEqual(res['attr_"slope"'], 1.0, places=3)
def test_linear_trend_timewise_days(self):
"""Test linear_trend_timewise function with day intervals."""
# Try with different days
x = pd.Series(
[0, 24, 48, 72],
index=pd.DatetimeIndex(
[
"2018-01-01 04:00:00",
"2018-01-02 04:00:00",
"2018-01-03 04:00:00",
"2018-01-04 04:00:00",
]
),
)
param = [
{"attr": "pvalue"},
{"attr": "rvalue"},
{"attr": "intercept"},
{"attr": "slope"},
{"attr": "stderr"},
]
res = linear_trend_timewise(x, param)
res = pd.Series(dict(res))
self.assertAlmostEqual(res['attr_"pvalue"'], 0, places=3)
self.assertAlmostEqual(res['attr_"stderr"'], 0, places=3)
self.assertAlmostEqual(res['attr_"intercept"'], 0, places=3)
self.assertAlmostEqual(res['attr_"slope"'], 1.0, places=3)
def test_linear_trend_timewise_seconds(self):
"""Test linear_trend_timewise function with second intervals."""
# Try with different days
x = pd.Series(
[0, 1 / float(3600), 2 / float(3600), 3 / float(3600)],
index=pd.DatetimeIndex(
[
"2018-01-01 04:00:01",
"2018-01-01 04:00:02",
"2018-01-01 04:00:03",
"2018-01-01 04:00:04",
]
),
)
param = [
{"attr": "pvalue"},
{"attr": "rvalue"},
{"attr": "intercept"},
{"attr": "slope"},
{"attr": "stderr"},
]
res = linear_trend_timewise(x, param)
res = pd.Series(dict(res))
self.assertAlmostEqual(res['attr_"pvalue"'], 0, places=3)
self.assertAlmostEqual(res['attr_"stderr"'], 0, places=3)
self.assertAlmostEqual(res['attr_"intercept"'], 0, places=3)
self.assertAlmostEqual(res['attr_"slope"'], 1.0, places=3)
def test_linear_trend_timewise_years(self):
"""Test linear_trend_timewise function with year intervals."""
# Try with different days
x = pd.Series(
[
0,
365 * 24,
365 * 48,
365 * 72 + 24,
], # Add 24 to the last one since it's a leap year
index=pd.DatetimeIndex(
[
"2018-01-01 04:00:00",
"2019-01-01 04:00:00",
"2020-01-01 04:00:00",
"2021-01-01 04:00:00",
]
),
)
param = [
{"attr": "pvalue"},
{"attr": "rvalue"},
{"attr": "intercept"},
{"attr": "slope"},
{"attr": "stderr"},
]
res = linear_trend_timewise(x, param)
res = pd.Series(dict(res))
self.assertAlmostEqual(res['attr_"pvalue"'], 0, places=3)
self.assertAlmostEqual(res['attr_"stderr"'], 0, places=3)
self.assertAlmostEqual(res['attr_"intercept"'], 0, places=3)
self.assertAlmostEqual(res['attr_"slope"'], 1.0, places=3)
def test_change_quantiles(self):
"""Test change_quantiles function when changing from `sum` to `np.sum`."""
np.random.seed(0)
res = change_quantiles(np.random.rand(10000) * 1000, 0.1, 0.2, False, "mean")
self.assertAlmostEqual(res, -0.9443846621365727)
def test_count_above(self):
self.assertEqualPandasSeriesWrapper(count_above, [1] * 10, 1, t=1)
self.assertEqualPandasSeriesWrapper(count_above, list(range(10)), 1, t=0)
self.assertEqualPandasSeriesWrapper(count_above, list(range(10)), 0.5, t=5)
self.assertEqualPandasSeriesWrapper(
count_above, [0.1, 0.2, 0.3] * 3, 2 / 3, t=0.2
)
self.assertEqualPandasSeriesWrapper(count_above, [np.NaN, 0, 1] * 3, 2 / 3, t=0)
self.assertEqualPandasSeriesWrapper(
count_above, [np.NINF, 0, 1] * 3, 2 / 3, t=0
)
self.assertEqualPandasSeriesWrapper(count_above, [np.PINF, 0, 1] * 3, 1, t=0)
self.assertEqualPandasSeriesWrapper(
count_above, [np.NaN, 0, 1] * 3, 0, t=np.NaN
)
self.assertEqualPandasSeriesWrapper(
count_above, [np.NINF, 0, np.PINF] * 3, 1, t=np.NINF
)
self.assertEqualPandasSeriesWrapper(
count_above, [np.PINF, 0, 1] * 3, 1 / 3, t=np.PINF
)
def test_count_below(self):
self.assertEqualPandasSeriesWrapper(count_below, [1] * 10, 1, t=1)
self.assertEqualPandasSeriesWrapper(count_below, list(range(10)), 1 / 10, t=0)
self.assertEqualPandasSeriesWrapper(count_below, list(range(10)), 6 / 10, t=5)
self.assertEqualPandasSeriesWrapper(
count_below, [0.1, 0.2, 0.3] * 3, 2 / 3, t=0.2
)
self.assertEqualPandasSeriesWrapper(count_below, [np.NaN, 0, 1] * 3, 1 / 3, t=0)
self.assertEqualPandasSeriesWrapper(
count_below, [np.NINF, 0, 1] * 3, 2 / 3, t=0
)
self.assertEqualPandasSeriesWrapper(
count_below, [np.PINF, 0, 1] * 3, 1 / 3, t=0
)
self.assertEqualPandasSeriesWrapper(
count_below, [np.NaN, 0, 1] * 3, 0, t=np.NaN
)
self.assertEqualPandasSeriesWrapper(
count_below, [np.NINF, 0, np.PINF] * 3, 1 / 3, t=np.NINF
)
self.assertEqualPandasSeriesWrapper(
count_below, [np.PINF, 0, 1] * 3, 1, t=np.PINF
)
def test_benford_correlation(self):
# A test with list of random values
np.random.seed(42)
random_list = np.random.uniform(size=100)
# Fibonacci series is known to match the Newcomb-Benford's Distribution
fibonacci_list = [0, 1]
for i in range(2, 200):
fibonacci_list.append(fibonacci_list[i - 1] + fibonacci_list[i - 2])
# A list of equally distributed digits (returns NaN)
equal_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# A list containing NaN
list_with_nan = [
1.354,
0.058,
0.055,
0.99,
3.15,
np.nan,
0.3,
2.3,
0,
0.59,
0.74,
]
self.assertAlmostEqual(benford_correlation(random_list), 0.39458056)
self.assertAlmostEqual(benford_correlation(fibonacci_list), 0.998003988)
self.assertAlmostEqual(benford_correlation(list_with_nan), 0.10357511)
self.assertIsNaN(benford_correlation(equal_list))
def test_query_similarity_count(self):
np.random.seed(42)
query = np.random.uniform(size=10)
threshold = 3.0
x = np.random.uniform(size=100)
# z-normalized Euclidean distances
param = [{"query": query}]
self.assertAlmostEqual(query_similarity_count(x, param=param)[0][1], 0.0)
param = [{"query": query, "threshold": threshold}]
self.assertAlmostEqual(query_similarity_count(x, param=param)[0][1], 6.0)
# non-normalized Euclidean distances
param = [{"query": query, "normalize": False}]
self.assertAlmostEqual(query_similarity_count(x, param=param)[0][1], 0.0)
param = [{"query": query, "threshold": threshold, "normalize": False}]
self.assertAlmostEqual(query_similarity_count(x, param=param)[0][1], 91.0)
def test_matrix_profile_window(self):
# Test matrix profile output with specified window
np.random.seed(9999)
ts = np.random.uniform(size=2 ** 10)
w = 2 ** 5
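        # copy the first window of length w to position w + 100 as well, so the
        # series contains an exact repeated motif for the matrix profile to find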
subq = ts[0:w]
ts[0:w] = subq
ts[w + 100 : w + 100 + w] = subq
param = [
{"threshold": 0.98, "windows": 36, "feature": "min"},
{"threshold": 0.98, "windows": 36, "feature": "max"},
{"threshold": 0.98, "windows": 36, "feature": "mean"},
{"threshold": 0.98, "windows": 36, "feature": "median"},
{"threshold": 0.98, "windows": 36, "feature": "25"},
{"threshold": 0.98, "windows": 36, "feature": "75"},
]
self.assertAlmostEqual(matrix_profile(ts, param=param)[0][1], 2.825786727580335)
def test_matrix_profile_no_window(self):
# Test matrix profile output with no window specified
np.random.seed(9999)
ts = np.random.uniform(size=2 ** 10)
w = 2 ** 5
subq = ts[0:w]
ts[0:w] = subq
ts[w + 100 : w + 100 + w] = subq
param = [
{"threshold": 0.98, "feature": "min"},
{"threshold": 0.98, "feature": "max"},
{"threshold": 0.98, "feature": "mean"},
{"threshold": 0.98, "feature": "median"},
{"threshold": 0.98, "feature": "25"},
{"threshold": 0.98, "feature": "75"},
]
# Test matrix profile output with no window specified
self.assertAlmostEqual(matrix_profile(ts, param=param)[0][1], 2.825786727580335)
def test_matrix_profile_nan(self):
# Test matrix profile of NaNs (NaN output)
ts = np.random.uniform(size=2 ** 6)
ts[:] = np.nan
param = [
{"threshold": 0.98, "windows": None, "feature": "min"},
{"threshold": 0.98, "windows": None, "feature": "max"},
{"threshold": 0.98, "windows": None, "feature": "mean"},
{"threshold": 0.98, "windows": None, "feature": "median"},
{"threshold": 0.98, "windows": None, "feature": "25"},
{"threshold": 0.98, "windows": None, "feature": "75"},
]
self.assertTrue(np.isnan(matrix_profile(ts, param=param)[0][1]))
class FriedrichTestCase(TestCase):
def test_estimate_friedrich_coefficients(self):
"""
Estimate friedrich coefficients
"""
default_params = {"m": 3, "r": 30}
# active Brownian motion
ds = velocity(tau=3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(10000, v0=np.zeros(1))
coeff = _estimate_friedrich_coefficients(v[:, 0], **default_params)
self.assertLess(abs(coeff[-1]), 0.0001)
# Brownian motion
ds = velocity(tau=2.0 / 0.3 - 3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(10000, v0=np.zeros(1))
coeff = _estimate_friedrich_coefficients(v[:, 0], **default_params)
self.assertLess(abs(coeff[-1]), 0.0001)
def test_friedrich_coefficients(self):
# Test binning error returns vector of NaNs
param = [{"coeff": coeff, "m": 2, "r": 30} for coeff in range(4)]
x = np.zeros(100)
res = pd.Series(dict(friedrich_coefficients(x, param)))
expected_index = [
"coeff_0__m_2__r_30",
"coeff_1__m_2__r_30",
"coeff_2__m_2__r_30",
"coeff_3__m_2__r_30",
]
self.assertCountEqual(list(res.index), expected_index)
self.assertTrue(np.sum(np.isnan(res)), 3)
def test_friedrich_number_of_returned_features_is_equal_to_number_of_parameters(
self,
):
"""unit test for issue 501"""
param = [
{"m": 3, "r": 5, "coeff": 2},
{"m": 3, "r": 5, "coeff": 3},
{"m": 3, "r": 2, "coeff": 3},
]
x = np.zeros(100)
res = pd.Series(dict(friedrich_coefficients(x, param)))
expected_index = ["coeff_2__m_3__r_5", "coeff_3__m_3__r_5", "coeff_3__m_3__r_2"]
self.assertCountEqual(list(res.index), expected_index)
self.assertTrue(np.sum(np.isnan(res)), 3)
def test_friedrich_equal_to_snapshot(self):
param = [{"coeff": coeff, "m": 2, "r": 30} for coeff in range(4)]
        x = np.array(
            [
                -0.53, -0.61, -1.26, -0.88, -0.34, 0.58, 2.86, -0.47, 0.78, -0.45,
                -0.27, 0.43, 1.72, 0.26, 1.02, -0.09, 0.65, 1.49, -0.95, -1.02,
                -0.64, -1.63, -0.71, -0.43, -1.69, 0.05, 1.58, 1.1, 0.55, -1.02,
            ]
        )
res = pd.Series(dict(friedrich_coefficients(x, param)))
self.assertAlmostEqual(res["coeff_0__m_2__r_30"], -0.24536975738843042)
self.assertAlmostEqual(res["coeff_1__m_2__r_30"], -0.533309548662685)
self.assertAlmostEqual(res["coeff_2__m_2__r_30"], 0.2759399238199404)
|
blue-yonder/tsfresh
|
tests/units/feature_extraction/test_feature_calculations.py
|
Python
|
mit
| 83,172
|
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.utils.text import slugify
from django.utils.safestring import mark_safe
# Create your models here.
class ProductQuerySet(models.query.QuerySet):
def active(self):
return self.filter(active=True)
class ProductManager(models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
def all(self, *args, **kwargs):
return self.get_queryset().active()
def get_related(self, instance):
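        # products sharing any category or the same default category, excluding the instance itself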
products_one = self.get_queryset().filter(categories__in=instance.categories.all())
products_two = self.get_queryset().filter(default=instance.default)
qs = (products_one | products_two).exclude(id=instance.id).distinct()
return qs
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(blank=True, null=True)
price = models.DecimalField(decimal_places=2, max_digits=20)
active = models.BooleanField(default=True)
categories = models.ManyToManyField('Category', blank=True)
default = models.ForeignKey('Category', related_name='default_category', null=True, blank=True)
objects = ProductManager()
class Meta:
ordering = ["-title"]
def __unicode__(self): # def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("product_detail", kwargs={"pk": self.pk})
def get_image_url(self):
img = self.productimage_set.first()
if img:
return img.image.url
return img
class Variation(models.Model):
product = models.ForeignKey(Product)
title = models.CharField(max_length=120)
price = models.DecimalField(decimal_places=2, max_digits=20)
sale_price = models.DecimalField(decimal_places=2, max_digits=20, null=True, blank=True)
active = models.BooleanField(default=True)
inventory = models.IntegerField(null=True, blank=True) # refer none == unlimited amount
def __unicode__(self):
return self.title
def get_price(self):
if self.sale_price is not None:
return self.sale_price
else:
return self.price
def get_html_price(self):
if self.sale_price is not None:
html_text = "<span class='sale-price'>%s</span> <span class='og-price'>%s</span>" % \
(self.sale_price, self.price)
else:
html_text = "<span class='price'>%s</span>" % (self.price)
return mark_safe(html_text)
def get_absolute_url(self):
return self.product.get_absolute_url()
def add_to_cart(self):
return "%s?item=%s&qty=1" % (reverse("cart"), self.id)
def remove_from_cart(self):
return "%s?item=%s&qty=1&delete=True" % (reverse("cart"), self.id)
def get_title(self):
return "%s - %s" % (self.product.title, self.title)
def product_post_saved_receiver(sender, instance, created, *args, **kwargs):
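    # ensure every saved product has at least one variation; create a "Default"
    # variation at the product price when none exists yet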
product = instance
variations = product.variation_set.all()
if variations.count() == 0:
new_var = Variation()
new_var.product = product
new_var.title = "Default"
new_var.price = product.price
new_var.save()
post_save.connect(product_post_saved_receiver, sender=Product)
def image_upload_to(instance, filename):
title = instance.product.title
slug = slugify(title)
    basename, file_extension = filename.rsplit(".", 1)  # split on the last dot so names containing dots keep their extension
new_filename = "%s-%s.%s" % (slug, instance.id, file_extension)
return "products/%s/%s" % (slug, new_filename)
class ProductImage(models.Model):
product = models.ForeignKey(Product)
image = models.ImageField(upload_to=image_upload_to)
def __unicode__(self):
return self.product.title
# Product Category
class Category(models.Model):
title = models.CharField(max_length=120, unique=True)
slug = models.SlugField(unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("category_detail", kwargs={"slug": self.slug})
def image_upload_to_featured(instance, filename):
title = instance.product.title
slug = slugify(title)
    basename, file_extension = filename.rsplit(".", 1)  # split on the last dot so names containing dots keep their extension
new_filename = "%s-%s.%s" % (slug, instance.id, file_extension)
return "products/%s/featured/%s" % (slug, new_filename)
class ProductFeatured(models.Model):
product = models.ForeignKey(Product)
image = models.ImageField(upload_to=image_upload_to_featured)
title = models.CharField(max_length=120, null=True, blank=True)
text = models.CharField(max_length=220, null=True, blank=True)
text_right = models.BooleanField(default=False)
text_css_color = models.CharField(max_length=6, null=True, blank=True)
show_price = models.BooleanField(default=False)
make_image_background = models.BooleanField(default=False)
active = models.BooleanField(default=True)
def __unicode__(self):
return self.product.title
|
dizzy54/ecommerce
|
src/products/models.py
|
Python
|
mit
| 5,256
|
#!/usr/bin/env python2
import ringo_config
cfg = ringo_config.RingoConfig()
import pyximport;pyximport.install(build_dir=cfg.pyximport_build())
import argparse
import random
import numpy as np
import model
from simulation import Simulation, SimParameters, EventType, RearrangementType
def run_L_D_simulation(self, L, D):
# L = duplication length
# D = number of DCJs in each branch.
#
param = self.sim_parameters
# pre_dups (at root) and post_dups (at branches) to achieve 1.5 genes/family in average.
pre_duplications = int(0.43 * param.num_genes / L)
post_duplications = int(0.07 * param.num_genes / L)
post_duplications = [int(0.5 * post_duplications), int(1.5 * post_duplications)]
# post_duplications = [int(1 * post_duplications), int(1 * post_duplications)]
param.pre_duplications = pre_duplications
current_copy_number = None # will init at root
deletion_length_range = xrange(1, param.indel_length + 1)
duplication_length_range = xrange(1, L + 1)
idx = 1
ev_tree = self.sim_tree
for ev_node in ev_tree.preorder_node_iter():
if ev_node.parent_node is None:
# identity genome:
ev_node.value = current_genome = model.Genome.identity(param.num_genes, param.num_chr)
ev_node.events = {ev: 0 for ev in EventType.all}
# add copy number information to track orthologous/paralogous, when duplications are present:
for chromosome in current_genome.chromosomes:
chromosome.copy_number = [1] * len(chromosome.gene_order)
current_copy_number = current_genome.gene_count()
# pre-duplications:
for i in range(pre_duplications):
Simulation.apply_random_segmental_duplication(current_genome,
range(1, param.duplication_length + 1),
current_copy_number)
ev_node.events[EventType.DUPLICATION] = pre_duplications
# ev_node.edge.length = pre_duplications
if ev_node.label is None:
ev_node.label = "Root"
else:
# evolve genome:
if ev_node.is_internal():
if ev_node.label is None:
ev_node.label = "M%02d" % idx
idx += 1
else: # complete labelling for leaves
ev_node.label = ev_node.taxon.label
current_genome = ev_node.parent_node.value.clone(ev_node.label)
ev_node.value = current_genome
pd = post_duplications.pop()
ev_node.edge.length = D + pd
            # interleave the pd duplications and D rearrangements for this branch in random order
events = [EventType.DUPLICATION] * pd + [EventType.REARRANGEMENT] * D
ev_node.edge.events = {ev: 0 for ev in EventType.all}
random.shuffle(events)
for event in events:
if event == EventType.DUPLICATION:
Simulation.apply_random_segmental_duplication(current_genome, duplication_length_range, current_copy_number)
ev_node.edge.events[event] += 1
elif event == EventType.REARRANGEMENT:
# here, I can also have deletions:
ev = np.random.choice([RearrangementType.REVERSAL, EventType.DELETION], 1,
p=[param.rearrangement_p, param.deletion_p])[0]
if ev == RearrangementType.REVERSAL:
Simulation.apply_random_reversal(current_genome)
ev_node.edge.events[event] += 1
else:
Simulation.apply_random_deletion(current_genome, deletion_length_range)
ev_node.edge.events[EventType.DELETION] += 1
ev_node.events = {ev: ev_node.parent_node.events[ev] + count for ev, count in
ev_node.edge.events.iteritems()}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Simulates rearrangement evolution on a given newick tree")
parser.add_argument("-s", "--sim", type=int, help="Simulate a new birth_death with SIM species")
parser.add_argument("-n", "--num_genes", type=int, default=100, help="Number of genes in the root genome.")
parser.add_argument("-c", "--num_chr", type=int, default=5, help="Number of chromosomes in the root genome.")
parser.add_argument("-L", "-dl", "--duplication_length", type=int, default=5, help="Maximum length of duplication event.")
parser.add_argument("-D", "--rearrangements", type=int, default=5, help="Number of rearrangements.")
parser.add_argument("-o", "--output", type=str, default="sim", help="Name of the output folder.")
parser.add_argument("-dp", "--deletion_p", type=float, default=0.0, help="Percentage of deletions, from 0 to 1.0")
parser.add_argument("-ip", "--insertion_p", type=float, default=0.0, help="Percentage of insertions, from 0 to 1.0")
parser.add_argument("-il", "--indel_length", type=int, default=5, help="Maximum size of indel event in genes.")
parser.add_argument("-d", "--disturb", type=float, default=0,
help="Disturb branch lengths multiplying each by e^r, where r in [-d,+d]. ")
param = parser.parse_args()
# Simulation parameters:
sim_par = SimParameters(num_genes=param.num_genes, num_chr=param.num_chr,
del_p=param.deletion_p, ins_p=param.insertion_p, indel_length=param.indel_length,
duplication_length=param.duplication_length)
# start sim object;
sim = Simulation(param.output, sim_par)
sim.simulate_tree(param.sim)
run_L_D_simulation(sim, param.duplication_length, param.rearrangements)
sim.save_simulation(save_copies=True)
|
pedrofeijao/RINGO
|
src/ringo/LD_simulation.py
|
Python
|
mit
| 5,857
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import math
mu = 0
variance1 = 20000**2
variance2 = 2000**2
sigma1 = math.sqrt(variance1)
sigma2 = math.sqrt(variance2)
x = np.linspace(mu - 3*sigma1, mu + 3*sigma1, 100)
plt.plot(x, stats.norm.pdf(x, mu, sigma1))
x2 = np.linspace(mu - 3*sigma2, mu + 3*sigma2,100)
#x2 = [i for i in x2]
plt.plot(x2, stats.norm.pdf(x2, mu, sigma2))
plt.xlim((-50000,50000))
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
plt.show()
|
jdurbin/sandbox
|
python/plotting/plotnormal.py
|
Python
|
mit
| 568
|
VERSION = (0, 11)
__version__ = '.'.join(map(str, VERSION))
DATE = "2015-02-06"
|
20tab/twentytab-sortable
|
sortable/__init__.py
|
Python
|
mit
| 80
|
#!/usr/bin/env python3
import sys
import os
import path_utils
import generic_run
def puaq():
print("Usage: %s input_file.flac" % path_utils.basename_filtered(__file__))
sys.exit(1)
def convert_flac_to_mp3(input_file, output_file, bitrate):
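    # -ab sets the audio bitrate, -map_metadata 0 copies the tags from the input
    # file, and -id3v2_version 3 writes ID3v2.3 tags for broad player compatibility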
cmd = ["ffmpeg", "-i", input_file, "-ab", bitrate, "-map_metadata", "0", "-id3v2_version", "3", output_file]
v, r = generic_run.run_cmd_simple(cmd)
if not v:
print("Failed to convert file from flac to mp3: %s" % r)
if __name__ == "__main__":
if len(sys.argv) < 2:
puaq()
input_file = sys.argv[1]
output_file = ""
if len(sys.argv) == 3:
output_file = sys.argv[2]
else:
output_file = (input_file.rsplit(".", 1)[0]) + ".mp3"
bitrate = "192k"
convert_flac_to_mp3(input_file, output_file, bitrate)
|
mvendra/mvtools
|
audio/flac_to_mp3.py
|
Python
|
mit
| 823
|
"""
Flask-Validictory
-------------
Simple integration between Flask and Validictory.
"""
import os
from setuptools import setup
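# read __version_info__ straight out of flask_validictory.py so the version is
# maintained in a single place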
module_path = os.path.join(os.path.dirname(__file__), 'flask_validictory.py')
version_line = [line for line in open(module_path)
if line.startswith('__version_info__')][0]
__version__ = '.'.join(eval(version_line.split('__version_info__ = ')[-1]))
setup(
name='Flask-Validictory',
version=__version__,
url='https://github.com/inner-loop/flask-validictory/',
license='MIT',
author='Mark Angrish',
author_email='mark.angrish@innerloop.io',
description='Simple integration between Flask and Validictory.',
long_description=__doc__,
py_modules=['flask_validictory'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask>=0.10.1', 'validictory>=0.9.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
inner-loop/flask-validictory
|
setup.py
|
Python
|
mit
| 1,318
|
from .base import ApplicationVersion, package_version # noqa
|
LostProperty/wsgiappversion
|
wsgiappversion/__init__.py
|
Python
|
mit
| 62
|
# @name <%= app_name %>
# @description
# Models for UserControler.
import json
from src.models import BaseModel
class <%= endpoint %>Model(BaseModel):
_parse_class_name = '<%= table %>'
pass
|
nghiattran/generator-python-parse
|
generators/endpoint/templates/model_template.py
|
Python
|
mit
| 201
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from os import path
import github_update_checker
from setuptools import setup, find_packages
file_path = path.abspath(path.dirname(__file__))
with open(path.join(file_path, "README.md"), encoding="UTF-8") as f:
long_description = f.read()
setup(
name="github_update_checker",
version=github_update_checker.__version__,
description="A simple update checker for github in python",
long_description=long_description,
url="https://github.com/Tellendil/py_github_update_checker",
author="Benjamin Schubert",
author_email="ben.c.schubert@gmail.com",
license="MIT",
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3'
],
keywords="update",
packages=find_packages()
)
|
BenjaminSchubert/py_github_update_checker
|
setup.py
|
Python
|
mit
| 995
|
"""Forms to render HTML input & validate request data."""
from wtforms import Form, BooleanField, DateTimeField, PasswordField
from wtforms import TextAreaField, TextField
from wtforms.validators import Length, required
class AppointmentForm(Form):
"""Render HTML input for Appointment model & validate submissions.
This matches the models.Appointment class very closely. Where
models.Appointment represents the domain and its persistence, this class
represents how to display a form in HTML & accept/reject the results.
"""
title = TextField('Title', [Length(max=255)])
start = DateTimeField('Start', [required()])
end = DateTimeField('End')
allday = BooleanField('All Day')
location = TextField('Location', [Length(max=255)])
description = TextAreaField('Description')
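# A minimal view-side sketch of how this form is typically used (assumed code,
# not part of this module):
#
#     form = AppointmentForm(request.form)
#     if request.method == 'POST' and form.validate():
#         form.populate_obj(appointment)  # copy validated fields onto the model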
class LoginForm(Form):
"""Render HTML input for user login form.
Authentication (i.e. password verification) happens in the view function.
"""
username = TextField('Username', [required()])
password = PasswordField('Password', [required()])
|
abacuspix/NFV_project
|
Instant_Flask_Web_Development/sched/forms.py
|
Python
|
mit
| 1,083
|
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(3,GPIO.OUT)
state=True
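# drive BCM pin 3 high; GPIO.cleanup() is never called, so the pin keeps this
# state after the script exits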
GPIO.output(3,True)
|
raj808569/homeautomationIOT
|
d.py
|
Python
|
mit
| 139
|
from collections.abc import Mapping, Iterable
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
__all__ = ['Cell', 'cells']
# Cell functions
_dll.openmc_extend_cells.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
_dll.openmc_extend_cells.restype = c_int
_dll.openmc_extend_cells.errcheck = _error_handler
_dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_cell_get_id.restype = c_int
_dll.openmc_cell_get_id.errcheck = _error_handler
_dll.openmc_cell_get_fill.argtypes = [
c_int32, POINTER(c_int), POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_get_fill.restype = c_int
_dll.openmc_cell_get_fill.errcheck = _error_handler
_dll.openmc_cell_set_fill.argtypes = [
c_int32, c_int, c_int32, POINTER(c_int32)]
_dll.openmc_cell_set_fill.restype = c_int
_dll.openmc_cell_set_fill.errcheck = _error_handler
_dll.openmc_cell_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_cell_set_id.restype = c_int
_dll.openmc_cell_set_id.errcheck = _error_handler
_dll.openmc_cell_set_temperature.argtypes = [
c_int32, c_double, POINTER(c_int32)]
_dll.openmc_cell_set_temperature.restype = c_int
_dll.openmc_cell_set_temperature.errcheck = _error_handler
_dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_cell_index.restype = c_int
_dll.openmc_get_cell_index.errcheck = _error_handler
_dll.cells_size.restype = c_int
class Cell(_FortranObjectWithID):
"""Cell stored internally.
This class exposes a cell that is stored internally in the OpenMC
library. To obtain a view of a cell with a given ID, use the
:data:`openmc.capi.cells` mapping.
Parameters
----------
index : int
Index in the `cells` array.
Attributes
----------
id : int
ID of the cell
"""
__instances = WeakValueDictionary()
def __new__(cls, uid=None, new=True, index=None):
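        # Reuse a live Python view for this index when one exists; otherwise either
        # allocate a new cell slot in the C layer (new=True) or wrap an existing
        # cell looked up by its ID.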
mapping = cells
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A cell with ID={} has already '
'been allocated.'.format(uid))
index = c_int32()
_dll.openmc_extend_cells(1, index, None)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
cell_id = c_int32()
_dll.openmc_cell_get_id(self._index, cell_id)
return cell_id.value
@id.setter
def id(self, cell_id):
_dll.openmc_cell_set_id(self._index, cell_id)
@property
def fill(self):
fill_type = c_int()
indices = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_get_fill(self._index, fill_type, indices, n)
if fill_type.value == 1:
if n.value > 1:
return [Material(index=i) for i in indices[:n.value]]
else:
index = indices[0]
return Material(index=index)
else:
raise NotImplementedError
@fill.setter
def fill(self, fill):
if isinstance(fill, Iterable):
n = len(fill)
indices = (c_int32*n)(*(m._index if m is not None else -1
for m in fill))
_dll.openmc_cell_set_fill(self._index, 1, n, indices)
elif isinstance(fill, Material):
indices = (c_int32*1)(fill._index)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
elif fill is None:
indices = (c_int32*1)(-1)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
def set_temperature(self, T, instance=None):
"""Set the temperature of a cell
Parameters
----------
T : float
Temperature in K
instance : int or None
Which instance of the cell
"""
        instance = c_int32(instance) if instance is not None else None
        _dll.openmc_cell_set_temperature(self._index, T, instance)
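# A brief usage sketch (an assumption, not taken from this module): once a model
# is loaded through the C API, `cells[cell_id]` returns a Cell view backed by the
# bindings above, e.g.
#
#     c = cells[10]                  # lookup via openmc_get_cell_index
#     c.set_temperature(600.0, 0)    # forwards to openmc_cell_set_temperature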
class _CellMapping(Mapping):
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_cell_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return Cell(index=index.value)
def __iter__(self):
for i in range(len(self)):
yield Cell(index=i).id
def __len__(self):
return _dll.cells_size()
def __repr__(self):
return repr(dict(self))
cells = _CellMapping()
|
wbinventor/openmc
|
openmc/capi/cell.py
|
Python
|
mit
| 5,272
|
import collections
import yaml
import netCDF4
import numpy as np
from pyresample import geometry
from pyresample import kd_tree
from pyresample import utils
from pyresample import grid
from satistjenesten.utils import get_area_filepath
class GenericScene(object):
""" Generic Scene object
It is a parent class to the more customized satellite scenes
Attributes:
config_dict (dict): configuration dictionary that tells you how to read an input file
config_filepath (str): file path to configuration dictionary
scene_filepath (str): file path to input file
"""
def __init__(self):
self.config_dict = None
self.config_filepath = None
self.scene_filepath = None
def parse_yaml_config(self, config_string):
self.config_dict = yaml.load(config_string)
def load_config_from_file(self):
config_fh = open(self.config_filepath, 'r')
self.parse_yaml_config(config_fh)
class SatScene(GenericScene):
def __init__(self):
super(SatScene, self).__init__()
self.bands = collections.OrderedDict()
self.latitudes = None
self.longitudes = None
self.input_filename = None
self.config_dict = None
def get_bands(self):
self.bands = collections.OrderedDict()
band_dicts = self.config_dict['bands']
for (band_name, band_value) in zip(band_dicts.keys(), band_dicts.values()):
band = SatBand()
nc_dataset = get_netcdf_filehandle(self.input_filename)
band.data = nc_dataset.variables[band_name][:]
band.long_name = band_value['long_name']
self.bands[band_name] = band
def get_coordinates(self):
nc_dataset = get_netcdf_filehandle(self.input_filename)
self.latitudes = nc_dataset.variables[self.config_dict['latitudes_name']][:]
self.longitudes = nc_dataset.variables[self.config_dict['longitudes_name']][:]
def load_scene_from_disk(self):
self.load_config_from_file()
self.get_bands()
self.get_coordinates()
def resample_to_area(self):
gridded_scene = GriddedSatScene()
attributes_list_to_pass = ['bands', 'area_def', 'area_name']
self.get_area_def()
copy_attributes(self, gridded_scene, attributes_list_to_pass)
self.swath_area_def = geometry.SwathDefinition(lons=self.longitudes, lats=self.latitudes)
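        # compute the nearest-neighbour mapping once (search radius of 2.5 grid
        # pixels, single neighbour) and reuse it for every band below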
valid_input_index, valid_output_index, index_array, distance_array = \
kd_tree.get_neighbour_info(self.swath_area_def, self.area_def,
self.area_def.pixel_size_x*2.5, neighbours = 1)
bands_number = len(self.bands)
for i, band in enumerate(self.bands.values()):
print "Resampling band {0:d}/{1:d}".format(i+1, bands_number)
swath_data = band.data.copy()
band.data = kd_tree.get_sample_from_neighbour_info('nn', self.area_def.shape,
swath_data,
valid_input_index,
valid_output_index,
index_array)
gridded_scene.gridded = True
return gridded_scene
def resample_to_gac(self):
bands_number = len(self.bands)
for i, band in enumerate(self.bands.values()):
print "Resampling band {0:d} of {0:d}".format(i+1, bands_number)
lac_data = band.data.copy()
gac_data = rescale_lac_array_to_gac(lac_data)
band.data = gac_data
lac_latitudes = self.latitudes.copy()
lac_longitudes = self.longitudes.copy()
gac_longitudes = rescale_lac_array_to_gac(lac_longitudes)
gac_latitudes = rescale_lac_array_to_gac(lac_latitudes)
self.bands['longitude'] = SatBand()
self.bands['latitude'] = SatBand()
self.bands['longitude'].data = gac_longitudes
self.bands['latitude'].data = gac_latitudes
def get_area_def(self):
self.area_def = get_area_def_from_file(self.area_name)
def write_as_netcdf(self):
output_dataset = netCDF4.Dataset(self.output_filepath, 'w')
# create dimensions
ydim, xdim = self.bands.values()[0].data.shape
output_dataset.createDimension('y', ydim)
output_dataset.createDimension('x', xdim)
output_dataset.createDimension('time', None)
# create variables
bands_number = len(self.bands.keys())
for (band_name, band_object) in self.bands.items():
variable = output_dataset.createVariable(band_name,
band_object.data.dtype, ('y', 'x'))
variable[:] = band_object.data
output_dataset.close()
class SwathSatScene(SatScene):
def __init__(self):
super(SwathSatScene, self).__init__()
self.swath_area_def = None
class GriddedSatScene(SatScene):
def __init__(self):
super(GriddedSatScene, self).__init__()
self.area_name = None
self.area_def = None
self.gridded = False
class SatBand(object):
def __init__(self):
self.data = None
self.long_name = None
self.dtype = None
self.unit = None
self.latitude = None
self.longitude = None
def get_netcdf_filehandle(input_filename):
return netCDF4.Dataset(input_filename, 'r')
def copy_attributes(object_from, object_to, attributes_list):
for attribute_name in attributes_list:
if hasattr(object_from, attribute_name):
the_attribute = getattr(object_from, attribute_name)
setattr(object_to, attribute_name, the_attribute)
def get_area_def_from_file(area_name):
area_filepath = get_area_filepath()
return utils.load_area(area_filepath, area_name)
def window_blocks(large_array, window_size):
"""
Split a large 1D array into smaller non-overlapping arrays
Args:
large_array (numpy.ndarray): 1d array to be split in smaller blocks
window_size (int): window size, array shape should be divisible by this number
Returns:
numpy.ndarray: Resulting array with multiple small blocks of size `window_size`
"""
y_size = large_array.shape[0]/window_size
blocks_array = large_array.reshape(y_size, window_size)
return blocks_array
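# Small worked example for window_blocks (illustration only, not original code):
# window_blocks(np.arange(8), 4) reshapes the flat array into two rows of four
# consecutive samples: [[0, 1, 2, 3], [4, 5, 6, 7]].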
def rescale_lac_array_to_gac(lac_array):
"""
Create a GAC AVHRR array by averaging 4 consecutive LAC pixels
Take only every forth scan line, omit the rest
Args:
lac_array (numpy.ndtype): array with scan width of 2001 pixels
Returns:
gac_array (numpy.ndtype): array with scan width of 400 pixels
Note:
Original GAC data contains 401 pixels per scanline, for the sake
of simplicity we take only 400 pixels.
"""
window_size = 5
lac_array_with_omitted_lines = lac_array[::4]
lac_array_2000px = lac_array_with_omitted_lines[:,:-1]
flat_lac_array = lac_array_2000px.flatten()
gac_array_flat = np.mean(window_blocks(flat_lac_array, window_size)[:,:-1], axis=1)
gac_length = gac_array_flat.shape[0]
gac_array_2d = gac_array_flat.reshape(gac_length/400, 400)
return gac_array_2d
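# Shape check for rescale_lac_array_to_gac (illustration only): an (8, 2001) LAC
# block keeps scan lines 0 and 4 -> (2, 2001), drops the last column -> (2, 2000),
# and each 5-pixel window contributes the mean of its first 4 pixels -> (2, 400).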
|
mitkin/avhrr-sic-analysis
|
satistjenesten/data.py
|
Python
|
mit
| 7,349
|
"""kuoteng_bot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
#from telegram_bot.views import _set_webhook
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^bot/', include('telegram_bot.urls')),
url(r'^', include('django_telegrambot.urls')),
]
#_set_webhook()
|
rapirent/toc_project
|
kuoteng_bot/kuoteng_bot/urls.py
|
Python
|
mit
| 972
|