| repo_name | ref | path | copies | content |
|---|---|---|---|---|
specter4mjy/you-get | refs/heads/develop | src/you_get/extractors/tudou.py | 2 | #!/usr/bin/env python
__all__ = ['tudou_download', 'tudou_download_playlist', 'tudou_download_by_id', 'tudou_download_by_iid']
from ..common import *
from xml.dom.minidom import parseString
def tudou_download_by_iid(iid, title, output_dir = '.', merge = True, info_only = False):
data = json.loads(get_decoded_html('http://www.tudou.com/outplay/goto/getItemSegs.action?iid=%s' % iid))
temp = max([data[i] for i in data if 'size' in data[i][0]], key=lambda x:sum([part['size'] for part in x]))
vids, size = [t["k"] for t in temp], sum([t["size"] for t in temp])
urls = [[n.firstChild.nodeValue.strip()
for n in
parseString(
get_html('http://ct.v2.tudou.com/f?id=%s' % vid))
.getElementsByTagName('f')][0]
for vid in vids]
ext = r1(r'http://[\w.]*/(\w+)/[\w.]*', urls[0])
print_info(site_info, title, ext, size)
if not info_only:
download_urls(urls, title, ext, size, output_dir=output_dir, merge = merge)
def tudou_download_by_id(id, title, output_dir = '.', merge = True, info_only = False):
html = get_html('http://www.tudou.com/programs/view/%s/' % id)
iid = r1(r'iid\s*[:=]\s*(\S+)', html)
title = r1(r'kw\s*[:=]\s*[\'\"]([^\n]+?)\'\s*\n', html).replace("\\'", "\'")
tudou_download_by_iid(iid, title, output_dir = output_dir, merge = merge, info_only = info_only)
def tudou_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
# Embedded player
id = r1(r'http://www.tudou.com/v/([^/]+)/', url)
if id:
return tudou_download_by_id(id, title="", output_dir=output_dir, merge=merge, info_only=info_only)
html = get_decoded_html(url)
title = r1(r'kw\s*[:=]\s*[\'\"]([^\n]+?)\'\s*\n', html).replace("\\'", "\'")
assert title
title = unescape_html(title)
vcode = r1(r'vcode\s*[:=]\s*\'([^\']+)\'', html)
if vcode:
from .youku import youku_download_by_vid
if 'stream_id' in kwargs:
return youku_download_by_vid(vcode, title=title, output_dir=output_dir, merge=merge, info_only=info_only, stream_id=kwargs['stream_id'])
else:
return youku_download_by_vid(vcode, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
iid = r1(r'iid\s*[:=]\s*(\d+)', html)
if not iid:
return tudou_download_playlist(url, output_dir, merge, info_only)
tudou_download_by_iid(iid, title, output_dir = output_dir, merge = merge, info_only = info_only)
def parse_playlist(url):
aid = r1(r'http://www.tudou.com/playlist/p/a(\d+)(?:i\d+)?\.html', url)
html = get_decoded_html(url)
if not aid:
aid = r1(r"aid\s*[:=]\s*'(\d+)'", html)
if re.match(r'http://www.tudou.com/albumcover/', url):
atitle = r1(r"title\s*:\s*'([^']+)'", html)
elif re.match(r'http://www.tudou.com/playlist/p/', url):
atitle = r1(r'atitle\s*=\s*"([^"]+)"', html)
else:
raise NotImplementedError(url)
assert aid
assert atitle
import json
#url = 'http://www.tudou.com/playlist/service/getZyAlbumItems.html?aid='+aid
url = 'http://www.tudou.com/playlist/service/getAlbumItems.html?aid='+aid
return [(atitle + '-' + x['title'], str(x['itemId'])) for x in json.loads(get_html(url))['message']]
def tudou_download_playlist(url, output_dir = '.', merge = True, info_only = False):
videos = parse_playlist(url)
for i, (title, id) in enumerate(videos):
print('Processing %s of %s videos...' % (i + 1, len(videos)))
tudou_download_by_iid(id, title, output_dir = output_dir, merge = merge, info_only = info_only)
site_info = "Tudou.com"
download = tudou_download
download_playlist = tudou_download_playlist
|
gengue/django | refs/heads/master | django/contrib/admindocs/urls.py | 574 | from django.conf.urls import url
from django.contrib.admindocs import views
urlpatterns = [
url('^$',
views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
name='django-admindocs-docroot'),
url('^bookmarklets/$',
views.BookmarkletsView.as_view(),
name='django-admindocs-bookmarklets'),
url('^tags/$',
views.TemplateTagIndexView.as_view(),
name='django-admindocs-tags'),
url('^filters/$',
views.TemplateFilterIndexView.as_view(),
name='django-admindocs-filters'),
url('^views/$',
views.ViewIndexView.as_view(),
name='django-admindocs-views-index'),
url('^views/(?P<view>[^/]+)/$',
views.ViewDetailView.as_view(),
name='django-admindocs-views-detail'),
url('^models/$',
views.ModelIndexView.as_view(),
name='django-admindocs-models-index'),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.ModelDetailView.as_view(),
name='django-admindocs-models-detail'),
url('^templates/(?P<template>.*)/$',
views.TemplateDetailView.as_view(),
name='django-admindocs-templates'),
]
|
gsathya/blocker | refs/heads/master | centinel/experiments/__init__.py | 1 | __all__ = ['tcp_connect', 'http_request', 'turkey']
|
c0defreak/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_urllib2_localnet.py | 51 | #!/usr/bin/env python3
import os
import email
import urllib.parse
import urllib.request
import http.server
import unittest
import hashlib
from test import support
threading = support.import_module('threading')
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop_server = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
request_handler)
#print "Serving HTTP on %s port %s" % (self.httpd.server_name,
# self.httpd.server_port)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
# Set the stop flag.
self._stop_server = True
self.join()
self.httpd.server_close()
def run(self):
self.ready.set()
while not self._stop_server:
self.httpd.handle_request()
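# A minimal usage sketch for LoopbackHttpServerThread (illustrative only;
# the test cases below drive it through setUp/tearDown):
#
#   server = LoopbackHttpServerThread(SomeRequestHandlerClass)
#   server.start()
#   server.ready.wait()                    # block until run() has begun
#   url = "http://127.0.0.1:%d/" % server.port
#   ... issue requests against url ...
#   server.stop()                          # set flag, join, close socket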
# Authentication infrastructure
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str.encode("ascii")).hexdigest()
return response == auth_dict["response"]
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write(b"Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if "Proxy-Authorization" not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers["Proxy-Authorization"]
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if auth_dict.get("nonce") not in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib.request uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
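# Worked example of the digest computation performed by _validate_auth
# (RFC 2617, qop="auth"; the concrete values here are illustrative):
#
#   HA1      = md5("tester:Test Realm:test123")
#   HA2      = md5("GET:/index.html")
#   response = md5(HA1 + ":" + nonce + ":" + nc + ":" + cnonce + ":auth:" + HA2)
#
# A request is accepted only when the client's "response" field equals this
# digest and its nonce is one this handler previously issued.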
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
internet has gone down due to a sudden zombie invasion. Its main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
# sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
self.path, "http")
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
"ascii"))
self.wfile.write(b"Our apologies, but our server is down due to "
b"a sudden zombie invasion.")
# Test cases
class ProxyAuthTests(unittest.TestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib.request.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
self.opener = urllib.request.build_opener(
handler, self.proxy_digest_handler)
def tearDown(self):
self.server.stop()
super(ProxyAuthTests, self).tearDown()
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib.error.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
while body:
done = self.wfile.write(body)
body = body[done:]
def do_POST(self):
content_length = self.headers["Content-Length"]
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % {'port':self.port})
if body:
self.send_header("Content-type", "text/plain")
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
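# GetRequestHandler() takes a list of (status_code, headers, body) tuples and
# returns a handler class that pops one tuple per incoming request. For
# example (illustrative values):
#
#   responses = [
#       (302, [("Location", "http://localhost:%(port)s/elsewhere")], ""),
#       (200, [], b"final body"),
#   ]
#   handler = GetRequestHandler(responses)
#
# Header values may embed %(port)s; send_head() substitutes the handler's
# port attribute into them.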
class TestUrlopen(unittest.TestCase):
"""Tests urllib.request.urlopen using the network.
These tests are not exhaustive. They assume that the file-based tests
already cover the basic interface features reasonably well. There are no
tests exercising the optional 'data' and 'proxies' arguments, and no tests
for transparent redirection have been written.
"""
def setUp(self):
super(TestUrlopen, self).setUp()
self.server = None
def tearDown(self):
if self.server is not None:
self.server.stop()
super(TestUrlopen, self).tearDown()
def urlopen(self, url, data=None, **kwargs):
l = []
f = urllib.request.urlopen(url, data, **kwargs)
try:
# Exercise various methods
l.extend(f.readlines(200))
l.append(f.readline())
l.append(f.read(1024))
l.append(f.read())
finally:
f.close()
return b"".join(l)
def start_server(self, responses=None):
if responses is None:
responses = [(200, [], b"we don't care")]
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
port = self.server.port
handler.port = port
return handler
def start_https_server(self, responses=None, certfile=CERT_localhost):
if not hasattr(urllib.request, 'HTTPSHandler'):
self.skipTest('ssl support required')
from test.ssl_servers import make_https_server
if responses is None:
responses = [(200, [], b"we care a bit")]
handler = GetRequestHandler(responses)
server = make_https_server(self, certfile=certfile, handler_class=handler)
handler.port = server.port
return handler
def test_redirection(self):
expected_response = b"We got here..."
responses = [
(302, [("Location", "http://localhost:%(port)s/somewhere_else")],
""),
(200, [], expected_response)
]
handler = self.start_server(responses)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/", "/somewhere_else"])
def test_chunked(self):
expected_response = b"hello world"
chunked_start = (
b'a\r\n'
b'hello worl\r\n'
b'1\r\n'
b'd\r\n'
b'0\r\n'
)
response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
handler = self.start_server(response)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
def test_404(self):
expected_response = b"Bad bad bad..."
handler = self.start_server([(404, [], expected_response)])
try:
self.urlopen("http://localhost:%s/weeble" % handler.port)
except urllib.error.URLError as f:
data = f.read()
f.close()
else:
self.fail("404 should raise URLError")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/weeble"])
def test_200(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre"])
def test_200_with_parameters(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
b"get=with_feeling")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
def test_https(self):
handler = self.start_https_server()
data = self.urlopen("https://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, b"we care a bit")
def test_https_with_cafile(self):
handler = self.start_https_server(certfile=CERT_localhost)
import ssl
# Good cert
data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_localhost)
self.assertEqual(data, b"we care a bit")
# Bad cert
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
# Good cert, but mismatching hostname
handler = self.start_https_server(certfile=CERT_fakehostname)
with self.assertRaises(ssl.CertificateError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
def test_sending_headers(self):
handler = self.start_server()
req = urllib.request.Request("http://localhost:%s/" % handler.port,
headers={"Range": "bytes=20-39"})
urllib.request.urlopen(req)
self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
def test_basic(self):
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_info(self):
handler = self.start_server()
try:
open_url = urllib.request.urlopen(
"http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "plain")
finally:
self.server.stop()
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
self.assertRaises(IOError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
# no '.' at the end, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib.request.urlopen,
"http://sadflkjsasf.i.nvali.d./")
def test_iteration(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for line in data:
self.assertEqual(line, expected_response)
def test_line_iteration(self):
lines = [b"We\n", b"got\n", b"here\n", b"verylong " * 8192 + b"\n"]
expected_response = b"".join(lines)
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for index, line in enumerate(data):
self.assertEqual(line, lines[index],
"Fetched line number %s doesn't match expected:\n"
" Expected length was %s, got %s" %
(index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
@support.reap_threads
def test_main():
support.run_unittest(ProxyAuthTests, TestUrlopen)
if __name__ == "__main__":
test_main()
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source/Lib/random.py | 5 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
pick weighted random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
import itertools as _itertools
import bisect as _bisect
import os as _os
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits", "choices",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If *a* is an int, all bits are used.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1 (provided for reproducing random
sequences from older versions of Python), the algorithm for str and
bytes generates a narrower range of seeds.
"""
if version == 1 and isinstance(a, (str, bytes)):
a = a.decode('latin-1') if isinstance(a, bytes) else a
x = ord(a[0]) << 7 if a else 0
for c in map(ord, a):
x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF
x ^= len(a)
a = -2 if x == -1 else x
if version == 2 and isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
random = self.random
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
# There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
if n == 0:
raise ValueError("Boundary cannot be zero")
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
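# Worked example of the rejection step above (illustrative numbers): with
# maxsize = 2**53 and n = 10, rem = maxsize % n leaves a small tail of the
# [0, maxsize) range that would map unevenly onto [0, n). Rejecting any
# r >= limit discards that tail, so int(r*maxsize) % n is uniform on [0, n).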
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence') from None
return seq[i]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
Optional argument random is a 0-argument function returning a
random float in [0.0, 1.0); if it is the default None, the
standard random.random will be used.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population or is negative")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
def choices(self, population, weights=None, *, cum_weights=None, k=1):
"""Return a k sized list of population elements chosen with replacement.
If the relative weights or cumulative weights are not specified,
the selections are made with equal probability.
"""
random = self.random
if cum_weights is None:
if weights is None:
_int = int
total = len(population)
return [population[_int(random() * total)] for i in range(k)]
cum_weights = list(_itertools.accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != len(population):
raise ValueError('The number of weights does not match the population')
bisect = _bisect.bisect
total = cum_weights[-1]
return [population[bisect(cum_weights, random() * total)] for i in range(k)]
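# Worked example for choices() (illustrative): population ['a', 'b', 'c']
# with weights [1, 2, 7] accumulates to cum_weights [1, 3, 10]; a draw of
# random() * 10 == 4.2 bisects to index 2, selecting 'c', which therefore
# wins 70% of draws on average.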
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
try:
c = 0.5 if mode is None else (mode - low) / (high - low)
except ZeroDivisionError:
return low
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * _sqrt(u * c)
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1/beta)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.0)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.0))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g\n' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
choices = _inst.choices
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if hasattr(_os, "fork"):
_os.register_at_fork(after_in_child=_inst.seed)
if __name__ == '__main__':
_test()
|
atris/gpdb | refs/heads/master | gpMgmt/bin/gppylib/__init__.py | 166 | # $Id: $
# Make sure Python loads the modules of this package via absolute paths.
from os.path import abspath as _abspath
__path__[0] = _abspath(__path__[0])
|
asedunov/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/db/backends/sqlite3/introspection.py | 88 | import re
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
import re
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'max_length': int(m.group(1))})
raise KeyError
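# Example lookups (illustrative): with d = FlexibleFieldLookupDict(),
# d['integer'] returns 'IntegerField' via the plain dictionary path, while
# d['varchar(30)'] falls through to the regex branch and returns
# ('CharField', {'max_length': 30}).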
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [(info['name'], info['type'], None, None, None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchone()
if not result:
continue
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li+1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
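# Worked example (illustrative): if sqlite_master stores
#   CREATE TABLE book ("id" integer PRIMARY KEY,
#       "author_id" integer REFERENCES "author" ("id"))
# then for table "book" the second column description matches the regex, and
# assuming "id" is the first column of "author", get_relations() returns
# {1: (0, 'author')}.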
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
indexes = {}
for info in self._table_info(cursor, table_name):
indexes[info['name']] = {'primary_key': info['pk'] != 0,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
if not unique:
continue
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name]['unique'] = True
return indexes
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
|
kaidokert/cookiecutter-django | refs/heads/master | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/contrib/__init__.py | 14224 | # -*- coding: utf-8 -*-
|
dnxbjyj/python-basic | refs/heads/master | gui/wxpython/wxPython-demo-4.0.1/samples/dataview/IndexListModel.py | 1 | import sys
import wx
import wx.dataview as dv
#----------------------------------------------------------------------
# This model class provides the data to the view when it is asked for.
# Since it is a list-only model (no hierachical data) then it is able
# to be referenced by row rather than by item object, so in this way
# it is easier to comprehend and use than other model types. In this
# example we also provide a Compare function to assist with sorting of
# items in our model. Notice that the data items in the data model
# object don't ever change position due to a sort or column
# reordering. The view manages all of that and maps view rows and
# columns to the model's rows and columns as needed.
#
# For this example our data is stored in a simple list of lists. In
# real life you can use whatever you want or need to hold your data.
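# For instance (illustrative row; see the musicdata sample used in main()),
# each inner list holds [id, artist, title, genre] as strings:
#   ['1', 'Bad English', 'The Price Of Love', 'Rock']
# which matches the columns that TestPanel appends below.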
class TestModel(dv.DataViewIndexListModel):
def __init__(self, data, log):
dv.DataViewIndexListModel.__init__(self, len(data))
self.data = data
self.log = log
# This method is called to provide the data object for a
# particular row,col
def GetValueByRow(self, row, col):
return str(self.data[row][col])
# This method is called when the user edits a data item in the view.
def SetValueByRow(self, value, row, col):
self.log.write("SetValue: (%d,%d) %s\n" % (row, col, value))
self.data[row][col] = value
return True
# Report how many columns this model provides data for.
def GetColumnCount(self):
return len(self.data[0])
# Specify the data type for a column
def GetColumnType(self, col):
return "string"
# Report the number of rows in the model
def GetCount(self):
#self.log.write('GetCount')
return len(self.data)
# Called to check if non-standard attributes should be used in the
# cell at (row, col)
def GetAttrByRow(self, row, col, attr):
##self.log.write('GetAttrByRow: (%d, %d)' % (row, col))
if col == 3:
attr.SetColour('blue')
attr.SetBold(True)
return True
return False
# This is called to assist with sorting the data in the view. The
# first two args are instances of the DataViewItem class, so we
# need to convert them to row numbers with the GetRow method.
# Then it's just a matter of fetching the right values from our
# data set and comparing them. The return value is -1, 0, or 1,
# just like Python 2's cmp() function (removed in Python 3, so the
# comparison below is computed directly).
def Compare(self, item1, item2, col, ascending):
if not ascending: # swap sort order?
item2, item1 = item1, item2
row1 = self.GetRow(item1)
row2 = self.GetRow(item2)
if col == 0:
a, b = int(self.data[row1][col]), int(self.data[row2][col])
else:
a, b = self.data[row1][col], self.data[row2][col]
return (a > b) - (a < b)
def DeleteRows(self, rows):
# make a copy since we'll be sorting(mutating) the list
rows = list(rows)
# use reverse order so the indexes don't change as we remove items
rows.sort(reverse=True)
for row in rows:
# remove it from our data structure
del self.data[row]
# notify the view(s) using this model that it has been removed
self.RowDeleted(row)
def AddRow(self, value):
# update data structure
self.data.append(value)
# notify views
self.RowAppended()
class TestPanel(wx.Panel):
def __init__(self, parent, log, model=None, data=None):
self.log = log
wx.Panel.__init__(self, parent, -1)
# Create a dataview control
self.dvc = dv.DataViewCtrl(self,
style=wx.BORDER_THEME
| dv.DV_ROW_LINES # nice alternating bg colors
#| dv.DV_HORIZ_RULES
| dv.DV_VERT_RULES
| dv.DV_MULTIPLE
)
# Create an instance of our simple model...
if model is None:
self.model = TestModel(data, log)
else:
self.model = model
# ...and associate it with the dataview control. Models can
# be shared between multiple DataViewCtrls, so this does not
# assign ownership like many things in wx do. There is some
# internal reference counting happening so you don't really
# need to hold a reference to it either, but we do for this
# example so we can fiddle with the model from the widget
# inspector or whatever.
self.dvc.AssociateModel(self.model)
# Now we create some columns. The second parameter is the
# column number within the model that the DataViewColumn will
# fetch the data from. This means that you can have views
# using the same model that show different columns of data, or
# that they can be in a different order than in the model.
self.dvc.AppendTextColumn("Artist", 1, width=170, mode=dv.DATAVIEW_CELL_EDITABLE)
self.dvc.AppendTextColumn("Title", 2, width=260, mode=dv.DATAVIEW_CELL_EDITABLE)
self.dvc.AppendTextColumn("Genre", 3, width=80, mode=dv.DATAVIEW_CELL_EDITABLE)
# There are Prepend methods too, and also convenience methods
# for other data types but we are only using strings in this
# example. You can also create a DataViewColumn object
# yourself and then just use AppendColumn or PrependColumn.
c0 = self.dvc.PrependTextColumn("Id", 0, width=40)
# The DataViewColumn object is returned from the Append and
# Prepend methods, and we can modify some of its properties
# like this.
c0.Alignment = wx.ALIGN_RIGHT
c0.Renderer.Alignment = wx.ALIGN_RIGHT
c0.MinWidth = 40
# Through the magic of Python we can also access the columns
# as a list via the Columns property. Here we'll mark them
# all as sortable and reorderable.
for c in self.dvc.Columns:
c.Sortable = True
c.Reorderable = True
# Let's change our minds and not let the first col be moved.
c0.Reorderable = False
# set the Sizer property (same as SetSizer)
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.dvc, 1, wx.EXPAND)
# Add some buttons to help out with the tests
b1 = wx.Button(self, label="New View", name="newView")
self.Bind(wx.EVT_BUTTON, self.OnNewView, b1)
b2 = wx.Button(self, label="Add Row")
self.Bind(wx.EVT_BUTTON, self.OnAddRow, b2)
b3 = wx.Button(self, label="Delete Row(s)")
self.Bind(wx.EVT_BUTTON, self.OnDeleteRows, b3)
btnbox = wx.BoxSizer(wx.HORIZONTAL)
btnbox.Add(b1, 0, wx.LEFT|wx.RIGHT, 5)
btnbox.Add(b2, 0, wx.LEFT|wx.RIGHT, 5)
btnbox.Add(b3, 0, wx.LEFT|wx.RIGHT, 5)
self.Sizer.Add(btnbox, 0, wx.TOP|wx.BOTTOM, 5)
# Bind some events so we can see what the DVC sends us
self.Bind(dv.EVT_DATAVIEW_ITEM_EDITING_DONE, self.OnEditingDone, self.dvc)
self.Bind(dv.EVT_DATAVIEW_ITEM_VALUE_CHANGED, self.OnValueChanged, self.dvc)
def OnNewView(self, evt):
f = wx.Frame(None, title="New view, shared model", size=(600,400))
TestPanel(f, self.log, self.model)
b = f.FindWindowByName("newView")
b.Disable()
f.Show()
def OnDeleteRows(self, evt):
# Remove the selected row(s) from the model. The model will take care
# of notifying the view (and any other observers) that the change has
# happened.
items = self.dvc.GetSelections()
rows = [self.model.GetRow(item) for item in items]
self.model.DeleteRows(rows)
def OnAddRow(self, evt):
# Add some bogus data to a new row in the model's data
id = len(self.model.data) + 1
value = [str(id),
'new artist %d' % id,
'new title %d' % id,
'genre %d' % id]
self.model.AddRow(value)
def OnEditingDone(self, evt):
self.log.write("OnEditingDone\n")
def OnValueChanged(self, evt):
self.log.write("OnValueChanged\n")
#----------------------------------------------------------------------
def main():
from data import musicdata
app = wx.App()
frm = wx.Frame(None, title="IndexListModel sample", size=(700,500))
pnl = TestPanel(frm, sys.stdout, data=musicdata)
frm.Show()
app.MainLoop()
#----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
gurneyalex/partner-contact | refs/heads/8.0 | partner_contact_nationality/models.py | 22 | # -*- coding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import fields, models
class Partner(models.Model):
"""Partner with nationality."""
_inherit = "res.partner"
nationality_id = fields.Many2one("res.country", "Nationality")
|
sherjilozair/dqn | refs/heads/master | plot.py | 1 | import numpy as np
import sys
import re
import subprocess
def get_job_name(jobid):
cmd = "sacct --format=\"JobName%30\" -j {}".format(jobid)
result = subprocess.check_output(cmd, shell=True)
return str(result).split("\\n")[2].strip()
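# A sketch of the sacct output that get_job_name assumes (hypothetical job):
#
#                        JobName
# ------------------------------
#                  my_experiment
#
# header, separator, then the row holding the job name. Under Python 3,
# check_output returns bytes, so str(result) is a repr containing literal
# "\n" character pairs, and split("\\n")[2] picks out that third line.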
jobids = sys.argv[2:]
exp_name = [get_job_name(jobid) for jobid in jobids]
exp_data = [np.load("{}/{}.npy".format(jobid, jobid)) for jobid in jobids]
N = int(sys.argv[1])
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
exp_data = map(lambda d: np.convolve(d, np.ones((N,))/N, mode='valid'), exp_data)
plt.figure(figsize=(16, 8))
for data in exp_data:
plt.plot(data)
plt.xlabel('#episodes')
plt.ylabel('returns')
plt.legend(exp_name, loc='best')
plotpath = "plot_{}.png".format("_".join(jobids))
print ("Saved to {}".format(plotpath))
plt.savefig(plotpath)
|
canadacoin/test2 | refs/heads/master | contrib/wallettools/walletunlock.py | 2299 | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8332")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60) |
vprime/puuuu | refs/heads/master | env/lib/python2.7/site-packages/paramiko/message.py | 1 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Implementation of an SSH2 "message".
"""
import struct
import cStringIO
from paramiko import util
class Message (object):
"""
An SSH2 message is a stream of bytes that encodes some combination of
strings, integers, bools, and infinite-precision integers (known in Python
as longs). This class builds or breaks down such a byte stream.
Normally you don't need to deal with anything this low-level, but it's
exposed for people implementing custom extensions, or features that
paramiko doesn't support yet.
"""
def __init__(self, content=None):
"""
Create a new SSH2 message.
:param str content:
the byte stream to use as the message content (passed in only when
decomposing a message).
"""
        if content is not None:
self.packet = cStringIO.StringIO(content)
else:
self.packet = cStringIO.StringIO()
def __str__(self):
"""
Return the byte stream content of this message, as a string.
"""
return self.packet.getvalue()
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return 'paramiko.Message(' + repr(self.packet.getvalue()) + ')'
def rewind(self):
"""
Rewind the message to the beginning as if no items had been parsed
out of it yet.
"""
self.packet.seek(0)
def get_remainder(self):
"""
Return the bytes (as a `str`) of this message that haven't already been
parsed and returned.
"""
position = self.packet.tell()
remainder = self.packet.read()
self.packet.seek(position)
return remainder
def get_so_far(self):
"""
Returns the `str` bytes of this message that have been parsed and
returned. The string passed into a message's constructor can be
regenerated by concatenating ``get_so_far`` and `get_remainder`.
"""
position = self.packet.tell()
self.rewind()
return self.packet.read(position)
def get_bytes(self, n):
"""
Return the next ``n`` bytes of the message (as a `str`), without
decomposing into an int, decoded string, etc. Just the raw bytes are
returned. Returns a string of ``n`` zero bytes if there weren't ``n``
bytes remaining in the message.
"""
b = self.packet.read(n)
max_pad_size = 1<<20 # Limit padding to 1 MB
if len(b) < n and n < max_pad_size:
return b + '\x00' * (n - len(b))
return b
def get_byte(self):
"""
Return the next byte of the message, without decomposing it. This
is equivalent to `get_bytes(1) <get_bytes>`.
:return:
the next (`str`) byte of the message, or ``'\000'`` if there aren't
any bytes remaining.
"""
return self.get_bytes(1)
def get_boolean(self):
"""
Fetch a boolean from the stream.
"""
b = self.get_bytes(1)
return b != '\x00'
def get_int(self):
"""
Fetch an int from the stream.
:return: a 32-bit unsigned `int`.
"""
return struct.unpack('>I', self.get_bytes(4))[0]
def get_int64(self):
"""
Fetch a 64-bit int from the stream.
:return: a 64-bit unsigned integer (`long`).
"""
return struct.unpack('>Q', self.get_bytes(8))[0]
def get_mpint(self):
"""
Fetch a long int (mpint) from the stream.
:return: an arbitrary-length integer (`long`).
"""
return util.inflate_long(self.get_string())
def get_string(self):
"""
Fetch a `str` from the stream. This could be a byte string and may
contain unprintable characters. (It's not unheard of for a string to
contain another byte-stream message.)
"""
return self.get_bytes(self.get_int())
def get_list(self):
"""
Fetch a `list` of `strings <str>` from the stream.
These are trivially encoded as comma-separated values in a string.
"""
return self.get_string().split(',')
def add_bytes(self, b):
"""
Write bytes to the stream, without any formatting.
:param str b: bytes to add
"""
self.packet.write(b)
return self
def add_byte(self, b):
"""
Write a single byte to the stream, without any formatting.
:param str b: byte to add
"""
self.packet.write(b)
return self
def add_boolean(self, b):
"""
Add a boolean value to the stream.
:param bool b: boolean value to add
"""
if b:
self.add_byte('\x01')
else:
self.add_byte('\x00')
return self
def add_int(self, n):
"""
Add an integer to the stream.
:param int n: integer to add
"""
self.packet.write(struct.pack('>I', n))
return self
def add_int64(self, n):
"""
Add a 64-bit int to the stream.
:param long n: long int to add
"""
self.packet.write(struct.pack('>Q', n))
return self
def add_mpint(self, z):
"""
Add a long int to the stream, encoded as an infinite-precision
integer. This method only works on positive numbers.
:param long z: long int to add
"""
self.add_string(util.deflate_long(z))
return self
def add_string(self, s):
"""
Add a string to the stream.
:param str s: string to add
"""
self.add_int(len(s))
self.packet.write(s)
return self
def add_list(self, l):
"""
Add a list of strings to the stream. They are encoded identically to
a single string of values separated by commas. (Yes, really, that's
how SSH2 does it.)
:param list l: list of strings to add
"""
self.add_string(','.join(l))
return self
def _add(self, i):
if type(i) is str:
return self.add_string(i)
elif type(i) is int:
return self.add_int(i)
elif type(i) is long:
if i > 0xffffffffL:
return self.add_mpint(i)
else:
return self.add_int(i)
elif type(i) is bool:
return self.add_boolean(i)
elif type(i) is list:
return self.add_list(i)
else:
raise Exception('Unknown type')
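    # Note: _add serializes a long as an mpint only when it exceeds
    # 0xffffffff, and as a plain 32-bit int otherwise, so the wire format
    # depends on the value; this is the non-determinism the add() docstring
    # below warns about.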
def add(self, *seq):
"""
Add a sequence of items to the stream. The values are encoded based
on their type: str, int, bool, list, or long.
.. warning::
Longs are encoded non-deterministically. Don't use this method.
:param seq: the sequence of items
"""
for item in seq:
self._add(item)
|
hack4sec/ws-cli | refs/heads/master | classes/threads/ParamsBruterThread.py | 1 | # -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Thread class for ParamsBruter modules
"""
import threading
import Queue
import time
import copy
import re
import pprint
from requests.exceptions import ChunkedEncodingError, ConnectionError
from classes.threads.HttpThread import HttpThread
from classes.Registry import Registry
class ParamsBruterThread(HttpThread):
""" Thread class for ParamsBrute modules """
queue = None
method = None
template = None
mask_symbol = None
counter = None
retested_words = None
last_action = 0
ignore_words_re = None
queue_is_empty = False
last_word = ""
def __init__(
self, queue, protocol, host, url, max_params_length, value, method, mask_symbol, not_found_re,
not_found_size, not_found_codes, retest_codes, delay, ignore_words_re,
counter, result):
super(ParamsBruterThread, self).__init__()
self.retested_words = {}
self.queue = queue
self.protocol = protocol.lower()
self.host = host
self.url = url
self.mask_symbol = mask_symbol
self.counter = counter
self.result = result
self.value = value
self.done = False
self.max_params_length = int(max_params_length)
self.ignore_words_re = False if not len(ignore_words_re) else re.compile(ignore_words_re)
self.not_found_re = False if not len(not_found_re) else re.compile(not_found_re)
self.not_found_size = int(not_found_size)
self.method = method.lower()
not_found_codes = not_found_codes.split(',')
not_found_codes.append('404')
self.not_found_codes = list(set(not_found_codes))
self.retest_codes = list(set(retest_codes.split(','))) if len(retest_codes) else []
self.delay = int(delay)
self.retest_delay = int(Registry().get('config')['params_bruter']['retest_delay'])
self.http = copy.deepcopy(Registry().get('http'))
self.logger = Registry().get('logger')
self.retest_limit = int(Registry().get('config')['dafs']['retest_limit'])
def build_params_str(self):
params_str = "" if not len(self.last_word) else "{0}={1}&".format(self.last_word, self.value)
self.last_word = ""
while len(params_str) < self.max_params_length:
try:
word = self.queue.get()
except Queue.Empty:
self.queue_is_empty = True
break
if not len(word.strip()) or (self.ignore_words_re and self.ignore_words_re.findall(word)):
continue
self.counter.up()
params_str += "{0}={1}&".format(word, self.value)
self.last_word = word
return params_str[:-(len(self.last_word) + 3)]
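    # For illustration (hypothetical wordlist ['aa', 'bb', 'cc'], value '1'
    # and a small max_params_length): this returns "aa=1&bb=1&" -- the word
    # that overflowed the limit ('cc') is stripped off and kept in
    # self.last_word so the next call starts from it. Note the closing slice
    # assumes a one-character value.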
def request_params(self, params):
full_url = self.protocol + "://" + self.host + self.url
return self.http.get(full_url + params) if \
self.method == 'get' else \
self.http.post(full_url, data=params, headers={'Content-Type': 'application/x-www-form-urlencoded'})
def run(self):
""" Run thread """
need_retest = False
while not self.done:
self.last_action = int(time.time())
if self.delay:
time.sleep(self.delay)
try:
if not need_retest:
params_str = self.build_params_str()
try:
resp = self.request_params(params_str)
except ConnectionError:
need_retest = True
self.http.change_proxy()
continue
if self.is_retest_need(params_str, resp):
time.sleep(self.retest_delay)
need_retest = True
continue
positive_item = False
if self.is_response_right(resp):
param_found = False
for one_param in params_str.split("&"):
try:
resp = self.request_params(one_param)
except ConnectionError:
need_retest = True
self.http.change_proxy()
continue
if self.is_response_right(resp):
self.result.append(one_param)
param_found = True
found_item = one_param
if param_found is False:
self.result.append(params_str)
found_item = params_str
positive_item = True
self.log_item(found_item, resp, positive_item)
self.check_positive_limit_stop(self.result)
need_retest = False
if self.queue_is_empty:
self.done = True
break
except ChunkedEncodingError as e:
self.logger.ex(e)
except BaseException as e:
try:
if str(e).count('Cannot connect to proxy'):
need_retest = True
else:
self.logger.ex(e)
except UnicodeDecodeError:
pass
except UnboundLocalError:
self.logger.ex(e)
finally:
pass
|
Benoss/django-cookiecutter | refs/heads/master | tests.py | 1 | from cookiecutter.main import cookiecutter
cookiecutter(".", no_input=True, output_dir="./tmp/")
|
eXcomm/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/pylib/gyp/easy_xml_test.py | 2698 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
        converted = '&lt;test&gt;\'"&#xD;&amp;&#xA;foo'
        converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
feelobot/compose | refs/heads/master | compose/cli/main.py | 1 | from __future__ import print_function
from __future__ import unicode_literals
from inspect import getdoc
from operator import attrgetter
import logging
import re
import signal
import sys
from docker.errors import APIError
import dockerpty
from .. import __version__
from .. import legacy
from ..const import DEFAULT_TIMEOUT
from ..project import NoSuchService, ConfigurationError
from ..service import BuildError, NeedsBuildError
from ..config import parse_environment
from ..progress_stream import StreamOutputError
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import yesno, get_version_info
log = logging.getLogger(__name__)
def main():
setup_logging()
try:
command = TopLevelCommand()
command.sys_dispatch()
except KeyboardInterrupt:
log.error("\nAborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
log.error(e.msg)
sys.exit(1)
except NoSuchCommand as e:
log.error("No such command: %s", e.command)
log.error("")
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
sys.exit(1)
except APIError as e:
log.error(e.explanation)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
except StreamOutputError as e:
log.error(e)
sys.exit(1)
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
def setup_logging():
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
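# For example (hypothetical docstring): run against text containing
#
#   Commands:
#     build    Build services
#     up       Create and start containers
#
# parse_doc_section("commands:", text) returns that whole indented
# "Commands:" block as a single stripped string.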
class TopLevelCommand(Command):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs View output from containers
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
up Create and start containers
migrate-to-labels Recreate containers to add labels
version Show the Docker-Compose version information
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = get_version_info('compose')
return options
def build(self, project, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--no-cache Do not use cache when building the image.
"""
no_cache = bool(options.get('--no-cache', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache)
def help(self, project, options):
"""
Get help on a command.
Usage: help COMMAND
"""
handler = self.get_handler(options['COMMAND'])
raise SystemExit(getdoc(handler))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
"""
containers = project.containers(service_names=options['SERVICE'], stopped=True)
monochrome = options['--no-color']
print("Attaching to", list_containers(containers))
LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
def port(self, project, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
project.containers(service_names=options['SERVICE'], stopped=True) +
project.containers(service_names=options['SERVICE'], one_off=True),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, project, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
"""
insecure_registry = options['--allow-insecure-ssl']
project.pull(
service_names=options['SERVICE'],
insecure_registry=insecure_registry
)
def rm(self, project, options):
"""
Remove stopped service containers.
Usage: rm [options] [SERVICE...]
Options:
-f, --force Don't ask to confirm removal
-v Remove volumes associated with containers
"""
all_containers = project.containers(service_names=options['SERVICE'], stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
project.remove_stopped(
service_names=options['SERVICE'],
v=options.get('-v', False)
)
else:
print("No stopped containers")
def run(self, project, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run container in the background, print
new container name.
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
--service-ports Run command with the service's ports enabled and mapped
to the host.
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
"""
service = project.get_service(options['SERVICE'])
insecure_registry = options['--allow-insecure-ssl']
if not options['--no-deps']:
deps = service.get_linked_names()
if len(deps) > 0:
project.up(
service_names=deps,
start_deps=True,
allow_recreate=False,
insecure_registry=insecure_registry,
)
tty = True
if options['-d'] or options['-T'] or not sys.stdin.isatty():
tty = False
if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS']
else:
command = service.options.get('command')
container_options = {
'command': command,
'tty': tty,
'stdin_open': not options['-d'],
'detach': options['-d'],
}
if options['-e']:
container_options['environment'] = parse_environment(options['-e'])
if options['--entrypoint']:
container_options['entrypoint'] = options.get('--entrypoint')
if options['--rm']:
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
try:
container = service.create_container(
quiet=True,
one_off=True,
insecure_registry=insecure_registry,
**container_options
)
except APIError as e:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False,
)
raise e
if options['-d']:
service.start_container(container)
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
if options['--rm']:
project.client.remove_container(container.id)
sys.exit(exit_code)
def scale(self, project, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
for s in options['SERVICE=NUM']:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError('Number of containers for service "%s" is not a '
'number' % service_name)
project.get_service(service_name).scale(num, timeout=timeout)
def start(self, project, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
project.start(service_names=options['SERVICE'])
def stop(self, project, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, project, options):
"""
Restart running containers.
Usage: restart [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.restart(service_names=options['SERVICE'], timeout=timeout)
def up(self, project, options):
"""
Builds, (re)creates, starts, and attaches to containers for a service.
Unless they are already running, this command also starts any linked services.
The `docker-compose up` command aggregates the output of each container. When
the command exits, all containers are stopped. Running `docker-compose up -d`
starts the containers in the background and leaves them running.
If there are existing containers for a service, and the service's configuration
or image was changed after the container's creation, `docker-compose up` picks
up the changes by stopping and recreating the containers (preserving mounted
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
flag.
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
Usage: up [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run containers in the background,
print new container names.
--no-color Produce monochrome output.
--no-deps Don't start linked services.
--force-recreate Recreate containers even if their configuration and
image haven't changed. Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
when attached or when containers are already
running. (default: 10)
"""
insecure_registry = options['--allow-insecure-ssl']
detached = options['-d']
monochrome = options['--no-color']
start_deps = not options['--no-deps']
allow_recreate = not options['--no-recreate']
force_recreate = options['--force-recreate']
service_names = options['SERVICE']
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
if force_recreate and not allow_recreate:
raise UserError("--force-recreate and --no-recreate cannot be combined.")
to_attach = project.up(
service_names=service_names,
start_deps=start_deps,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
insecure_registry=insecure_registry,
do_build=not options['--no-build'],
timeout=timeout
)
if not detached:
print("Attaching to", list_containers(to_attach))
log_printer = LogPrinter(to_attach, attach_params={"logs": True}, monochrome=monochrome)
try:
log_printer.run()
finally:
def handler(signal, frame):
project.kill(service_names=service_names)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout)
def migrate_to_labels(self, project, _options):
"""
Recreate containers to add labels
If you're coming from Compose 1.2 or earlier, you'll need to remove or
migrate your existing containers after upgrading Compose. This is
because, as of version 1.3, Compose uses Docker labels to keep track
of containers, and so they need to be recreated with labels added.
If Compose detects containers that were created without labels, it
will refuse to run so that you don't end up with two sets of them. If
you want to keep using your existing containers (for example, because
they have data volumes you want to preserve) you can migrate them with
the following command:
docker-compose migrate-to-labels
Alternatively, if you're not worried about keeping them, you can
remove them - Compose will just create new ones.
docker rm -f myapp_web_1 myapp_db_1 ...
Usage: migrate-to-labels
"""
legacy.migrate_project_to_labels(project)
def version(self, project, options):
"""
        Show version information
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def list_containers(containers):
return ", ".join(c.name for c in containers)
|
sunny414/tryton-client | refs/heads/master | tryton/gui/window/view_form/view/form.py | 1 | #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import operator
from functools import reduce
import gtk
import gettext
from interface import ParserView
_ = gettext.gettext
class ViewForm(ParserView):
def __init__(self, screen, widget, children=None, state_widgets=None,
notebooks=None, cursor_widget='', children_field=None):
super(ViewForm, self).__init__(screen, widget, children, state_widgets,
notebooks, cursor_widget, children_field)
self.view_type = 'form'
for widget in self.state_widgets:
if isinstance(widget, gtk.Button):
widget.connect('clicked', self.button_clicked)
# Force to display the first time it switches on a page
# This avoids glitch in position of widgets
display_done = {}
for notebook in notebooks:
def switch(notebook, page, page_num):
if page_num not in display_done.setdefault(notebook, []):
notebook.grab_focus()
display_done[notebook].append(page_num)
self.display()
notebook.connect('switch-page', switch)
self.widgets = children
for widgets in self.widgets.itervalues():
for widget in widgets:
widget.view = self
vbox = gtk.VBox()
vp = gtk.Viewport()
vp.set_shadow_type(gtk.SHADOW_NONE)
vp.add(self.widget)
scroll = gtk.ScrolledWindow()
scroll.add(vp)
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroll.set_placement(gtk.CORNER_TOP_LEFT)
viewport = gtk.Viewport()
viewport.set_shadow_type(gtk.SHADOW_ETCHED_IN)
viewport.add(scroll)
vbox.pack_start(viewport, expand=True, fill=True)
self.widget = vbox
def __getitem__(self, name):
return self.widgets[name][0]
def destroy(self):
for widget_name in self.widgets.keys():
for widget in self.widgets[widget_name]:
widget.destroy()
del self.widgets[widget_name]
self.widget.destroy()
self.widget = None
self.widgets = None
self.screen = None
self.state_widgets = None
def cancel(self):
for widgets in self.widgets.itervalues():
for widget in widgets:
widget.cancel()
def set_value(self, focused_widget=False):
record = self.screen.current_record
if record:
for name, widgets in self.widgets.iteritems():
if name in record.group.fields:
field = record.group.fields[name]
for widget in widgets:
if (not focused_widget
or widget.widget.is_focus()
or (isinstance(widget.widget, gtk.Container)
and widget.widget.get_focus_child())):
widget.set_value(record, field)
def sel_ids_get(self):
if self.screen.current_record:
return [self.screen.current_record.id]
return []
def selected_records(self):
if self.screen.current_record:
return [self.screen.current_record]
return []
@property
def modified(self):
return any(w.modified for widgets in self.widgets.itervalues()
for w in widgets)
def reset(self):
record = self.screen.current_record
if record:
for name, widgets in self.widgets.iteritems():
field = record.group.fields.get(name)
if field and 'valid' in field.get_state_attrs(record):
for widget in widgets:
field.get_state_attrs(record)['valid'] = True
widget.display(record, field)
def display(self):
record = self.screen.current_record
if record:
# Force to set fields in record
# Get first the lazy one to reduce number of requests
fields = [(name, field.attrs.get('loading', 'eager'))
for name, field in record.group.fields.iteritems()]
fields.sort(key=operator.itemgetter(1), reverse=True)
for field, _ in fields:
record[field].get(record)
for name, widgets in self.widgets.iteritems():
field = None
if record:
field = record.group.fields.get(name)
if field:
field.state_set(record)
for widget in widgets:
widget.display(record, field)
for widget in self.state_widgets:
widget.state_set(record)
return True
def set_cursor(self, new=False, reset_view=True):
if reset_view:
for notebook in self.notebooks:
notebook.set_current_page(0)
if self.cursor_widget in self.widgets:
self.widgets[self.cursor_widget][0].grab_focus()
elif not self.widget.has_focus():
self.widgets[self.cursor_widget][0].grab_focus()
record = self.screen.current_record
position = reduce(lambda x, y: x + len(y), self.widgets, 0)
focus_widget = None
if record:
for name, widgets in self.widgets.iteritems():
for widget in widgets:
field = record.group.fields.get(name)
if not field:
continue
if not field.get_state_attrs(record).get('valid', True):
if widget.position > position:
continue
position = widget.position
focus_widget = widget
if focus_widget:
for notebook in self.notebooks:
for i in range(notebook.get_n_pages()):
child = notebook.get_nth_page(i)
if focus_widget.widget.is_ancestor(child):
notebook.set_current_page(i)
focus_widget.grab_focus()
def button_clicked(self, widget):
record = self.screen.current_record
fields = self.get_fields()
if not record.validate(fields):
self.screen.display()
return
else:
widget.set_sensitive(False)
try:
self.screen.button(widget.attrs)
finally:
widget.set_sensitive(True)
|
pescobar/easybuild-easyblocks | refs/heads/master | easybuild/easyblocks/c/chapel.py | 1 | ##
# This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild
#
# Copyright:: Copyright 2012-2019 Uni.Lu/LCSB, NTUA
# Authors:: Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/
##
"""
EasyBuild support for Chapel, implemented as an easyblock
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_Chapel(ConfigureMake):
"""Support for building Chapel."""
def __init__(self, *args, **kwargs):
"""Initialize Chapel-specific variables."""
super(EB_Chapel, self).__init__(*args, **kwargs)
self.build_in_installdir = True
def configure_step(self):
"""No configure step for Chapel."""
pass
# building is done via make, so taken care of by ConfigureMake easyblock
def install_step(self):
"""Installation of Chapel has already been done as part of the build procedure"""
pass
def sanity_check_step(self):
"""Custom sanity check for Chapel."""
libpath = os.path.join('lib', 'linux64', 'gnu', 'comm-none', 'substrate-none', 'seg-none',
'mem-default', 'tasks-fifo', 'threads-pthreads', 'atomics-intrinsics')
custom_paths = {
'files': ['bin/linux64/chpl', 'bin/linux64/chpldoc',
os.path.join(libpath, 'libchpl.a'), os.path.join(libpath, 'main.o')],
'dirs': []
}
super(EB_Chapel, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""
        A dictionary of possible directories to look for; this is needed since bin/linux64 of chapel is non-standard
"""
return {
'PATH': ['bin', 'bin/linux64', 'bin64'],
'LD_LIBRARY_PATH': ['lib', 'lib/linux64', 'lib64'],
}
|
danielquinn/django_polymorphic | refs/heads/master | example/pexp/management/commands/pcmd.py | 4 | # -*- coding: utf-8 -*-
"""
This module is a scratchpad for general development, testing & debugging.
"""
from django.core.management.base import NoArgsCommand
from django.db.models import connection
from pprint import pprint
from pexp.models import *
def reset_queries():
connection.queries=[]
def show_queries():
print; print 'QUERIES:',len(connection.queries); pprint(connection.queries); print; connection.queries=[]
class Command(NoArgsCommand):
help = ""
def handle_noargs(self, **options):
Project.objects.all().delete()
a=Project.objects.create(topic="John's gathering")
b=ArtProject.objects.create(topic="Sculpting with Tim", artist="T. Turner")
c=ResearchProject.objects.create(topic="Swallow Aerodynamics", supervisor="Dr. Winter")
print Project.objects.all()
print
ModelA.objects.all().delete()
a=ModelA.objects.create(field1='A1')
b=ModelB.objects.create(field1='B1', field2='B2')
c=ModelC.objects.create(field1='C1', field2='C2', field3='C3')
print ModelA.objects.all()
print
|
Mistobaan/tensorflow | refs/heads/master | tensorflow/contrib/meta_graph_transform/__init__.py | 85 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for applying the Graph Transform tool to a MetaGraphDef."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['meta_graph_transform']
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
nagyistoce/edx-platform | refs/heads/master | common/djangoapps/student/tests/test_long_username_email.py | 123 | # -*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.core.urlresolvers import reverse
class TestLongUsernameEmail(TestCase):
def setUp(self):
super(TestLongUsernameEmail, self).setUp()
self.url = reverse('create_account')
self.url_params = {
'username': 'username',
'email': 'foo_bar' + '@bar.com',
'name': 'foo bar',
'password': '123',
'terms_of_service': 'true',
'honor_code': 'true',
}
def test_long_username(self):
"""
Test username cannot be more than 30 characters long.
"""
self.url_params['username'] = 'username' * 4
response = self.client.post(self.url, self.url_params)
# Status code should be 400.
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Username cannot be more than 30 characters long",
)
def test_long_email(self):
"""
Test email cannot be more than 75 characters long.
"""
self.url_params['email'] = '{0}@bar.com'.format('foo_bar' * 15)
response = self.client.post(self.url, self.url_params)
# Status code should be 400.
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Email cannot be more than 75 characters long",
)
|
HPPTECH/hpp_IOSTressTest | refs/heads/master | Resources/pyOCD-0.5.1/pyOCD/interface/__init__.py | 9 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
from hidapi_backend import HidApiUSB
from pyusb_backend import PyUSB
from pywinusb_backend import PyWinUSB
INTERFACE = {
'hidapiusb': HidApiUSB,
'pyusb': PyUSB,
'pywinusb': PyWinUSB
}
# Allow user to override backend with an environment variable.
usb_backend = os.getenv('PYOCD_USB_BACKEND', "")
# Check validity of backend env var.
if usb_backend and ((usb_backend not in INTERFACE.keys()) or (not INTERFACE[usb_backend].isAvailable)):
logging.error("Invalid USB backend specified in PYOCD_USB_BACKEND: " + usb_backend)
usb_backend = ""
# Select backend based on OS and availability.
if not usb_backend:
if os.name == "nt":
# Prefer hidapi over pyWinUSB for Windows, since pyWinUSB has known bug(s)
if HidApiUSB.isAvailable:
usb_backend = "hidapiusb"
elif PyWinUSB.isAvailable:
usb_backend = "pywinusb"
elif os.name == "posix":
# Select hidapi for OS X and pyUSB for Linux.
if os.uname()[0] == 'Darwin':
usb_backend = "hidapiusb"
else:
usb_backend = "pyusb"
|
intel-hpdd/intel-manager-for-lustre | refs/heads/master | tests/feature/cli/features/steps/helpers.py | 1 | from nose.tools import *
from behave import *
def fail(msg=None):
raise AssertionError(msg)
@given("the {entity} count should be {count}")
@then("the {entity} count should be {count}")
def step(context, entity, count):
from chroma_core.models import ManagedHost, ManagedFilesystem, ManagedTarget, ManagedOst, ManagedMgs, ManagedMdt
entity_map = {
"server": ManagedHost,
"filesystem": ManagedFilesystem,
"target": ManagedTarget,
"mgt": ManagedMgs,
"mdt": ManagedMdt,
"ost": ManagedOst,
}
eq_(entity_map[entity].objects.count(), int(count))
@given("there should be {count} lines of output")
@then("there should be {count} lines of output")
def step(context, count):
context.stdout.seek(0)
# NB: We're hard-coding -1 lines for the header -- this may not
# be robust enough for the future.
output_line_count = len(context.stdout.readlines()) - 1
eq_(output_line_count, int(count))
@given("the {entity} {field} on {subject} should be {value}")
@then("the {entity} {field} on {subject} should be {value}")
def step(context, entity, field, subject, value):
entity_map = {"server": "host"}
from chroma_cli.api import ApiHandle
ah = ApiHandle()
try:
endpoint = entity_map[entity]
except KeyError:
endpoint = entity
resource = ah.endpoints[endpoint].show(subject)
import re
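    # Supports comparisons against another attribute, e.g. a step ending in
    # "should be the same as fqdn" (hypothetical field name) resolves the
    # expected value from the resource itself.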
match = re.match("^the same as (\w+)$", value)
if match:
value = resource.all_attributes[match.group(1)]
eq_(resource.all_attributes[field], value)
@given("the config parameter {key} should be set to {value}")
@then("the config parameter {key} should be set to {value}")
def step(context, key, value):
from chroma_cli.defaults import defaults
if value == "the default":
eq_(getattr(context.cli_config, key), defaults[key])
elif value == "True":
eq_(getattr(context.cli_config, key), True)
else:
eq_(getattr(context.cli_config, key), value)
@given("the {testkey} host contact test should {result}")
@then("the {testkey} host contact test should {result}")
def step(context, testkey, result):
value = {"fail": False, "succeed": True}[result]
kwargs = {testkey: value}
# This business of reaching into context._runner.hooks is necessitated
# by the lack of a good place to put these things. Sigh.
context._runner.hooks["patch_test_host_contact_task"](context, kwargs)
context.cli_failure_expected = not value
@given("the boot_time on {hostname} has been recorded")
def step(context, hostname):
from chroma_cli.api import ApiHandle
ah = ApiHandle()
resource = ah.endpoints["host"].show(hostname)
context.server_boot_time = resource.all_attributes["boot_time"]
@then("the boot_time on {hostname} should reflect a reboot")
def step(context, hostname):
from chroma_cli.api import ApiHandle
ah = ApiHandle()
resource = ah.endpoints["host"].show(hostname)
context.test_case.assertGreater(resource.all_attributes["boot_time"], context.server_boot_time)
|
fabianvaccaro/pygums | refs/heads/master | pythonLibs/mahotas-1.1.0/mahotas/euler.py | 2 | # Copyright (C) 2008-2010, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT
import numpy as np
from .convolve import convolve
_euler_lookup4 = np.array([
0, 1, 1, 0,
1, 0, 2, -1,
1, 2, 0, -1,
0, -1, -1, 0,
])/4.
_euler_lookup8 = np.array([
0, 1, 1, 0,
1, 0, -2, -1,
1, -2, 0, -1,
0, -1, -1, 0,
])/4.
_powers = np.array([
[1, 2],
[4, 8]
])
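# Convolving a binary image with _powers encodes each 2x2 neighbourhood as a
# 4-bit index in 0..15; the lookup tables above give that neighbourhood's
# quarter-contribution to the Euler number, which euler() sums below.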
__all__ = ['euler']
def euler(f, n=8, mode='constant'):
'''
euler_nr = euler(f, n=8)
Compute the Euler number of image f
The Euler number is also known as the Euler characteristic given that many
other mathematical objects are also known as Euler numbers.
Parameters
----------
f : ndarray
A 2-D binary image
n : int, optional
Connectivity, one of (4,8). default: 8
mode : {'reflect', 'nearest', 'wrap', 'mirror', 'constant' [default]}
How to handle borders
Returns
-------
euler_nr : int
Euler number
    References
    ----------
    http://en.wikipedia.org/wiki/Euler_characteristic

    The following algorithm is used:
*A Fast Algorithm for Computing the Euler Number of an Image and its VLSI
Implementation*, doi: 10.1109/ICVD.2000.812628
'''
if n == 8:
lookup = _euler_lookup8
elif n == 4:
lookup = _euler_lookup4
else:
raise ValueError('mahotas.euler: Connectivity must be 4 or 8')
    if f.dtype != bool:
        assert np.all((f == 0) | (f == 1)), 'mahotas.euler: Non-binary image'
f = (f != 0)
value = convolve(f.astype(_powers.dtype), _powers, mode=mode)
return lookup[value].sum()
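# A minimal usage sketch (hypothetical image, not part of mahotas): a single
# foreground pixel is one connected component with no holes, so its Euler
# number is 1.
#
#   import numpy as np
#   img = np.zeros((5, 5), bool)
#   img[2, 2] = True
#   assert euler(img) == 1.0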
|
jmlb23/python | refs/heads/master | python3.*/tiposFunc.py | 1 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
# functions with static type annotations
# curious, because this does not raise a TypeError
# very important: classes, modules and functions do create a new scope, but
# the scope we are used to seeing delimited by braces in C, PHP, C++, Java
# or C# does not exist here; if, for, while, else and the other flow-control
# structures do not create a new scope
# http://stackoverflow.com/questions/5331047/why-does-python-let-me-define-a-variable-in-one-scope-but-use-it-in-another
# https://docs.python.org/2/tutorial/classes.html#python-scopes-and-namespaces
# in Python there is no declaration, only assignment
val= "asdads"
def func(val:int) -> int:
return val
print(func(2.3))
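# prints 2.3: annotations such as val: int are hints only and are never
# enforced at runtime, which is why no TypeError is raised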
|
revanthkolli/osf.io | refs/heads/develop | website/addons/figshare/views/hgrid.py | 3 | # -*- coding: utf-8 -*-
from website.util import rubeus
from ..api import Figshare
def figshare_hgrid_data(node_settings, auth, parent=None, **kwargs):
node = node_settings.owner
if node_settings.figshare_type == 'project':
item = Figshare.from_settings(node_settings.user_settings).project(node_settings, node_settings.figshare_id)
else:
item = Figshare.from_settings(node_settings.user_settings).article(node_settings, node_settings.figshare_id)
if not node_settings.figshare_id or not node_settings.has_auth or not item:
return
#TODO Test me
#Throw error if neither
node_settings.figshare_title = item.get('title') or item['items'][0]['title']
node_settings.save()
return [
rubeus.build_addon_root(
node_settings, u'{0}:{1}'.format(node_settings.figshare_title or "Unnamed {0}".format(node_settings.figshare_type or ''), node_settings.figshare_id), permissions=auth,
nodeUrl=node.url, nodeApiUrl=node.api_url,
)
]
|
mscuthbert/abjad | refs/heads/master | abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeContainer_insert.py | 2 | # -*- encoding: utf-8 -*-
from abjad import *
def test_rhythmtreetools_RhythmTreeContainer_insert_01():
leaf_a = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
leaf_b = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
leaf_c = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=2)
container = rhythmtreetools.RhythmTreeContainer()
assert container.children == ()
container.insert(0, leaf_a)
assert container.children == (leaf_a,)
container.insert(0, leaf_b)
assert container.children == (leaf_b, leaf_a)
container.insert(1, leaf_c)
assert container.children == (leaf_b, leaf_c, leaf_a) |
tomasreimers/tensorflow-emscripten | refs/heads/master | tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py | 9 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and helper functions for creating Stochastic Tensors.
`StochasticTensor` objects wrap `Distribution` objects. Their
values may be samples from the underlying distribution, or the distribution
mean (as governed by `value_type`). These objects provide a `loss`
method for use when sampling from a non-reparameterized distribution.
The `loss`method is used in conjunction with `stochastic_graph.surrogate_loss`
to produce a single differentiable loss in stochastic graphs having
both continuous and discrete stochastic nodes.
## Stochastic Tensor Classes
@@BaseStochasticTensor
@@StochasticTensor
## Stochastic Tensor Value Types
@@MeanValue
@@SampleValue
@@value_type
@@get_current_value_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import threading
import six
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators as sge
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
STOCHASTIC_TENSOR_COLLECTION = "_stochastic_tensor_collection_"
@six.add_metaclass(abc.ABCMeta)
class BaseStochasticTensor(object):
"""Base Class for Tensor-like objects that emit stochastic values."""
def __init__(self):
    # Add self to this graph's Stochastic Tensor collection for
# purposes of later performing correct surrogate loss calculation.
ops.add_to_collection(STOCHASTIC_TENSOR_COLLECTION, self)
@abc.abstractproperty
def name(self):
pass
@abc.abstractproperty
def dtype(self):
pass
@abc.abstractproperty
def graph(self):
pass
@abc.abstractmethod
def value(self, name=None):
pass
@abc.abstractmethod
def loss(self, sample_loss):
"""Returns the term to add to the surrogate loss.
This method is called by `surrogate_loss`. The input `sample_loss` should
have already had `stop_gradient` applied to it. This is because the
surrogate_loss usually provides a Monte Carlo sample term of the form
`differentiable_surrogate * sample_loss` where `sample_loss` is considered
constant with respect to the input for purposes of the gradient.
Args:
sample_loss: `Tensor`, sample loss downstream of this `StochasticTensor`.
Returns:
Either `None` or a `Tensor`.
"""
raise NotImplementedError("surrogate_loss not implemented")
@staticmethod
def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
raise ValueError("%s: Ref type is not supported." % v)
return v.value()
# pylint: disable=protected-access
ops.register_tensor_conversion_function(
BaseStochasticTensor, BaseStochasticTensor._tensor_conversion_function)
# pylint: enable=protected-access
class _StochasticValueType(object):
"""Interface for the ValueType classes.
This is the base class for MeanValue, SampleValue, and their descendants.
"""
def pushed_above(self, unused_value_type):
pass
def popped_above(self, unused_value_type):
pass
def declare_inputs(self, unused_stochastic_tensor, unused_inputs_dict):
pass
@abc.abstractproperty
def stop_gradient(self):
"""Whether the value should be wrapped in stop_gradient.
StochasticTensors must respect this property.
"""
pass
class MeanValue(_StochasticValueType):
def __init__(self, stop_gradient=False):
self._stop_gradient = stop_gradient
@property
def stop_gradient(self):
return self._stop_gradient
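# For illustration (a sketch, not from the TensorFlow docs): under MeanValue,
# st.value() returns dist.mean() rather than a sample, so for a Normal
# distribution it is simply mu (possibly broadcast against sigma's shape).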
class SampleValue(_StochasticValueType):
"""Draw samples, possibly adding new outer dimensions along the way.
This ValueType draws samples from StochasticTensors run within its
context, increasing the rank according to the requested shape.
Examples:
```python
mu = tf.zeros((2,3))
sigma = tf.ones((2, 3))
with sg.value_type(sg.SampleValue()):
st = sg.StochasticTensor(
tf.contrib.distributions.Normal, mu=mu, sigma=sigma)
# draws 1 sample and does not reshape
assertEqual(st.value().get_shape(), (2, 3))
```
```python
mu = tf.zeros((2,3))
sigma = tf.ones((2, 3))
with sg.value_type(sg.SampleValue(4)):
st = sg.StochasticTensor(
tf.contrib.distributions.Normal, mu=mu, sigma=sigma)
# draws 4 samples each with shape (2, 3) and concatenates
assertEqual(st.value().get_shape(), (4, 2, 3))
```
"""
def __init__(self, shape=(), stop_gradient=False):
"""Sample according to shape.
For the given StochasticTensor `st` using this value type,
the shape of `st.value()` will match that of
`st.distribution.sample(shape)`.
Args:
shape: A shape tuple or int32 tensor. The sample shape.
Default is a scalar: take one sample and do not change the size.
stop_gradient: If `True`, StochasticTensors' values are wrapped in
`stop_gradient`, to avoid backpropagation through.
"""
self._shape = shape
self._stop_gradient = stop_gradient
@property
def shape(self):
return self._shape
@property
def stop_gradient(self):
return self._stop_gradient
# Keeps track of how a StochasticTensor's value should be accessed.
# Used by value_type and get_current_value_type below.
_STOCHASTIC_VALUE_STACK = collections.defaultdict(list)
@contextlib.contextmanager
def value_type(dist_value_type):
"""Creates a value type context for any StochasticTensor created within.
Typical usage:
```
with sg.value_type(sg.MeanValue(stop_gradients=True)):
st = sg.StochasticTensor(tf.contrib.distributions.Normal, mu=mu,
sigma=sigma)
```
In the example above, `st.value()` (or equivalently, `tf.identity(st)`) will
be the mean value of the Normal distribution, i.e., `mu` (possibly
broadcasted to the shape of `sigma`). Furthermore, because the `MeanValue`
was marked with `stop_gradients=True`, this value will have been wrapped
in a `stop_gradients` call to disable any possible backpropagation.
Args:
dist_value_type: An instance of `MeanValue`, `SampleValue`, or
any other stochastic value type.
Yields:
A context for `StochasticTensor` objects that controls the
value created when they are initialized.
Raises:
TypeError: if `dist_value_type` is not an instance of a stochastic value
type.
"""
if not isinstance(dist_value_type, _StochasticValueType):
raise TypeError("dist_value_type must be a Distribution Value Type")
thread_id = threading.current_thread().ident
stack = _STOCHASTIC_VALUE_STACK[thread_id]
if stack:
stack[-1].pushed_above(dist_value_type)
stack.append(dist_value_type)
yield
stack.pop()
if stack:
stack[-1].popped_above(dist_value_type)
class NoValueTypeSetError(ValueError):
pass
def get_current_value_type():
thread_id = threading.current_thread().ident
if not _STOCHASTIC_VALUE_STACK[thread_id]:
raise NoValueTypeSetError(
"No value type currently set for this thread (%s). Did you forget to "
"wrap 'with stochastic_graph.value_type(...)'?" % thread_id)
return _STOCHASTIC_VALUE_STACK[thread_id][-1]
class StochasticTensor(BaseStochasticTensor):
"""StochasticTensor is a BaseStochasticTensor backed by a distribution."""
def __init__(self,
dist,
name="StochasticTensor",
dist_value_type=None,
loss_fn=sge.score_function):
"""Construct a `StochasticTensor`.
`StochasticTensor` is backed by the `dist` distribution and its `value`
    method will return the same value each time it is called. Which value is
    returned is controlled by the `dist_value_type` (defaults to
`SampleValue`).
    Some distributions' sample functions are not differentiable (e.g. a sample
    from a discrete distribution like a Bernoulli), so differentiating with
    respect to parameters upstream of the sample requires a gradient estimator
    such as the score function estimator. This is accomplished by passing a
differentiable `loss_fn` to the `StochasticTensor`, which
defaults to a function whose derivative is the score function estimator.
Calling `stochastic_graph.surrogate_loss(final_losses)` will call
`loss()` on every `StochasticTensor` upstream of final losses.
`loss()` will return None for `StochasticTensor`s backed by
reparameterized distributions; it will also return None if the value type is
    `MeanValue` or if `loss_fn=None`.
Args:
dist: an instance of `Distribution`.
name: a name for this `StochasticTensor` and its ops.
dist_value_type: a `_StochasticValueType`, which will determine what the
`value` of this `StochasticTensor` will be. If not provided, the
value type set with the `value_type` context manager will be used.
loss_fn: callable that takes
`(st, st.value(), influenced_loss)`, where
`st` is this `StochasticTensor`, and returns a `Tensor` loss. By
default, `loss_fn` is the `score_function`, or more precisely, the
integral of the score function, such that when the gradient is taken,
the score function results. See the `stochastic_gradient_estimators`
module for additional loss functions and baselines.
Raises:
TypeError: if `dist` is not an instance of `Distribution`.
TypeError: if `loss_fn` is not `callable`.
"""
if not isinstance(dist, distribution.Distribution):
raise TypeError("dist must be an instance of Distribution")
if dist_value_type is None:
try:
self._value_type = get_current_value_type()
except NoValueTypeSetError:
self._value_type = SampleValue()
else:
      # We want to enforce a value type here, and we use the value_type()
      # context manager to get its error checking.
with value_type(dist_value_type):
self._value_type = get_current_value_type()
if loss_fn is not None and not callable(loss_fn):
raise TypeError("loss_fn must be callable")
self._loss_fn = loss_fn
with ops.name_scope(name) as scope:
self._name = scope
self._dist = dist
self._value = self._create_value()
super(StochasticTensor, self).__init__()
@property
def value_type(self):
return self._value_type
@property
def distribution(self):
return self._dist
def _create_value(self):
"""Create the value Tensor based on the value type, store as self._value."""
if isinstance(self._value_type, MeanValue):
value_tensor = self._dist.mean()
elif isinstance(self._value_type, SampleValue):
value_tensor = self._dist.sample(self._value_type.shape)
else:
raise TypeError("Unrecognized Distribution Value Type: %s",
self._value_type)
if self._value_type.stop_gradient:
# stop_gradient is being enforced by the value type
return array_ops.stop_gradient(value_tensor)
if isinstance(self._value_type, MeanValue):
      return value_tensor  # The mean is deterministic; gradients flow directly.
if self._dist.is_continuous and self._dist.is_reparameterized:
return value_tensor # Using pathwise-derivative for this one.
else:
# Will have to perform some variant of score function
# estimation. Call stop_gradient on the sampler just in case we
# may accidentally leak some gradient from it.
return array_ops.stop_gradient(value_tensor)
@property
def name(self):
return self._name
@property
def graph(self):
return self._value.graph
@property
def dtype(self):
return self._dist.dtype
def entropy(self, name="entropy"):
return self._dist.entropy(name=name)
def mean(self, name="mean"):
return self._dist.mean(name=name)
def value(self, name="value"):
return self._value
def loss(self, final_loss, name="Loss"):
# Return a loss based on final_loss and the distribution. Returns
# None if pathwise derivatives are supported, if the loss_fn
# was explicitly set to None, or if the value type is MeanValue.
if self._loss_fn is None:
return None
if (self._dist.is_continuous and self._dist.is_reparameterized and
not self._value_type.stop_gradient):
# Can perform pathwise-derivative on this one; no additional loss needed.
return None
with ops.name_scope(self.name, values=[final_loss]):
with ops.name_scope(name):
if (self._value_type.stop_gradient or
isinstance(self._value_type, SampleValue)):
return self._loss_fn(self, self._value, final_loss)
elif isinstance(self._value_type, MeanValue):
return None # MeanValue generally provides its own gradient
else:
raise TypeError("Unrecognized Distribution Value Type: %s",
self._value_type)
class ObservedStochasticTensor(StochasticTensor):
"""A StochasticTensor with an observed value."""
# pylint: disable=super-init-not-called
def __init__(self, dist, value, name=None):
"""Construct an `ObservedStochasticTensor`.
`ObservedStochasticTensor` is backed by distribution `dist` and uses the
provided value instead of using the current value type to draw a value from
the distribution. The provided value argument must be appropriately shaped
to have come from the distribution.
Args:
dist: an instance of `Distribution`.
value: a Tensor containing the observed value
name: a name for this `ObservedStochasticTensor` and its ops.
Raises:
TypeError: if `dist` is not an instance of `Distribution`.
ValueError: if `value` is not compatible with the distribution.
"""
if not isinstance(dist, distribution.Distribution):
raise TypeError("dist must be an instance of Distribution")
with ops.name_scope(name, "ObservedStochasticTensor", [value]) as scope:
self._name = scope
self._dist = dist
dist_shape = self._dist.get_batch_shape().concatenate(
self._dist.get_event_shape())
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if not value_shape.is_compatible_with(dist_shape):
if value_shape.ndims < dist_shape.ndims:
raise ValueError(
"Rank of observed value (%d) must be >= rank of a sample from the"
" distribution (%d)." % (value_shape.ndims, dist_shape.ndims))
sample_shape = value_shape[(value_shape.ndims - dist_shape.ndims):]
if not sample_shape.is_compatible_with(dist_shape):
raise ValueError(
"Shape of observed value %s is incompatible with the shape of a "
"sample from the distribution %s." % (value_shape, dist_shape))
if value.dtype != self._dist.dtype:
raise ValueError("Type of observed value (%s) does not match type of "
"distribution (%s)." % (value.dtype, self._dist.dtype))
self._value = array_ops.identity(value)
# pylint: disable=non-parent-init-called
BaseStochasticTensor.__init__(self)
def loss(self, final_loss, name=None):
return None
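# A hedged usage sketch (the Normal constructor below assumes the
# tf.contrib.distributions API of this release): because the value is pinned
# to observed data, no surrogate loss is ever generated for it.
#
#   dist = tf.contrib.distributions.Normal(mu=0., sigma=1.)
#   observed = ObservedStochasticTensor(dist, value=tf.constant([0.5]))
#   observed.loss(final_loss)  # -> None; the value was given, not sampled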
__all__ = [
"BaseStochasticTensor",
"StochasticTensor",
"ObservedStochasticTensor",
"MeanValue",
"SampleValue",
"value_type",
"get_current_value_type",
]
|
maxvyaznikov/django-params | refs/heads/master | django_params/models.py | 1 | # -*- coding: utf-8 -*-
from datetime import date, datetime
from django.db import models
from django.conf import settings
from django.utils import timezone
from gettext import gettext as _
DEFAULT_DATE_FORMAT = getattr(settings, 'DJANGO_PARAMS_DATE_FORMAT', '%d.%m.%Y')
DEFAULT_DATETIME_FORMAT = getattr(settings, 'DJANGO_PARAMS_DATETIME_FORMAT', '%d.%m.%Y %H:%M:%S')
class Param(models.Model):
TYPE_TEXT = 't'
TYPE_DATE = 'd'
TYPE_DATETIME = 'dt'
TYPE_INT = 'i'
TYPE_CHOICES = (
(TYPE_TEXT, 'Text'),
(TYPE_DATE, 'Date'),
(TYPE_DATETIME, 'DateTime'),
(TYPE_INT, 'Integer'),
)
name = models.CharField(choices=settings.DJANGO_PARAMS_NAME_CHOICES, unique=True, max_length=255)
type = models.CharField(choices=TYPE_CHOICES, default=TYPE_TEXT, max_length=10)
value = models.TextField(blank=True, null=False, default='')
def __unicode__(self):
return self.get_name_display()
def __str__(self):
return self.__unicode__()
class Meta:
app_label = 'django_params'
verbose_name = _('param')
verbose_name_plural = _('params')
def get_value(self):
return Param.str2val(type=self.type, value=self.value)[1]
def set_value(self, value):
self.value = Param.val2str(type=self.type, value=value)[1]
self.save()
@staticmethod
def val2str(type, value):
if type == Param.TYPE_DATE:
value = datetime.strftime(value, DEFAULT_DATE_FORMAT)
elif type == Param.TYPE_DATETIME:
value = datetime.strftime(value, DEFAULT_DATETIME_FORMAT)
elif type == Param.TYPE_INT:
value = str(value)
return type, value
@staticmethod
def str2val(type, value):
if type == Param.TYPE_DATE:
if value:
value = date.fromordinal(datetime.strptime(value, DEFAULT_DATE_FORMAT).toordinal())
else:
value = None
elif type == Param.TYPE_DATETIME:
if value:
value = datetime.strptime(value, DEFAULT_DATETIME_FORMAT)
value = timezone.make_aware(value, timezone.get_default_timezone())
else:
value = None
elif type == Param.TYPE_INT:
value = int(value)
return type, value
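    # A small round-trip sketch (assuming the default date format): val2str
    # and str2val are inverses for a given type code.
    #
    #   _, s = Param.val2str(Param.TYPE_DATE, date(2014, 1, 31))  # '31.01.2014'
    #   _, d = Param.str2val(Param.TYPE_DATE, s)                  # date(2014, 1, 31)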
@staticmethod
def get(request, name):
if not hasattr(request, '_django_params_cache'):
request._django_params_cache = {p.name: p for p in Param.objects.all()}
        return request._django_params_cache.get(name)
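    # A usage sketch: Param.get caches all params on the request, so repeated
    # lookups within one request cost a single query. PARAMS_COPYRIGHT is a
    # hypothetical name assumed to be declared in
    # settings.DJANGO_PARAMS_NAME_CHOICES.
    #
    #   param = Param.get(request, settings.PARAMS_COPYRIGHT)
    #   footer = param.get_value() if param else ''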
@staticmethod
def get_one(name, create_if_nothing_with=None):
"""
Function to get single param by a single query
:param name: is a name of parameter
:param create_if_nothing_with: if you want to create param with default value
if it doesn't exist on the time of your call, specify this param as tuple
of (<type>, <value>) to create it. For example:
>> Param.get_one(settings.PARAMS_COPYRIGHT)
None
>> Param.get_one(settings.PARAMS_COPYRIGHT, (Param.TYPE_TEXT, '(c)'))
'(c)'
>> Param.get_one(settings.PARAMS_COPYRIGHT)
'(c)'
:return:
"""
try:
return Param.objects.get(name=name)
except Param.DoesNotExist:
if create_if_nothing_with is None:
return None
else:
type, val = Param.val2str(*create_if_nothing_with)
return Param.objects.create(name=name, type=type, value=val)
|
IssamLaradji/scikit-learn | refs/heads/master | examples/cluster/plot_agglomerative_clustering.py | 26 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of the 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
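# To reproduce the percolation effect described in the docstring, rebuild the
# graph with fewer neighbors (an illustrative variation, not run above):
#
#   sparse_graph = kneighbors_graph(X, 5)
#   AgglomerativeClustering(linkage='complete', connectivity=sparse_graph,
#                           n_clusters=3).fit(X)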
|
KellyChan/Python | refs/heads/master | python/gists/comma.py | 3 | s = "A bird in the hand..."
# Add your for loop
for char in s:
    if char == 'A' or char == 'a':
        print 'X',
    else:
        print char,  # the trailing comma keeps output on one line (Python 2)
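# The same loop in Python 3, where print is a function (illustrative only;
# this file itself is Python 2):
#
#   for char in s:
#       print('X' if char in 'Aa' else char, end=' ')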
|
dcroc16/skunk_works | refs/heads/master | google_appengine/lib/jinja2-2.6/jinja2/ext.py | 114 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
    Jinja extensions allow adding custom tags, similar to the way Django
    custom tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import Environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# the only really useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext, as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
    storing the configuration values on the environment. Because the
    environment then ends up acting as central configuration storage, the
    attributes may clash, which is why extensions have to ensure that the
    names they choose for configuration are not too generic. ``prefix`` for
    example is a terrible name, ``fragment_cache_prefix`` on the other hand
    is a good name as it includes the name of the extension (fragment cache).
"""
__metaclass__ = ExtensionRegistry
    #: if this extension parses, this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
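# A minimal custom-extension sketch (ShoutExtension and its {% shout %} tag
# are hypothetical, shown only to illustrate how tags, parse() and
# call_method() fit together):
#
#   class ShoutExtension(Extension):
#       tags = set(['shout'])
#
#       def parse(self, parser):
#           lineno = next(parser.stream).lineno
#           body = parser.parse_statements(['name:endshout'], drop_needle=True)
#           return nodes.CallBlock(self.call_method('_shout', []),
#                                  [], [], body).set_lineno(lineno)
#
#       def _shout(self, caller):
#           return caller().upper()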
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, basestring):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
        # a later stage.
plural_expr = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
        # If no variables are referenced we don't need the '%' escaping that
        # old-style gettext invocations require when variables are present.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in variables.iteritems():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
    This extraction function operates on the AST and is therefore unable
    to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
        try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
        return options.get(key, str(default)).lower() in ('1', 'on', 'yes', 'true')
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
NEWLINE_SEQUENCE, frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError, e:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
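# A usage sketch: this extractor is normally wired up through Babel's mapping
# configuration rather than called directly, e.g. a babel.cfg along these
# lines (the template path is hypothetical):
#
#   [jinja2: templates/**.html]
#   encoding = utf-8
#   extensions = jinja2.ext.autoescape,jinja2.ext.with_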
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
|
doganov/edx-platform | refs/heads/master | common/djangoapps/course_modes/tests/factories.py | 56 | from course_modes.models import CourseMode
from factory.django import DjangoModelFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
# Factories are self-documenting
# pylint: disable=missing-docstring
class CourseModeFactory(DjangoModelFactory):
class Meta(object):
model = CourseMode
course_id = SlashSeparatedCourseKey('MITx', '999', 'Robot_Super_Course')
mode_slug = 'audit'
mode_display_name = 'audit course'
min_price = 0
currency = 'usd'
expiration_datetime = None
suggested_prices = ''
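# A usage sketch: DjangoModelFactory subclasses create and save a model
# instance when called, with per-field keyword overrides (the values below
# are illustrative):
#
#   verified_mode = CourseModeFactory(mode_slug='verified', min_price=10)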
|
pearsonlab/nipype | refs/heads/master | nipype/utils/spm_docs.py | 8 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Grab documentation from spm."""
import os
from nipype.interfaces import matlab
def grab_doc(task_name):
"""Grab the SPM documentation for the given SPM task named `task_name`
Parameters
----------
task_name : string
Task name for which we are grabbing documentation. Example
task names are ``Realign: Estimate & Reslice``, ``Normalise:
Estimate & Write``.
See Also
--------
spm_flat_config.m : This function can print out all the possible
task names.
"""
cmd = matlab.MatlabCommandLine()
# We need to tell Matlab where to find our spm_get_doc.m file.
cwd = os.path.dirname(__file__)
# Build matlab command
mcmd = "addpath('%s');spm_get_doc('%s')" % (cwd, task_name)
cmd.inputs.script_lines = mcmd
# Run the command and get the documentation out of the result.
out = cmd.run()
return _strip_header(out.runtime.stdout)
def _strip_header(doc):
"""Strip Matlab header and splash info off doc.
    Searches for the tag 'NIPYPE' in the doc and returns everything after that.
"""
hdr = 'NIPYPE'
# There's some weird cruft at the end of the docstring, almost looks like
# the hex for the escape character 0x1b.
cruft = '\x1b'
try:
index = doc.index(hdr)
index += len(hdr)
index += 1
doc = doc[index:]
try:
index = doc.index(cruft)
except ValueError:
index = len(doc)
return doc[:index]
    except ValueError:
raise IOError('This docstring was not generated by Nipype!\n')
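# A usage sketch (requires a working MATLAB + SPM installation on the path;
# the task name is one of the examples from the grab_doc docstring):
#
#   doc = grab_doc('Realign: Estimate & Reslice')
#   print(doc)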
|
bmanojlovic/ansible | refs/heads/devel | lib/ansible/modules/inventory/add_host.py | 50 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
Takes variables so you can define the new hosts more fully.
version_added: "0.9"
options:
name:
aliases: [ 'hostname', 'host' ]
description:
      - The hostname/ip of the host to add to the inventory; it can include a colon and a port number.
required: true
groups:
aliases: [ 'groupname', 'group' ]
description:
- The groups to add the hostname to, comma separated.
required: false
notes:
    - This module bypasses the play host loop and only runs once for all the hosts in the play; if you need it
      to iterate, use a with\_ directive.
author:
- "Ansible Core Team"
- "Seth Vidal"
'''
EXAMPLES = '''
# add host to group 'just_created' with variable foo=42
- add_host:
name: "{{ ip_from_ec2 }}"
groups: just_created
foo: 42
# add a host with a non-standard port local to your machines
- add_host:
name: "{{ new_ip }}:{{ new_port }}"
# add a host alias that we reach through a tunnel (Ansible <= 1.9)
- add_host:
hostname: "{{ new_ip }}"
ansible_ssh_host: "{{ inventory_hostname }}"
ansible_ssh_port: "{{ new_port }}"
# add a host alias that we reach through a tunnel (Ansible >= 2.0)
- add_host:
hostname: "{{ new_ip }}"
ansible_host: "{{ inventory_hostname }}"
ansible_port: "{{ new_port }}"
'''
|
jfpla/odoo | refs/heads/8.0 | addons/email_template/tests/test_mail.py | 190 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
def setUp(self):
super(test_message_compose, self).setUp()
        # create the 'bird' group used throughout these tests (the 'pigs' group comes from the TestMail base setup)
self.group_bird_id = self.mail_group.create(self.cr, self.uid,
{'name': 'Bird', 'description': 'I am angry !'})
def test_00_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
user_admin = self.res_users.browse(cr, uid, uid)
p_a_id = user_admin.partner_id.id
group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)
# Mail data
_subject1 = 'Pigs'
_subject2 = 'Bird'
_body_html1 = 'Fans of Pigs, unite !'
_body_html2 = 'I am angry !'
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# Create template on mail.group, with attachments
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': False,
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d'
})
# ----------------------------------------
# CASE1: comment and save as template
# ----------------------------------------
# 1. Comment on pigs
compose_id = mail_compose.create(cr, uid,
{'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
{'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]})
compose = mail_compose.browse(cr, uid, compose_id)
# 2. Save current composition form as a template
mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
# Test: email_template subject, body_html, model
last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
last_template = email_template.browse(cr, uid, last_template_id)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
# ----------------------------------------
# CASE2: comment with template, save as template
# ----------------------------------------
# 1. Comment on pigs
context = {
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_use_template': False,
'default_template_id': email_template_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# Test: mail.compose.message: subject, body, partner_ids
self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# Test: mail.compose.message: attachments (owner has not been modified)
for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# Test: mail.message: attachments
mail_compose.send_mail(cr, uid, [compose_id])
group_pigs.refresh()
message_pigs = group_pigs.message_ids[0]
for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# ----------------------------------------
# CASE3: mass_mail with template
# ----------------------------------------
# 1. Mass_mail on pigs and bird, with a default_partner_ids set to check he is correctly added
context = {
'default_composition_mode': 'mass_mail',
'default_notify': True,
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_template_id': email_template_id,
'default_partner_ids': [p_a_id],
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = [p_a_id]
self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect') # todo: check signature
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# 2. Post the comment, get created message
mail_compose.send_mail(cr, uid, [compose_id], {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
group_pigs.refresh()
group_bird.refresh()
message_pigs = group_pigs.message_ids[0]
message_bird = group_bird.message_ids[0]
# Test: subject, body
self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
# Test: partner_ids: p_a_id (default) + 3 newly created partners
# message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
# message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
# partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# partner_ids.append(p_a_id)
# self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
# self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')
# ----------------------------------------
# CASE4: test newly introduced partner_to field
# ----------------------------------------
# get already-created partners back
p_b_id = self.res_partner.search(cr, uid, [('email', '=', 'b@b.b')])[0]
p_c_id = self.res_partner.search(cr, uid, [('email', '=', 'c@c.c')])[0]
p_d_id = self.res_partner.search(cr, uid, [('email', '=', 'd@d.d')])[0]
# modify template: use partner_to, use template and email address in email_to to test all features together
user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
email_template.write(cr, uid, [email_template_id], {
'model_id': user_model_id,
'body_html': '${object.login}',
'email_to': '${object.email}, c@c.c',
'partner_to': '%i,%i' % (p_b_id, p_c_id),
'email_cc': 'd@d.d',
})
        # partner by email + partner by id (no duplicates)
send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
        # Generate message with default email and partner on template
mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')
@mute_logger('openerp.models')
def test_10_email_templating(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid, context = self.cr, self.uid, {}
# create the email.template on mail.group model
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'email_from': 'Raoul Grosbedon <raoul@example.com>',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': True,
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d',
'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
})
# not force send: email_recipients is not taken into account
msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
self.assertEqual(mail.email_to, 'b@b.b, c@c.c', 'email_template: send_mail: wrong email_to')
self.assertEqual(mail.email_cc, 'd@d.d', 'email_template: send_mail: wrong email_cc')
self.assertEqual(
set([partner.id for partner in mail.recipient_ids]),
set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
'email_template: send_mail: wrong management of partner_to')
# force send: take email_recipients into account
email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
sent_emails = self._build_email_kwargs_list
email_to_lst = [
['b@b.b', 'c@c.c'], ['Administrator <admin@yourcompany.example.com>'],
['Raoul Grosbedon <raoul@raoul.fr>'], ['Bert Tartignole <bert@bert.fr>']]
self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
for email in sent_emails:
self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
|
askaliuk/django-haystack | refs/heads/master | tests/spatial/tests.py | 3 | import httplib2
import warnings
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.test import TestCase
from haystack import connections
from haystack.exceptions import SpatialError
from haystack.query import SearchQuerySet
from haystack.utils.geo import Point, D, ensure_geometry, ensure_point, ensure_wgs84, ensure_distance, generate_bounding_box
from spatial.models import Checkin
class SpatialUtilitiesTestCase(TestCase):
def test_ensure_geometry(self):
self.assertRaises(SpatialError, ensure_geometry, [38.97127105172941, -95.23592948913574])
ensure_geometry(GEOSGeometry('POLYGON((-95 38, -96 40, -97 42, -95 38))'))
ensure_geometry(GEOSGeometry('POINT(-95.23592948913574 38.97127105172941)'))
ensure_geometry(Point(-95.23592948913574, 38.97127105172941))
def test_ensure_point(self):
self.assertRaises(SpatialError, ensure_point, [38.97127105172941, -95.23592948913574])
self.assertRaises(SpatialError, ensure_point, GEOSGeometry('POLYGON((-95 38, -96 40, -97 42, -95 38))'))
ensure_point(Point(-95.23592948913574, 38.97127105172941))
def test_ensure_wgs84(self):
self.assertRaises(SpatialError, ensure_wgs84, GEOSGeometry('POLYGON((-95 38, -96 40, -97 42, -95 38))'))
orig_pnt = Point(-95.23592948913574, 38.97127105172941)
std_pnt = ensure_wgs84(orig_pnt)
self.assertEqual(orig_pnt.srid, None)
self.assertEqual(std_pnt.srid, 4326)
self.assertEqual(std_pnt.x, -95.23592948913574)
self.assertEqual(std_pnt.y, 38.97127105172941)
orig_pnt = Point(-95.23592948913574, 38.97127105172941)
orig_pnt.set_srid(2805)
std_pnt = ensure_wgs84(orig_pnt)
self.assertEqual(orig_pnt.srid, 2805)
self.assertEqual(std_pnt.srid, 4326)
# These should be different, since it got transformed.
self.assertNotEqual(std_pnt.x, -95.23592948913574)
self.assertNotEqual(std_pnt.y, 38.97127105172941)
def test_ensure_distance(self):
self.assertRaises(SpatialError, ensure_distance, [38.97127105172941, -95.23592948913574])
ensure_distance(D(mi=5))
def test_generate_bounding_box(self):
downtown_bottom_left = Point(-95.23947, 38.9637903)
downtown_top_right = Point(-95.23362278938293, 38.973081081164715)
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(downtown_bottom_left, downtown_top_right)
self.assertEqual(min_lat, 38.9637903)
self.assertEqual(min_lng, -95.23947)
self.assertEqual(max_lat, 38.973081081164715)
self.assertEqual(max_lng, -95.23362278938293)
def test_generate_bounding_box_crossing_line_date(self):
downtown_bottom_left = Point(95.23947, 38.9637903)
downtown_top_right = Point(-95.23362278938293, 38.973081081164715)
((south, west), (north, east)) = generate_bounding_box(downtown_bottom_left, downtown_top_right)
self.assertEqual(south, 38.9637903)
self.assertEqual(west, 95.23947)
self.assertEqual(north, 38.973081081164715)
self.assertEqual(east, -95.23362278938293)
class SpatialSolrNoDistanceTestCase(TestCase):
fixtures = ['sample_spatial_data.json']
using = 'default'
def setUp(self):
super(SpatialSolrNoDistanceTestCase, self).setUp()
self.ui = connections[self.using].get_unified_index()
self.checkindex = self.ui.get_index(Checkin)
self.checkindex.reindex(using=self.using)
self.sqs = SearchQuerySet().using(self.using)
self.downtown_pnt = Point(-95.23592948913574, 38.97127105172941)
self.downtown_bottom_left = Point(-95.23947, 38.9637903)
self.downtown_top_right = Point(-95.23362278938293, 38.973081081164715)
self.lawrence_bottom_left = Point(-95.345535, 39.002643)
self.lawrence_top_right = Point(-95.202713, 38.923626)
def tearDown(self):
self.checkindex.clear(using=self.using)
        super(SpatialSolrNoDistanceTestCase, self).tearDown()
def test_indexing(self):
# Make sure the indexed data looks correct.
first = Checkin.objects.get(pk=1)
sqs = self.sqs.models(Checkin).filter(django_id=first.pk)
self.assertEqual(sqs.count(), 1)
self.assertEqual(sqs[0].username, first.username)
# Make sure we've got a proper ``Point`` object.
self.assertAlmostEqual(sqs[0].location.get_coords()[0], first.longitude)
self.assertAlmostEqual(sqs[0].location.get_coords()[1], first.latitude)
# Double-check, to make sure there was nothing accidentally copied
# between instances.
second = Checkin.objects.get(pk=2)
self.assertNotEqual(second.latitude, first.latitude)
sqs = self.sqs.models(Checkin).filter(django_id=second.pk)
self.assertEqual(sqs.count(), 1)
self.assertEqual(sqs[0].username, second.username)
self.assertAlmostEqual(sqs[0].location.get_coords()[0], second.longitude)
self.assertAlmostEqual(sqs[0].location.get_coords()[1], second.latitude)
def test_within(self):
self.assertEqual(self.sqs.all().count(), 10)
sqs = self.sqs.within('location', self.downtown_bottom_left, self.downtown_top_right)
self.assertEqual(sqs.count(), 7)
sqs = self.sqs.within('location', self.lawrence_bottom_left, self.lawrence_top_right)
self.assertEqual(sqs.count(), 9)
def test_dwithin(self):
self.assertEqual(self.sqs.all().count(), 10)
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=0.1))
self.assertEqual(sqs.count(), 5)
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=0.5))
self.assertEqual(sqs.count(), 7)
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=100))
self.assertEqual(sqs.count(), 10)
def test_distance_added(self):
sqs = self.sqs.within('location', self.downtown_bottom_left, self.downtown_top_right).distance('location', self.downtown_pnt)
self.assertEqual(sqs.count(), 7)
self.assertAlmostEqual(sqs[0].distance.mi, 0.01985226)
self.assertAlmostEqual(sqs[1].distance.mi, 0.03385863)
self.assertAlmostEqual(sqs[2].distance.mi, 0.04539100)
self.assertAlmostEqual(sqs[3].distance.mi, 0.04831436)
self.assertAlmostEqual(sqs[4].distance.mi, 0.41116546)
self.assertAlmostEqual(sqs[5].distance.mi, 0.25098114)
self.assertAlmostEqual(sqs[6].distance.mi, 0.04831436)
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt)
self.assertEqual(sqs.count(), 5)
self.assertAlmostEqual(sqs[0].distance.mi, 0.01985226)
self.assertAlmostEqual(sqs[1].distance.mi, 0.03385863)
self.assertAlmostEqual(sqs[2].distance.mi, 0.04539100)
self.assertAlmostEqual(sqs[3].distance.mi, 0.04831436)
self.assertAlmostEqual(sqs[4].distance.mi, 0.04831436)
def test_order_by_distance(self):
sqs = self.sqs.within('location', self.downtown_bottom_left, self.downtown_top_right).distance('location', self.downtown_pnt).order_by('distance')
self.assertEqual(sqs.count(), 7)
self.assertEqual([result.pk for result in sqs], ['8', '9', '6', '3', '1', '2', '5'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0199', '0.0339', '0.0454', '0.0483', '0.0483', '0.2510', '0.4112'])
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt).order_by('distance')
self.assertEqual(sqs.count(), 5)
self.assertEqual([result.pk for result in sqs], ['8', '9', '6', '3', '1'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0199', '0.0339', '0.0454', '0.0483', '0.0483'])
sqs = self.sqs.dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt).order_by('-distance')
self.assertEqual(sqs.count(), 5)
self.assertEqual([result.pk for result in sqs], ['3', '1', '6', '9', '8'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0483', '0.0483', '0.0454', '0.0339', '0.0199'])
def test_complex(self):
sqs = self.sqs.auto_query('coffee').within('location', self.downtown_bottom_left, self.downtown_top_right).distance('location', self.downtown_pnt).order_by('distance')
self.assertEqual(sqs.count(), 5)
self.assertEqual([result.pk for result in sqs],['8', '6', '3', '1', '2'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0199', '0.0454', '0.0483', '0.0483', '0.2510'])
sqs = self.sqs.auto_query('coffee').dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt).order_by('distance')
self.assertEqual(sqs.count(), 4)
self.assertEqual([result.pk for result in sqs], ['8', '6', '3', '1'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0199', '0.0454', '0.0483', '0.0483'])
sqs = self.sqs.auto_query('coffee').dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt).order_by('-distance')
self.assertEqual(sqs.count(), 4)
self.assertEqual([result.pk for result in sqs], ['3', '1', '6', '8'])
self.assertEqual(["%0.04f" % result.distance.mi for result in sqs], ['0.0483', '0.0483', '0.0454', '0.0199'])
sqs = self.sqs.auto_query('coffee').within('location', self.downtown_bottom_left, self.downtown_top_right).distance('location', self.downtown_pnt).order_by('-created')
self.assertEqual(sqs.count(), 5)
self.assertEqual([result.pk for result in sqs], ['8', '6', '3', '2', '1'])
sqs = self.sqs.auto_query('coffee').dwithin('location', self.downtown_pnt, D(mi=0.1)).distance('location', self.downtown_pnt).order_by('-created')
self.assertEqual(sqs.count(), 4)
self.assertEqual([result.pk for result in sqs], ['8', '6', '3', '1'])
def check_running(using):
http = httplib2.Http(timeout=1)
url = settings.HAYSTACK_CONNECTIONS[using]['URL']
try:
resp, content = http.request(url)
except Exception as e:
warnings.warn("It appears like '%s' is unavailable. Skipping..." % url)
raise
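# Hedged note: check_running() probes the configured Solr URL with a 1-second
# timeout and re-raises on failure; the try/except/else below only defines the
# native-distance test case when that core responds, so the suite degrades
# gracefully when the backend is offline.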
try:
check_running('solr_native_distance')
except:
pass
else:
class SpatialSolrNativeDistanceTestCase(SpatialSolrNoDistanceTestCase):
using = 'solr_native_distance'
|
pratikmallya/hue | refs/heads/master | desktop/core/ext-py/boto-2.38.0/boto/swf/exceptions.py | 190 | """
Exceptions that are specific to the swf module.
This module subclasses the base SWF response exception,
boto.exception.SWFResponseError, for some of the SWF-specific faults.
"""
from boto.exception import SWFResponseError
class SWFDomainAlreadyExistsError(SWFResponseError):
"""
Raised when the domain already exists.
"""
pass
class SWFLimitExceededError(SWFResponseError):
"""
Raised when a system-imposed limitation has been reached.
"""
pass
class SWFOperationNotPermittedError(SWFResponseError):
"""
Reserved for future use.
"""
class SWFTypeAlreadyExistsError(SWFResponseError):
"""
Raised when the workflow type or activity type already exists.
"""
pass
class SWFWorkflowExecutionAlreadyStartedError(SWFResponseError):
"""
Raised when an open execution with the same workflow_id is already running
in the specified domain.
"""
|
petteyg/intellij-community | refs/heads/master | python/testData/resolve/QualifiedFunc.py | 83 | class Foo:
def bar():
pass
Foo().<ref>bar()
|
asposeslides/Aspose_Slides_Java | refs/heads/master | Plugins/Aspose-Slides-Java-for-Jython/asposeslides/WorkingWithText/ReplaceText.py | 2 | from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
class ReplaceText:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithText/ReplaceText/'
# Create an instance of Presentation class
pres = Presentation(dataDir + 'Welcome.pptx')
# Get the first slide
sld = pres.getSlides().get_Item(0)
# Change the text of each placeholder
shp = sld.getShapes().get_Item(0)
shp.getTextFrame().setText("This is Placeholder")
# Write the presentation as a PPTX file
pres.save(dataDir + "Welcome_PH.pptx", SaveFormat.Pptx)
print "Replaced text, please check the output file."
if __name__ == '__main__':
ReplaceText() |
AsgerPetersen/QGIS | refs/heads/master | python/ext-libs/future/tkinter/colorchooser.py | 118 | from __future__ import absolute_import
from future.utils import PY3
if PY3:
from tkinter.colorchooser import *
else:
try:
from tkColorChooser import *
except ImportError:
raise ImportError('The tkColorChooser module is missing. Does your Py2 '
'installation include tkinter?')
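# A hedged usage sketch: with this shim in place, the Py3-style import works on
# both Python versions (requires a running display; the preset color is an
# arbitrary example):
#
#     from tkinter.colorchooser import askcolor
#     rgb, hex_value = askcolor(color='#ff0000')  # ((r, g, b), '#rrggbb'), or (None, None) on cancel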
|
pegasus-isi/pegasus | refs/heads/master | packages/pegasus-python/src/Pegasus/service/ensembles/__init__.py | 1 | import os
from flask import Flask
emapp = Flask(__name__)
# Load configuration defaults
emapp.config.from_object("Pegasus.service.defaults")
# Load user configuration
conf = os.path.expanduser("~/.pegasus/service.py")
if os.path.isfile(conf):
emapp.config.from_pyfile(conf)
del conf
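# A hedged illustration of what ~/.pegasus/service.py might contain: plain
# Flask-style settings picked up by from_pyfile(). The keys shown are generic
# examples, not a documented Pegasus configuration:
#
#     DEBUG = True
#     SERVER_NAME = "localhost:5000"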
from Pegasus.service.ensembles import api, views # noqa: E402,F401 isort:skip
|
mikemoraned/geowhatsit-server | refs/heads/master | node_modules/heroku-redis-client/node_modules/publish/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 260 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
if sys.platform in ('win32', 'cygwin'):
def test_BinaryNamesWindows(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
if sys.platform == 'linux2':
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
|
roshantha9/AbstractManycoreSim | refs/heads/master | src/RunSim_Exp_PSBasedRemapping.py | 1 | import sys, os, csv, pprint, math
import argparse
import numpy as np
import random
import shutil
import time
from util_scripts.psbased_remapping_random_params_generation import generate_random_params
## uncomment when running under CLI only version ##
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
from libProcessingElement.LocalScheduler import LocalRRScheduler, \
LocalEDFScheduler, \
LocalMPEG2FrameEDFScheduler, \
LocalMPEG2FrameListScheduler, \
LocalMPEG2FramePriorityScheduler, \
LocalMPEG2FramePriorityScheduler_WithDepCheck
from libResourceManager.RMTypes import RMTypes
from libProcessingElement.CPUTypes import CPUTypes
from libResourceManager.Mapper.MapperTypes import MapperTypes
from libTaskDispatcher.TDTypes import TDTypes
from libResourceManager.AdmissionControllerOptions import AdmissionControllerOptions
from libMappingAndScheduling.SemiDynamic.TaskMappingSchemes import TaskMappingSchemes
from libMappingAndScheduling.SemiDynamic.TaskSemiDynamicPrioritySchemes import TaskSemiDynamicPrioritySchemes
from libMappingAndScheduling.SemiDynamic.TaskMappingAndPriAssCombinedSchemes import TaskMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.Decentralised.TaskRemapDecentSchemes import TaskRemapDecentSchemes
from libApplicationModel.Task import TaskModel
from SimParams import SimParams
import Multicore_MPEG_Model as MMMSim
NUM_WORKFLOWS = range(8, 17, 2)
#NUM_WORKFLOWS = [12]
NUM_NODES = [2,4,8,16,32]
NOC_XY = [(2,1), (2,2), (2,4), (2,8), (2,16)]
# simple ps-remapping test
def runSim_Simple():
seed = 99108
print "SEED === " + str(seed)
# fixed params
SimParams.NOC_W = 4
SimParams.NOC_H = 4
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
SimParams.NUM_WORKFLOWS = SimParams.NUM_NODES * 2
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
SimParams.PSALGO_ENABLED = True
SimParams.PSALGO_VIEWER_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_RANDOM_QUEEN
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = True
SimParams.LOCAL_SCHEDULER_TYPE = LocalMPEG2FramePriorityScheduler_WithDepCheck()
SimParams.SIM_ENTITY_RESOURCEMANAGER_CLASS = RMTypes.OPENLOOP
SimParams.SIM_ENTITY_CPUNODE_CLASS = CPUTypes.OPENLOOP
SimParams.SIM_ENTITY_MAPPER_CLASS = MapperTypes.OPENLOOP
SimParams.SIM_ENTITY_TASKDISPATCHER_CLASS = TDTypes.OPENLOOP
SimParams.TASK_MODEL = TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL
SimParams.TASKSET_MODEL = TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL
SimParams.COMBINED_MAPPING_AND_PRIASS = TaskMappingAndPriAssCombinedSchemes.TASKMAPPINGANDPRIASSCOMBINED_DISABLED
SimParams.DYNAMIC_TASK_MAPPING_SCHEME = TaskMappingSchemes.TASKMAPPINGSCHEMES_LOWEST_UTILISATION
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_Simple : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", mapping="+str(SimParams.DYNAMIC_TASK_MAPPING_SCHEME) + \
", pri_ass="+str(SimParams.DYNAMIC_TASK_PRIASS_SCHEME)
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
# name the report filenames
tm_fname = "test__timeline.png"
vs_bs_fname = "test__vsbs.js"
util_fname = "test__util.js"
wf_res_fname = "test__wfressumm.js"
gops_opbuff_fname = "test__gopsopbuffsumm.js"
rmtbl_dt_fname = "test__rmtbldt.js"
ibuff_fname = "test__ibuff.js"
obuff_fname = "test__obuff.js"
nodetqs_fname = "test__nodetqs.js"
rmtaskrelease_fname = "test__rmtaskrel.js"
mappingandpriass_fname = "test__mappingandpriass.js"
flowscompleted_fname = "test__flwcompleted.js"
nodetaskexectime_fname = "test__nodetaskexectime.js"
psalgo_nodeprops = "test__psalgonodeprops.js"
flowsadded_fname = "test__flwsadded.js"
trminfo_fname = "test__taskremappinginfo.js"
nodecumslack_fname = "test__nodecumslack.js"
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname=tm_fname,
wf_res_summary_fname = wf_res_fname,
gops_opbuff_summary_fname = gops_opbuff_fname,
rmtbl_dt_summary_fname = rmtbl_dt_fname,
output_format = "json")
MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, vs_bs_fname)
MMMSim.SimMon.report_InstUtilisation(dump_to_file=util_fname)
MMMSim.SimMon.report_InputBuffer(dump_to_file=ibuff_fname)
MMMSim.SimMon.report_NodeTQs(dump_to_file=nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=obuff_fname)
MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=rmtaskrelease_fname)
MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=mappingandpriass_fname)
MMMSim.SimMon.report_FlowsCompleted(dump_to_file=flowscompleted_fname)
MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=nodetaskexectime_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=psalgo_nodeprops)
MMMSim.SimMon.report_FlowsAdded(dump_to_file=flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=trminfo_fname)
MMMSim.SimMon.report_NodeCumSlack(dump_to_file=nodecumslack_fname)
# remapping on/off experiment
def runSim_RemappingBasic(run_rm_off=True, run_rm_on=True, seed=1234, multiseeds=False, noc_w=10, noc_h=10):
print "---SEED === " + str(seed)
###############
# fixed params
###############
SimParams.NOC_W = noc_w
SimParams.NOC_H = noc_h
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
SimParams.NUM_WORKFLOWS = SimParams.NUM_NODES + 3
#SimParams.NUM_WORKFLOWS = 3
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
SimParams.LOCAL_SCHEDULER_TYPE = LocalMPEG2FramePriorityScheduler_WithDepCheck()
SimParams.SIM_ENTITY_RESOURCEMANAGER_CLASS = RMTypes.OPENLOOP
SimParams.SIM_ENTITY_CPUNODE_CLASS = CPUTypes.OPENLOOP
SimParams.SIM_ENTITY_MAPPER_CLASS = MapperTypes.OPENLOOP
SimParams.SIM_ENTITY_TASKDISPATCHER_CLASS = TDTypes.OPENLOOP
SimParams.TASK_MODEL = TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL
SimParams.TASKSET_MODEL = TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL
# SimParams.LOCAL_SCHEDULER_TYPE = LocalMPEG2FramePriorityScheduler()
# SimParams.SIM_ENTITY_RESOURCEMANAGER_CLASS = RMTypes.CLOSEDLOOP_WITH_IBUFFERING
# SimParams.SIM_ENTITY_CPUNODE_CLASS = CPUTypes.CLOSEDLOOP_WITH_IBUFFERING
# SimParams.SIM_ENTITY_MAPPER_CLASS = MapperTypes.CLOSEDLOOP_WITH_IBUFFERING
# SimParams.SIM_ENTITY_TASKDISPATCHER_CLASS = TDTypes.CLOSEDLOOP_WITH_IBUFFERING
SimParams.COMBINED_MAPPING_AND_PRIASS = TaskMappingAndPriAssCombinedSchemes.TASKMAPPINGANDPRIASSCOMBINED_DISABLED
SimParams.DYNAMIC_TASK_MAPPING_SCHEME = TaskMappingSchemes.TASKMAPPINGSCHEMES_LOWEST_UTILISATION
SimParams.DYNAMIC_TASK_PRIASS_SCHEME = TaskSemiDynamicPrioritySchemes.TASKSEMIDYNAMICPRIORITYSCHEMES_LOWRES_FIRST
noc_size_dir = "noc_" + str(noc_w) + "x" + str(noc_h) + "/"
if(multiseeds==True):
DATA_FOLDER = "experiment_data/remapping_psbased/"+"seed_"+str(seed)+"/"
else:
#DATA_FOLDER = "experiment_data/remapping_psbased/"
DATA_FOLDER = "experiment_data/remapping_nocsize/remapping_psbased/" + noc_size_dir
# name the report filenames
tm_fname = "test__timeline.png"
vs_bs_fname = "test__vsbs.js"
util_fname = "test__util.js"
wf_res_fname = "test__wfressumm.js"
gops_opbuff_fname = "test__gopsopbuffsumm.js"
rmtbl_dt_fname = "test__rmtbldt.js"
ibuff_fname = "test__ibuff.js"
obuff_fname = "test__obuff.js"
nodetqs_fname = "test__nodetqs.js"
rmtaskrelease_fname = "test__rmtaskrel.js"
mappingandpriass_fname = "test__mappingandpriass.js"
flowscompleted_fname = "test__flwcompleted.js"
nodetaskexectime_fname = "test__nodetaskexectime.js"
psalgo_nodeprops = "test__psalgonodeprops.js"
flowsadded_fname = "test__flwsadded.js"
trminfo_fname = "test__taskremappinginfo.js"
nodecumslack_fname = "test__nodecumslack.js"
node_importedtasks = "test__nodeimportedtasks.js"
rmdebuginfo_1 = "test__remappingdebuginfo_1.js"
if(run_rm_off == True):
################################
# PS-based Remapping disabled
################################
SimParams.PSALGO_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_NONE
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = False
SimParams.PSALGO_VIEWER_ENABLED = False
SimParams.MMC_ENABLE_DATATRANSMISSION_MODELLING = False
FNAME_PREFIX = DATA_FOLDER+"RMOFF_"
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_RemappingBasic : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_disabled"
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =FNAME_PREFIX+tm_fname,
wf_res_summary_fname = FNAME_PREFIX+wf_res_fname,
gops_opbuff_summary_fname = FNAME_PREFIX+gops_opbuff_fname,
rmtbl_dt_summary_fname = FNAME_PREFIX+rmtbl_dt_fname,
output_format = "json")
#MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, FNAME_PREFIX+vs_bs_fname)
#MMMSim.SimMon.report_InstUtilisation(dump_to_file=FNAME_PREFIX+util_fname)
#MMMSim.SimMon.report_InputBuffer(dump_to_file=FNAME_PREFIX+ibuff_fname)
#MMMSim.SimMon.report_NodeTQs(dump_to_file=FNAME_PREFIX+nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=FNAME_PREFIX+obuff_fname)
#MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=FNAME_PREFIX+rmtaskrelease_fname)
#MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=FNAME_PREFIX+mappingandpriass_fname)
MMMSim.SimMon.report_FlowsCompleted(dump_to_file=FNAME_PREFIX+flowscompleted_fname)
#MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=FNAME_PREFIX+nodetaskexectime_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=FNAME_PREFIX+psalgo_nodeprops)
#MMMSim.SimMon.report_FlowsAdded(dump_to_file=FNAME_PREFIX+flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=FNAME_PREFIX+trminfo_fname)
MMMSim.SimMon.report_NodeCumSlack(dump_to_file=FNAME_PREFIX+nodecumslack_fname)
MMMSim.SimMon.report_NodeImportedTasks(dump_to_file=FNAME_PREFIX+node_importedtasks)
if(run_rm_on == True):
################################
# PS-based Remapping enabled
################################
SimParams.PSALGO_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_LOWESTBLOCKING_QUEEN_VIA_SYSSLACK
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = True
SimParams.PSALGO_VIEWER_ENABLED = True
SimParams.MMC_ENABLE_DATATRANSMISSION_MODELLING = False
SimParams.PSALGO_PSFLOWPAYLOADSIZE =16
# --------------------------------------------------------------------------------
## FOR MMC - enabled ##
# params taken from mc-test : perm_0.22_0.0629_20.0_0.3_0.3_4.6_
# perm_0.12_0.18_0.2_0.15_
#SimParams.PSALGO_TQN = 0.22
#SimParams.PSALGO_TDECAY = 0.0629
#SimParams.PSALGO_THRESHOLDQN = 20.0
#SimParams.PSALGO_THRESHOLDHOPCOUNT = 2
#SimParams.DYNAMIC_TASK_REMAPPING_SAMPLING_INTERVAL = 4.6
#SimParams.PSALGO_HQN = 14
#SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO = [0.12, 0.18]
#SimParams.PSALGO_KHOPDECAY = 0.2
#SimParams.PSALGO_KTIMEDECAY = 0.15
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
## FOR MMC - disabled ##
## "param": "perm_0.1_0.025_3_6.9_5_0.203_0.3_0.15_0.05_"
#SimParams.PSALGO_TQN = 0.1
#SimParams.PSALGO_TDECAY = 0.025
#SimParams.PSALGO_THRESHOLDQN = 9.0
#SimParams.PSALGO_THRESHOLDHOPCOUNT = 3
#SimParams.DYNAMIC_TASK_REMAPPING_SAMPLING_INTERVAL = 6.9
#SimParams.PSALGO_HQN = 5
#SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO = [0.203, 0.3]
#SimParams.PSALGO_KHOPDECAY = 0.15
#SimParams.PSALGO_KTIMEDECAY = 0.05
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
## FOR MMC - disabled ##
## perm_0.22_0.055_3_6.9_18_0.107_0.01_0.15_0.3_
#SimParams.PSALGO_TQN = 0.22
#SimParams.PSALGO_TDECAY = 0.055
SimParams.PSALGO_TQN = 0.05
SimParams.PSALGO_TDECAY = 0.025
SimParams.PSALGO_THRESHOLDQN = 20.0
SimParams.PSALGO_THRESHOLDHOPCOUNT = 3
SimParams.DYNAMIC_TASK_REMAPPING_SAMPLING_INTERVAL = 6.9
SimParams.PSALGO_HQN = 18
SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO = [0.107, 0.01]
SimParams.PSALGO_KHOPDECAY = 0.15
SimParams.PSALGO_KTIMEDECAY = 0.3
# --------------------------------------------------------------------------------
FNAME_PREFIX = DATA_FOLDER+"RMON_"
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_RemappingBasic : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_enabled"
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =FNAME_PREFIX+tm_fname,
wf_res_summary_fname = FNAME_PREFIX+wf_res_fname,
gops_opbuff_summary_fname = FNAME_PREFIX+gops_opbuff_fname,
rmtbl_dt_summary_fname = FNAME_PREFIX+rmtbl_dt_fname,
output_format = "json")
#MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, FNAME_PREFIX+vs_bs_fname)
#MMMSim.SimMon.report_InstUtilisation(dump_to_file=FNAME_PREFIX+util_fname)
#MMMSim.SimMon.report_InputBuffer(dump_to_file=FNAME_PREFIX+ibuff_fname)
#MMMSim.SimMon.report_NodeTQs(dump_to_file=FNAME_PREFIX+nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=FNAME_PREFIX+obuff_fname)
#MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=FNAME_PREFIX+rmtaskrelease_fname)
#MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=FNAME_PREFIX+mappingandpriass_fname)
MMMSim.SimMon.report_FlowsCompleted(dump_to_file=FNAME_PREFIX+flowscompleted_fname)
MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=FNAME_PREFIX+nodetaskexectime_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=FNAME_PREFIX+psalgo_nodeprops)
#MMMSim.SimMon.report_FlowsAdded(dump_to_file=FNAME_PREFIX+flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=FNAME_PREFIX+trminfo_fname)
MMMSim.SimMon.report_NodeCumSlack(dump_to_file=FNAME_PREFIX+nodecumslack_fname)
MMMSim.SimMon.report_NodeImportedTasks(dump_to_file=FNAME_PREFIX+node_importedtasks)
MMMSim.SimMon.report_PSRemappDebugInfo_1(dump_to_file=FNAME_PREFIX+rmdebuginfo_1)
# remapping on full fact params test
def runSim_RemappingFullFactParams(run_rm_off=True, run_rm_on=True, seed=77379, multiseeds=False,
each_tqn = None,
param_thresholdqn = None,
param_thrhc = None,
param_remapperiod = None,
param_dynthrqnratinc = None,
param_dynthrqnratdec = None
):
print "SEED === " + str(seed)
###############
# fixed params
###############
SimParams.NOC_W = 4
SimParams.NOC_H = 4
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
SimParams.NUM_WORKFLOWS = SimParams.NUM_NODES + 1
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
if(multiseeds==True):
DATA_FOLDER = "experiment_data/remapping_psbased_fullfactorial/"+"seed_"+str(seed)+"/"
else:
DATA_FOLDER = "experiment_data/remapping_psbased_fullfactorial/"
# name the report filenames
tm_fname = "test__timeline.png"
vs_bs_fname = "test__vsbs.js"
util_fname = "test__util.js"
wf_res_fname = "test__wfressumm.js"
gops_opbuff_fname = "test__gopsopbuffsumm.js"
rmtbl_dt_fname = "test__rmtbldt.js"
ibuff_fname = "test__ibuff.js"
obuff_fname = "test__obuff.js"
nodetqs_fname = "test__nodetqs.js"
rmtaskrelease_fname = "test__rmtaskrel.js"
mappingandpriass_fname = "test__mappingandpriass.js"
flowscompleted_fname = "test__flwcompleted.js"
nodetaskexectime_fname = "test__nodetaskexectime.js"
psalgo_nodeprops = "test__psalgonodeprops.js"
flowsadded_fname = "test__flwsadded.js"
trminfo_fname = "test__taskremappinginfo.js"
nodecumslack_fname = "test__nodecumslack.js"
node_importedtasks = "test__nodeimportedtasks.js"
if(run_rm_off == True):
################################
# PS-based Remapping disabled
################################
SimParams.PSALGO_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_LOWESTBLOCKING_QUEEN_VIA_SYSSLACK
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = False
SimParams.PSALGO_VIEWER_ENABLED = False
FNAME_PREFIX = DATA_FOLDER+"RMOFF_"
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_RemappingBasic : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_disabled"
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =FNAME_PREFIX+tm_fname,
wf_res_summary_fname = FNAME_PREFIX+wf_res_fname,
gops_opbuff_summary_fname = FNAME_PREFIX+gops_opbuff_fname,
rmtbl_dt_summary_fname = FNAME_PREFIX+rmtbl_dt_fname,
output_format = "json")
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=FNAME_PREFIX+obuff_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=FNAME_PREFIX+trminfo_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=FNAME_PREFIX+psalgo_nodeprops)
if(run_rm_on == True):
################################
# PS-based Remapping enabled
################################
SimParams.PSALGO_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_LOWESTBLOCKING_QUEEN_VIA_SYSSLACK
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = True
SimParams.PSALGO_VIEWER_ENABLED = False
FNAME_PREFIX = DATA_FOLDER+"RMON_"
TQN = [0.072, 0.24, 0.48, 0.96]
THRESHOLDQN = [2.0, 9.0, 15.0]
REMAP_PERIOD = [float(SimParams.FR25_GOPLEN12_E2EDEADLINE * 1.98),
float(SimParams.FR25_GOPLEN12_E2EDEADLINE * 2.98),
float(SimParams.FR25_GOPLEN12_E2EDEADLINE * 4.98)]
THRESHOLDHOPCOUNT = [2,5,8]
DYN_THRQN_RATIO_INC = [0.1, 0.5, 0.7]
DYN_THRQN_RATIO_DEC = [0.1, 0.5, 0.7]
# loop through all permutations of the params
for each_tqn in TQN:
for each_thresholdqn in THRESHOLDQN:
for each_remapperiod in REMAP_PERIOD:
for each_thrhc in THRESHOLDHOPCOUNT:
for each_dynthrqnratinc in DYN_THRQN_RATIO_INC:
for each_dynthrqnratdec in DYN_THRQN_RATIO_DEC:
# set params
SimParams.PSALGO_TQN = each_tqn
SimParams.PSALGO_TDECAY = float(each_tqn/2.5)
SimParams.PSALGO_THRESHOLDQN = each_thresholdqn
SimParams.PSALGO_THRESHOLDHOPCOUNT = each_thrhc
SimParams.DYNAMIC_TASK_REMAPPING_SAMPLING_INTERVAL = each_remapperiod
SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO = [each_dynthrqnratinc, each_dynthrqnratdec]
fname_param_prefix = FNAME_PREFIX + str(each_tqn) + \
"_" + str(each_thresholdqn) + \
"_" + str(each_thrhc) + \
"_" + str(each_remapperiod) + \
"_" + str(each_dynthrqnratinc) + \
"_" + str(each_dynthrqnratdec) + "_"
# check if the simulation has run already before continuing
check_fname = fname_param_prefix+tm_fname
print "Checking file exists : " + str(check_fname)
if(_check_file_exists(check_fname) == True):
print "Simulation already exists ! "
else:
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_Simple : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_enabled"
print fname_param_prefix
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =fname_param_prefix+tm_fname,
wf_res_summary_fname = fname_param_prefix+wf_res_fname,
gops_opbuff_summary_fname = fname_param_prefix+gops_opbuff_fname,
rmtbl_dt_summary_fname = fname_param_prefix+rmtbl_dt_fname,
output_format = "json")
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=fname_param_prefix+obuff_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=fname_param_prefix+trminfo_fname)
MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=fname_param_prefix+psalgo_nodeprops)
# remapping on full fact params test
def runSim_RemappingMonteCarloParamsTest(run_rm_off=True, run_rm_on=True, seed=77379, multiseeds=False):
print "SEED === " + str(seed)
###############
# fixed params
###############
SimParams.NOC_W = 4
SimParams.NOC_H = 4
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
SimParams.NUM_WORKFLOWS = SimParams.NUM_NODES + 1
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
if(multiseeds==True):
DATA_FOLDER = "experiment_data/remapping_psbased_montecarlo/"+"seed_"+str(seed)+"/"
else:
DATA_FOLDER = "experiment_data/remapping_psbased_montecarlo/"
# name the report filenames
tm_fname = "test__timeline.png"
vs_bs_fname = "test__vsbs.js"
util_fname = "test__util.js"
wf_res_fname = "test__wfressumm.js"
gops_opbuff_fname = "test__gopsopbuffsumm.js"
rmtbl_dt_fname = "test__rmtbldt.js"
ibuff_fname = "test__ibuff.js"
obuff_fname = "test__obuff.js"
nodetqs_fname = "test__nodetqs.js"
rmtaskrelease_fname = "test__rmtaskrel.js"
mappingandpriass_fname = "test__mappingandpriass.js"
flowscompleted_fname = "test__flwcompleted.js"
nodetaskexectime_fname = "test__nodetaskexectime.js"
psalgo_nodeprops = "test__psalgonodeprops.js"
flowsadded_fname = "test__flwsadded.js"
trminfo_fname = "test__taskremappinginfo.js"
nodecumslack_fname = "test__nodecumslack.js"
node_importedtasks = "test__nodeimportedtasks.js"
################################
# PS-based Remapping disabled
################################
SimParams.PSALGO_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = False
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_RANDOM_QUEEN
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = False
SimParams.PSALGO_VIEWER_ENABLED = False
FNAME_PREFIX = DATA_FOLDER+"RMOFF_"
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_RemappingBasic : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_disabled"
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =FNAME_PREFIX+tm_fname,
wf_res_summary_fname = FNAME_PREFIX+wf_res_fname,
gops_opbuff_summary_fname = FNAME_PREFIX+gops_opbuff_fname,
rmtbl_dt_summary_fname = FNAME_PREFIX+rmtbl_dt_fname,
output_format = "json")
#MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, FNAME_PREFIX+vs_bs_fname)
#MMMSim.SimMon.report_InstUtilisation(dump_to_file=FNAME_PREFIX+util_fname)
#MMMSim.SimMon.report_InputBuffer(dump_to_file=FNAME_PREFIX+ibuff_fname)
#MMMSim.SimMon.report_NodeTQs(dump_to_file=FNAME_PREFIX+nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=FNAME_PREFIX+obuff_fname)
#MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=FNAME_PREFIX+rmtaskrelease_fname)
#MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=FNAME_PREFIX+mappingandpriass_fname)
#MMMSim.SimMon.report_FlowsCompleted(dump_to_file=FNAME_PREFIX+flowscompleted_fname)
#MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=FNAME_PREFIX+nodetaskexectime_fname)
#MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=FNAME_PREFIX+psalgo_nodeprops)
#MMMSim.SimMon.report_FlowsAdded(dump_to_file=FNAME_PREFIX+flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=FNAME_PREFIX+trminfo_fname)
#MMMSim.SimMon.report_NodeCumSlack(dump_to_file=FNAME_PREFIX+nodecumslack_fname)
#MMMSim.SimMon.report_NodeImportedTasks(dump_to_file=FNAME_PREFIX+node_importedtasks)
################################
# PS-based Remapping enabled
################################
SimParams.PSALGO_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_ENABLED = True
SimParams.DYNAMIC_TASK_REMAPPING_SCHEME = TaskRemapDecentSchemes.TASKREMAPPINGDECENTSCHEMES_LOWESTBLOCKING_QUEEN
SimParams.CPUNODE_MONITOR_TASKSET_SLACK = True
SimParams.PSALGO_VIEWER_ENABLED = False
FNAME_PREFIX = DATA_FOLDER+"RMON_"
# get randomly generated params
pregenerated_random_params = generate_random_params()
for each_k, each_param_entry in pregenerated_random_params.iteritems():
# set params
SimParams.PSALGO_TQN = each_param_entry["rand_TQN"]
SimParams.PSALGO_TDECAY = each_param_entry["rand_TDECAY"]
SimParams.PSALGO_THRESHOLDQN = each_param_entry["rand_THRESHOLDQN"]
SimParams.PSALGO_KHOPDECAY = each_param_entry["rand_KHOPDECAY"]
SimParams.PSALGO_KTIMEDECAY = each_param_entry["rand_KTIMEDECAY"]
SimParams.PSALGO_HQN = each_param_entry["rand_HQN"]
SimParams.PSALGO_INITIALHORMONEAMNT_WORKER = each_param_entry["rand_INITHD"]
SimParams.PSALGO_INITIALHORMONEAMNT_QUEEN = each_param_entry["rand_INITHD"]
SimParams.DYNAMIC_TASK_REMAPPING_SAMPLING_INTERVAL = each_param_entry["rand_REMAP_PERIOD"]
SimParams.DYNAMIC_TASK_REMAPPING_TASK_LATE_ESTIM_LATENESS_RATIO = each_param_entry["rand_LATENESS_RATIO"]
fname_param_prefix = FNAME_PREFIX + str(each_param_entry["rand_TQN"]) + \
"_" + str(each_param_entry["rand_TDECAY"]) + \
"_" + str(each_param_entry["rand_THRESHOLDQN"]) + \
"_" + str(each_param_entry["rand_KHOPDECAY"]) + \
"_" + str(each_param_entry["rand_KTIMEDECAY"]) + \
"_" + str(each_param_entry["rand_HQN"]) + \
"_" + str(each_param_entry["rand_INITHD"]) + \
"_" + str(each_param_entry["rand_REMAP_PERIOD"]) + \
"_" + str(each_param_entry["rand_LATENESS_RATIO"]) + "_"
# check if the simulation has run already before continuing
check_fname = fname_param_prefix+tm_fname
print "Checking file exists : " + str(check_fname)
if(_check_file_exists(check_fname) == True):
print "Simulation already exists ! "
else:
random.seed(seed)
np.random.seed(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Running runSim_Simple : num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", Remapping_enabled"
print fname_param_prefix
print "----------------------------------------------------------------------------------------------------------------------------"
env, last_scheduled_task_time = MMMSim.runMainSimulation()
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print env.now
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname =fname_param_prefix+tm_fname,
wf_res_summary_fname = fname_param_prefix+wf_res_fname,
gops_opbuff_summary_fname = fname_param_prefix+gops_opbuff_fname,
rmtbl_dt_summary_fname = fname_param_prefix+rmtbl_dt_fname,
output_format = "json")
#MMMSim.SimMon.report_VideoStream_BasicStats(wf_results_summary, FNAME_PREFIX+vs_bs_fname)
#MMMSim.SimMon.report_InstUtilisation(dump_to_file=FNAME_PREFIX+util_fname)
#MMMSim.SimMon.report_InputBuffer(dump_to_file=FNAME_PREFIX+ibuff_fname)
#MMMSim.SimMon.report_NodeTQs(dump_to_file=FNAME_PREFIX+nodetqs_fname)
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=fname_param_prefix+obuff_fname)
#MMMSim.SimMon.report_RMTaskReleaseInfo(dump_to_file=FNAME_PREFIX+rmtaskrelease_fname)
#MMMSim.SimMon.report_MappingAndPriAssInfo(dump_to_file=FNAME_PREFIX+mappingandpriass_fname)
#MMMSim.SimMon.report_FlowsCompleted(dump_to_file=FNAME_PREFIX+flowscompleted_fname)
#MMMSim.SimMon.report_NodeTaskExecTimeline(dump_to_file=FNAME_PREFIX+nodetaskexectime_fname)
#MMMSim.SimMon.report_PSAlgoNodePSProps(dump_to_file=fname_param_prefix+psalgo_nodeprops)
#MMMSim.SimMon.report_FlowsAdded(dump_to_file=FNAME_PREFIX+flowsadded_fname)
MMMSim.SimMon.report_TaskRemappingInfo(dump_to_file=fname_param_prefix+trminfo_fname)
#MMMSim.SimMon.report_NodeCumSlack(dump_to_file=fname_param_prefix+nodecumslack_fname)
#MMMSim.SimMon.report_NodeImportedTasks(dump_to_file=fname_param_prefix+node_importedtasks)
def _makeDir(directory):
try:
os.stat(directory)
except:
os.makedirs(directory)
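# Note: this check-then-create pattern is racy; os.makedirs can still raise if
# the directory appears between the stat and the create. A defensive variant
# would wrap os.makedirs in try/except and ignore errno.EEXIST.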
def _check_file_exists(fname):
return os.path.exists(fname)
############################################################################
############################################################################
## MAIN SCRIPT SECTION
############################################################################
############################################################################
sys.setrecursionlimit(1500)
# collect command line params
parser = argparse.ArgumentParser(__file__, description="Run specified experiment on abstract simulator")
parser.add_argument("--exp_type", "-t", help="Experiment Type", default=None) # which experiment ?
# params for mapping + pri ass exp
parser.add_argument("--wf_num", "-w", help="Number of workflows", type=int, default=-1)
parser.add_argument("--forced_seed", help="experiment - seed", type=int, default=-1)
# psalgo related params
parser.add_argument("--tqn", help="param_rand_TQN", type=float, default=1.0)
parser.add_argument("--tdecay", help="param_rand_TDECAY", type=float, default=1.0)
parser.add_argument("--thresholdqn", help="param_rand_THRESHOLDQN", type=float, default=-1.0)
parser.add_argument("--thresholdqn_r_d", help="param_rand_THRESHOLDQN_RAT_DECREASE", type=float, default=-1.0)
parser.add_argument("--thresholdqn_r_i", help="param_rand_THRESHOLDQN_RAT_INCREASE", type=float, default=-1.0)
parser.add_argument("--khopdec", help="param_rand_KHOPDECAY", type=float, default=1.0)
parser.add_argument("--ktimedec", help="param_rand_KTIMEDECAY", type=float, default=1.0)
parser.add_argument("--hqn", help="param_rand_HQN", type=int, default=-1)
parser.add_argument("--inithd", help="param_rand_INITHD", type=int, default=-1)
parser.add_argument("--remap_p", help="param_rand_REMAP_PERIOD", type=float, default=1.0)
parser.add_argument("--late_rat", help="param_rand_LATENESS_RATIO", type=float, default=1.0)
# noc size
parser.add_argument("--noc_w", help="noc_w", type=int, default=-1)
parser.add_argument("--noc_h", help="noc_h", type=int, default=-1)
args = parser.parse_args()
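# Hedged example invocations (flag names come from the parser above; the
# values are illustrative, not recommended settings):
#   python RunSim_Exp_PSBasedRemapping.py --exp_type=Exp_RemappingBasic --noc_w=4 --noc_h=4
#   python RunSim_Exp_PSBasedRemapping.py --exp_type=Exp_RemappingMonteCarlo --forced_seed=1234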
####################################
## check which experiment to run ##
####################################
if(args.exp_type == "Exp_Simple"):
runSim_Simple()
# basic rm on/off
elif(args.exp_type == "Exp_RemappingBasic"):
if(args.forced_seed != -1):
runSim_RemappingBasic(run_rm_off=True, run_rm_on=True, seed=args.forced_seed, multiseeds=True)
else:
runSim_RemappingBasic(run_rm_off=False, run_rm_on=True, noc_w=args.noc_w, noc_h=args.noc_h)
# full factorial param test on rm_on
elif(args.exp_type == "Exp_RemappingFullFact"):
if(args.forced_seed != -1):
runSim_RemappingFullFactParams(run_rm_off=True, run_rm_on=True, seed=args.forced_seed, multiseeds=True)
else:
runSim_RemappingFullFactParams(run_rm_off=True, run_rm_on=True)
# monte-carlo param test on rm_on
elif(args.exp_type == "Exp_RemappingMonteCarlo"):
if(args.forced_seed != -1):
runSim_RemappingMonteCarloParamsTest(run_rm_off=True, run_rm_on=True, seed=args.forced_seed, multiseeds=True)
else:
runSim_RemappingMonteCarloParamsTest(run_rm_off=True, run_rm_on=True)
else:
#parser.print_usage()
#sys.exit("invalid arguments")
runSim_RemappingBasic(run_rm_off=True, run_rm_on=True)
|
pombreda/pydbgr | refs/heads/master | test/unit/test-bytecode.py | 2 | #!/usr/bin/env python
'Unit test for trepan.bytecode'
import inspect, unittest
from import_relative import import_relative
Mcode = import_relative('lib.bytecode', '...trepan')
class TestByteCode(unittest.TestCase):
def test_contains_make_function(self):
def sqr(x):
return x * x
frame = inspect.currentframe()
co = frame.f_code
lineno = frame.f_lineno
self.assertTrue(Mcode.stmt_contains_opcode(co, lineno-4,
'MAKE_FUNCTION'))
self.assertFalse(Mcode.stmt_contains_opcode(co, lineno,
'MAKE_FUNCTION'))
return
def test_op_at_frame(self):
frame = inspect.currentframe()
self.assertEqual('CALL_FUNCTION', Mcode.op_at_frame(frame))
return
def test_is_def_frame(self):
# Not a "def" statement because frame is wrong spot
frame = inspect.currentframe()
self.assertFalse(Mcode.is_def_stmt('foo(): pass', frame))
return
if __name__ == '__main__':
unittest.main()
|
broferek/ansible | refs/heads/devel | test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py | 44 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.compat.mock import Mock
from ansible.module_utils.facts.system.distribution import DistributionFiles
def mock_module():
mock_module = Mock()
mock_module.params = {'gather_subset': ['all'],
'gather_timeout': 5,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value=None)
return mock_module
def test_parse_distribution_file_clear_linux():
test_input = {
'name': 'Clearlinux',
'data': 'NAME="Clear Linux OS"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME="Clear Linux OS"\nANSI_COLOR="1;35"'
'\nHOME_URL="https://clearlinux.org"\nSUPPORT_URL="https://clearlinux.org"\nBUG_REPORT_URL="mailto:dev@lists.clearlinux.org"',
'path': '/usr/lib/os-release',
'collected_facts': None,
}
result = (
True,
{
'distribution': 'Clear Linux OS',
'distribution_major_version': '28120',
'distribution_release': 'clear-linux-os',
'distribution_version': '28120'
}
)
distribution = DistributionFiles(module=mock_module())
assert result == distribution.parse_distribution_file_ClearLinux(**test_input)
def test_parse_distribution_file_clear_linux_no_match():
# Test against data from Linux Mint and CoreOS to ensure we do not get a reported
# match from parse_distribution_file_ClearLinux()
scenarios = [
{
# CoreOS
'case': {
'name': 'Clearlinux',
'data': 'NAME="Container Linux by CoreOS"\nID=coreos\nVERSION=1911.5.0\nVERSION_ID=1911.5.0\nBUILD_ID=2018-12-15-2317\nPRETTY_NAME="Container L'
'inux by CoreOS 1911.5.0 (Rhyolite)"\nANSI_COLOR="38;5;75"\nHOME_URL="https://coreos.com/"\nBUG_REPORT_URL="https://issues.coreos.com"'
'\nCOREOS_BOARD="amd64-usr"',
'path': '/usr/lib/os-release',
'collected_facts': None,
},
'result': (False, {}),
},
{
# Linux Mint
'case': {
'name': 'Clearlinux',
'data': 'NAME="Linux Mint"\nVERSION="19.1 (Tessa)"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME="Linux Mint 19.1"\nVERSION_ID="19.1"\nHOME_URL="h'
'ttps://www.linuxmint.com/"\nSUPPORT_URL="https://forums.ubuntu.com/"\nBUG_REPORT_URL="http://linuxmint-troubleshooting-guide.readthedo'
'cs.io/en/latest/"\nPRIVACY_POLICY_URL="https://www.linuxmint.com/"\nVERSION_CODENAME=tessa\nUBUNTU_CODENAME=bionic',
'path': '/usr/lib/os-release',
'collected_facts': None,
},
'result': (False, {}),
},
]
distribution = DistributionFiles(module=mock_module())
for scenario in scenarios:
assert scenario['result'] == distribution.parse_distribution_file_ClearLinux(**scenario['case'])
|
MortalViews/incubator-airflow | refs/heads/master | airflow/operators/hive_to_druid.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook
from airflow.hooks.druid_hook import DruidHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
LOAD_CHECK_INTERVAL = 5
DEFAULT_TARGET_PARTITION_SIZE = 5000000
class HiveToDruidTransfer(BaseOperator):
"""
Moves data from Hive to Druid. Note that for now the data is loaded
into memory before being pushed to Druid, so this operator should
be used for small amounts of data.
:param sql: SQL query to execute against Hive; the result is ingested into Druid
:type sql: str
:param druid_datasource: the datasource you want to ingest into in druid
:type druid_datasource: str
:param ts_dim: the timestamp dimension
:type ts_dim: str
:param metric_spec: the metrics you want to define for your data
:type metric_spec: list
:param hive_cli_conn_id: the hive connection id
:type hive_cli_conn_id: str
:param druid_ingest_conn_id: the druid ingest connection id
:type druid_ingest_conn_id: str
:param metastore_conn_id: the metastore connection id
:type metastore_conn_id: str
:param hadoop_dependency_coordinates: list of coordinates to squeeze
into the ingest json
:type hadoop_dependency_coordinates: list of str
:param intervals: list of time intervals that define segments; this
is passed as-is to the json object
:type intervals: list
:param hive_tblproperties: additional properties for tblproperties in
hive for the staging table
:type hive_tblproperties: dict
"""
template_fields = ('sql', 'intervals')
template_ext = ('.sql',)
@apply_defaults
def __init__(
self,
sql,
druid_datasource,
ts_dim,
metric_spec=None,
hive_cli_conn_id='hive_cli_default',
druid_ingest_conn_id='druid_ingest_default',
metastore_conn_id='metastore_default',
hadoop_dependency_coordinates=None,
intervals=None,
num_shards=-1,
target_partition_size=-1,
query_granularity="NONE",
segment_granularity="DAY",
hive_tblproperties=None,
*args, **kwargs):
super(HiveToDruidTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.druid_datasource = druid_datasource
self.ts_dim = ts_dim
self.intervals = intervals or ['{{ ds }}/{{ tomorrow_ds }}']
self.num_shards = num_shards
self.target_partition_size = target_partition_size
self.query_granularity = query_granularity
self.segment_granularity = segment_granularity
self.metric_spec = metric_spec or [{
"name": "count",
"type": "count"}]
self.hive_cli_conn_id = hive_cli_conn_id
self.hadoop_dependency_coordinates = hadoop_dependency_coordinates
self.druid_ingest_conn_id = druid_ingest_conn_id
self.metastore_conn_id = metastore_conn_id
self.hive_tblproperties = hive_tblproperties
def execute(self, context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Extracting data from Hive")
hive_table = 'druid.' + context['task_instance_key_str'].replace('.', '_')
sql = self.sql.strip().strip(';')
tblproperties = ''.join([", '{}' = '{}'".format(k, v) for k, v in (self.hive_tblproperties or {}).items()])
hql = """\
SET mapred.output.compress=false;
SET hive.exec.compress.output=false;
DROP TABLE IF EXISTS {hive_table};
CREATE TABLE {hive_table}
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE
TBLPROPERTIES ('serialization.null.format' = ''{tblproperties})
AS
{sql}
""".format(**locals())
self.log.info("Running command:\n %s", hql)
hive.run_cli(hql)
m = HiveMetastoreHook(self.metastore_conn_id)
# Get the Hive table and extract the columns
t = m.get_table(hive_table)
columns = [col.name for col in t.sd.cols]
# Get the path on hdfs
hdfs_uri = m.get_table(hive_table).sd.location
pos = hdfs_uri.find('/user')
static_path = hdfs_uri[pos:]
schema, table = hive_table.split('.')
druid = DruidHook(druid_ingest_conn_id=self.druid_ingest_conn_id)
try:
index_spec = self.construct_ingest_query(
static_path=static_path,
columns=columns,
)
self.log.info("Inserting rows into Druid, hdfs path: %s", static_path)
druid.submit_indexing_job(index_spec)
self.log.info("Load seems to have succeeded!")
finally:
self.log.info(
"Cleaning up by dropping the temp Hive table %s",
hive_table
)
hql = "DROP TABLE IF EXISTS {}".format(hive_table)
hive.run_cli(hql)
def construct_ingest_query(self, static_path, columns):
"""
Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:type static_path: str
:param columns: List of all the columns that are available
:type columns: list
"""
# backward compatibility for num_shards; target_partition_size is the default setting
# and overrides num_shards
num_shards = self.num_shards
target_partition_size = self.target_partition_size
if self.target_partition_size == -1:
if self.num_shards == -1:
target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
else:
num_shards = -1
metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count']
# Take all the columns that are not the time dimension or a metric as the dimension columns
dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
ingest_query_dict = {
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.metric_spec,
"granularitySpec": {
"queryGranularity": self.query_granularity,
"intervals": self.intervals,
"type": "uniform",
"segmentGranularity": self.segment_granularity,
},
"parser": {
"type": "string",
"parseSpec": {
"columns": columns,
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": dimensions, # list of names
"spatialDimensions": []
},
"timestampSpec": {
"column": self.ts_dim,
"format": "auto"
},
"format": "tsv"
}
},
"dataSource": self.druid_datasource
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": target_partition_size,
"numShards": num_shards,
},
},
"ioConfig": {
"inputSpec": {
"paths": static_path,
"type": "static"
},
"type": "hadoop"
}
}
}
if self.hadoop_dependency_coordinates:
ingest_query_dict['hadoopDependencyCoordinates'] = self.hadoop_dependency_coordinates
return ingest_query_dict
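# A hedged usage sketch (kept as a comment so the module stays import-safe):
# wiring the operator into a DAG. The DAG id, start date and SQL are
# assumptions for the example, not values from this module.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.operators.hive_to_druid import HiveToDruidTransfer
#
#     dag = DAG('hive_to_druid_example', start_date=datetime(2017, 1, 1))
#     load_events = HiveToDruidTransfer(
#         task_id='load_events',
#         sql='SELECT ts, country, clicks FROM db.events',
#         druid_datasource='events',
#         ts_dim='ts',
#         dag=dag,
#     )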
|
harry-7/addons-server | refs/heads/master | src/olympia/accounts/tests/test_helpers.py | 6 | from django.test import RequestFactory
import mock
from olympia.accounts.templatetags import jinja_helpers
@mock.patch(
'olympia.accounts.templatetags.jinja_helpers.utils.default_fxa_login_url',
lambda c: 'http://auth.ca')
def test_login_link():
request = RequestFactory().get('/en-US/firefox/addons')
assert jinja_helpers.login_link({'request': request}) == (
'http://auth.ca')
@mock.patch(
'olympia.accounts.templatetags.jinja_helpers.utils.'
'default_fxa_register_url',
lambda c: 'http://auth.ca')
def test_register_link():
request = RequestFactory().get('/en-US/firefox/addons')
assert jinja_helpers.register_link({'request': request}) == (
'http://auth.ca')
|
smishenk/blink-crosswalk | refs/heads/master | Tools/Scripts/webkitpy/common/system/executive.py | 12 | # Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import errno
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import time
from webkitpy.common.system.outputtee import Tee
from webkitpy.common.system.filesystem import FileSystem
_log = logging.getLogger(__name__)
class ScriptError(Exception):
def __init__(self,
message=None,
script_args=None,
exit_code=None,
output=None,
cwd=None,
output_limit=500):
shortened_output = output
if output and output_limit and len(output) > output_limit:
shortened_output = "Last %s characters of output:\n%s" % (output_limit, output[-output_limit:])
if not message:
message = 'Failed to run "%s"' % repr(script_args)
if exit_code:
message += " exit_code: %d" % exit_code
if cwd:
message += " cwd: %s" % cwd
if shortened_output:
message += "\n\noutput: %s" % shortened_output
Exception.__init__(self, message)
self.script_args = script_args # 'args' is already used by Exception
self.exit_code = exit_code
self.output = output
self.cwd = cwd
def message_with_output(self):
return unicode(self)
def command_name(self):
command_path = self.script_args
if type(command_path) is list:
command_path = command_path[0]
return os.path.basename(command_path)
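# A hedged illustration (not part of webkitpy) of the message the constructor
# above assembles; the command and values are made up:
#
#     >>> e = ScriptError(script_args=['ls', '--nope'], exit_code=2, output='bad flag')
#     >>> str(e)
#     'Failed to run "[\'ls\', \'--nope\']" exit_code: 2\n\noutput: bad flag'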
class Executive(object):
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def _should_close_fds(self):
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
# multiple threads). See http://bugs.python.org/issue2320 .
# Note that close_fds isn't supported on Windows, but this bug only
# shows up on Mac and Linux.
return sys.platform not in ('win32', 'cygwin')
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
child_process = self.popen(args,
stdout=self.PIPE,
stderr=self.STDOUT,
close_fds=self._should_close_fds(),
**kwargs)
# Use our own custom wait loop because Popen ignores a tee'd
# stderr/stdout.
# FIXME: This could be improved not to flatten output to stdout.
while True:
output_line = child_process.stdout.readline()
if output_line == "" and child_process.poll() != None:
# poll() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
return child_process.poll()
# We assume that the child process wrote to us in utf-8,
# so no re-encoding is necessary before writing here.
teed_output.write(output_line)
def cpu_count(self):
return multiprocessing.cpu_count()
@staticmethod
def interpreter_for_script(script_path, fs=None):
fs = fs or FileSystem()
lines = fs.read_text_file(script_path).splitlines()
if not len(lines):
return None
first_line = lines[0]
if not first_line.startswith('#!'):
return None
if first_line.find('python') > -1:
return sys.executable
if first_line.find('perl') > -1:
return 'perl'
if first_line.find('ruby') > -1:
return 'ruby'
return None
@staticmethod
def shell_command_for_script(script_path, fs=None):
fs = fs or FileSystem()
# Win32 does not support shebang. We need to detect the interpreter ourselves.
if sys.platform == 'win32':
interpreter = Executive.interpreter_for_script(script_path, fs)
if interpreter:
return [interpreter, script_path]
return [script_path]
def kill_process(self, pid):
"""Attempts to kill the given pid.
Will fail silently if the pid does not exist or permissions are insufficient."""
if sys.platform == "win32":
# We only use taskkill.exe on windows (not cygwin) because subprocess.pid
# is a CYGWIN pid and taskkill.exe expects a windows pid.
# Thankfully os.kill on CYGWIN handles either pid type.
command = ["taskkill.exe", "/f", "/t", "/pid", pid]
# taskkill will exit 128 if the process is not found. We should log.
self.run_command(command, error_handler=self.ignore_error)
return
# According to http://docs.python.org/library/os.html
# os.kill isn't available on Windows. python 2.5.5 os.kill appears
# to work in cygwin, however it occasionally raises EAGAIN.
retries_left = 10 if sys.platform == "cygwin" else 1
while retries_left > 0:
try:
retries_left -= 1
os.kill(pid, signal.SIGKILL)
_ = os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno == errno.EAGAIN:
if retries_left <= 0:
_log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid)
continue
if e.errno == errno.ESRCH: # The process does not exist.
return
if e.errno == errno.EPIPE: # The process has exited already on cygwin
return
if e.errno == errno.ECHILD:
# Can't wait on a non-child process, but the kill worked.
return
if e.errno == errno.EACCES and sys.platform == 'cygwin':
# Cygwin python sometimes can't kill native processes.
return
raise
def _win32_check_running_pid(self, pid):
# importing ctypes at the top-level seems to cause weird crashes at
# exit under cygwin on apple's win port. Only win32 needs cygwin, so
# we import it here instead. See https://bugs.webkit.org/show_bug.cgi?id=91682
import ctypes
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number
hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
result = False
if not Process32First(hProcessSnap, ctypes.byref(pe32)):
_log.debug("Failed getting first process.")
CloseHandle(hProcessSnap)
return result
while True:
if pe32.th32ProcessID == pid:
result = True
break
if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
break
CloseHandle(hProcessSnap)
return result
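# Illustrative sketch (added comment, not in the original file): the
# Toolhelp32 snapshot walk above can be sanity-checked against our own pid,
# which is guaranteed to be alive while we run:
#
#   executive = Executive()
#   assert executive._win32_check_running_pid(os.getpid())  # win32 only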
def check_running_pid(self, pid):
"""Return True if pid is alive, otherwise return False."""
if sys.platform == 'win32':
return self._win32_check_running_pid(pid)
try:
os.kill(pid, 0)
return True
except OSError:
return False
def running_pids(self, process_name_filter=None):
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = []
if sys.platform in ("win32", "cygwin"):
# FIXME: running_pids isn't implemented on Windows yet...
return []
ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
stdout, _ = ps_process.communicate()
for line in stdout.splitlines():
try:
# In some cases the line can contain one or more
# leading whitespace characters, so strip it before splitting.
pid, process_name = line.strip().split(' ', 1)
if process_name_filter(process_name):
running_pids.append(int(pid))
except ValueError, e:
pass
return sorted(running_pids)
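# Illustrative sketch (added comment, not in the original file): the filter
# receives the 'comm' column from ps, so collecting all python processes is:
#
#   python_pids = Executive().running_pids(lambda name: 'python' in name)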
def wait_newest(self, process_name_filter=None):
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = self.running_pids(process_name_filter)
if not running_pids:
return
pid = running_pids[-1]
while self.check_running_pid(pid):
time.sleep(0.25)
def wait_limited(self, pid, limit_in_seconds=None, check_frequency_in_seconds=None):
seconds_left = limit_in_seconds or 10
sleep_length = check_frequency_in_seconds or 1
while seconds_left > 0 and self.check_running_pid(pid):
seconds_left -= sleep_length
time.sleep(sleep_length)
def _windows_image_name(self, process_name):
name, extension = os.path.splitext(process_name)
if not extension:
# taskkill expects processes to end in .exe
# If necessary we could add a flag to disable appending .exe.
process_name = "%s.exe" % name
return process_name
def interrupt(self, pid):
interrupt_signal = signal.SIGINT
# FIXME: The python docs seem to imply that platform == 'win32' may need to use signal.CTRL_C_EVENT
# http://docs.python.org/2/library/signal.html
try:
os.kill(pid, interrupt_signal)
except OSError:
# Silently ignore when the pid doesn't exist.
# It's impossible for callers to avoid race conditions with process shutdown.
pass
# Error handlers do not need to be static methods once all callers are
# updated to use an Executive object.
@staticmethod
def default_error_handler(error):
raise error
@staticmethod
def ignore_error(error):
pass
def _compute_stdin(self, input):
"""Returns (stdin, string_to_communicate)"""
# FIXME: We should be returning /dev/null for stdin
# or closing stdin after process creation to prevent
# child processes from getting input from the user.
if not input:
return (None, None)
if hasattr(input, "read"): # Check if the input is a file.
return (input, None) # Assume the file is in the right encoding.
# Popen in Python 2.5 and before does not automatically encode unicode objects.
# http://bugs.python.org/issue5290
# See https://bugs.webkit.org/show_bug.cgi?id=37528
# for an example of a regression caused by passing a unicode string directly.
# FIXME: We may need to encode differently on different platforms.
if isinstance(input, unicode):
input = input.encode(self._child_process_encoding())
return (self.PIPE, input)
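# Illustrative sketch (added comment, not in the original file): on a
# platform whose child encoding is utf-8, a unicode string is encoded and
# sent through a pipe, while a file object is passed straight through:
#
#   executive._compute_stdin(u'caf\xe9')      # -> (executive.PIPE, 'caf\xc3\xa9')
#   executive._compute_stdin(open('in.txt'))  # -> (<open file 'in.txt'>, None)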
def command_for_printing(self, args):
"""Returns a print-ready string representing command args.
The string should be copy/paste ready for execution in a shell."""
args = self._stringify_args(args)
escaped_args = []
for arg in args:
if isinstance(arg, unicode):
# Escape any non-ascii characters for easy copy/paste
arg = arg.encode("unicode_escape")
# FIXME: Do we need to fix quotes here?
escaped_args.append(arg)
return " ".join(escaped_args)
def run_command(self,
args,
cwd=None,
env=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=True, debug_logging=True):
"""Popen wrapper for convenience and to work around python bugs."""
assert(isinstance(args, list) or isinstance(args, tuple))
start_time = time.time()
stdin, string_to_communicate = self._compute_stdin(input)
stderr = self.STDOUT if return_stderr else None
process = self.popen(args,
stdin=stdin,
stdout=self.PIPE,
stderr=stderr,
cwd=cwd,
env=env,
close_fds=self._should_close_fds())
output = process.communicate(string_to_communicate)[0]
# run_command automatically decodes to unicode() unless explicitly told not to.
if decode_output:
output = output.decode(self._child_process_encoding())
# wait() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
exit_code = process.wait()
if debug_logging:
_log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
if return_exit_code:
return exit_code
if exit_code:
script_error = ScriptError(script_args=args,
exit_code=exit_code,
output=output,
cwd=cwd)
(error_handler or self.default_error_handler)(script_error)
return output
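# Illustrative sketch (added comment, not in the original file; the commands
# are arbitrary): run_command() raises ScriptError on a non-zero exit unless
# an error_handler is supplied:
#
#   output = executive.run_command(['ls', '-l'])
#   output = executive.run_command(['ls', '/nonexistent'],
#                                  error_handler=Executive.ignore_error)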
def _child_process_encoding(self):
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return 'mbcs'
# All other platforms use UTF-8.
# FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
# which will expect arguments to be encoded using the current code
# page.
return 'utf-8'
def _should_encode_child_process_arguments(self):
# Cygwin's Python's os.execv doesn't support unicode command
# arguments, and neither does Cygwin's execv itself.
if sys.platform == 'cygwin':
return True
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return True
return False
def _encode_argument_if_needed(self, argument):
if not self._should_encode_child_process_arguments():
return argument
return argument.encode(self._child_process_encoding())
def _stringify_args(self, args):
# Popen will throw an exception if args are non-strings (like int())
string_args = map(unicode, args)
# The Windows implementation of Popen cannot handle unicode strings. :(
return map(self._encode_argument_if_needed, string_args)
# The only required argument to popen is named "args", the rest are optional keyword arguments.
def popen(self, args, **kwargs):
# FIXME: We should always be stringifying the args, but callers who pass shell=True
# expect that the exact bytes passed will get passed to the shell (even if they're wrongly encoded).
# shell=True is wrong for many other reasons, and we should remove this
# hack as soon as we can fix all callers to not use shell=True.
if kwargs.get('shell') == True:
string_args = args
else:
string_args = self._stringify_args(args)
return subprocess.Popen(string_args, **kwargs)
def call(self, args, **kwargs):
return subprocess.call(self._stringify_args(args), **kwargs)
def run_in_parallel(self, command_lines_and_cwds, processes=None):
"""Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
assert len(command_lines_and_cwds)
return self.map(_run_command_thunk, command_lines_and_cwds, processes)
def map(self, thunk, arglist, processes=None):
if sys.platform in ('cygwin', 'win32') or len(arglist) == 1:
return map(thunk, arglist)
pool = multiprocessing.Pool(processes=(processes or multiprocessing.cpu_count()))
try:
return pool.map(thunk, arglist)
finally:
pool.close()
pool.join()
def _run_command_thunk(cmd_line_and_cwd):
# Note that this needs to be a module-level (and hence picklable) function to work with multiprocessing.Pool.
(cmd_line, cwd) = cmd_line_and_cwd
proc = subprocess.Popen(cmd_line, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout, stderr)
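# Illustrative sketch (added comment, not in the original file; the commands
# are arbitrary): run_in_parallel() fans the thunk above out over a
# multiprocessing.Pool, one (cmd_line, cwd) tuple per job:
#
#   results = Executive().run_in_parallel([(['ls'], '/tmp'), (['ls'], '/etc')])
#   # -> [(0, <stdout>, <stderr>), (0, <stdout>, <stderr>)]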
|
qewerty/moto.old | refs/heads/master | tools/scons/engine/SCons/compat/_scons_subprocess.py | 5 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 basically work as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import string
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
try:
import threading
except ImportError:
# SCons: the threading module is only used by the communicate()
# method, which we don't actually use, so don't worry if we
# can't import it.
pass
import msvcrt
try:
# Try to get _subprocess
from _subprocess import *
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
except ImportError:
# If not there, then drop back to requiring pywin32
# TODO: Should this be wrapped in try as well? To notify user to install
# pywin32 ? With URL to it?
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
import select
import errno
import fcntl
import pickle
try:
fcntl.F_GETFD
except AttributeError:
fcntl.F_GETFD = 1
try:
fcntl.F_SETFD
except AttributeError:
fcntl.F_SETFD = 2
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
MAXFD = 256
# True/False does not exist on 2.2.0
try:
False
except NameError:
False = 0
True = 1
try:
isinstance(1, int)
except TypeError:
def is_int(obj):
return type(obj) == type(1)
def is_int_or_long(obj):
return type(obj) in (type(1), type(1L))
else:
def is_int(obj):
return isinstance(obj, int)
def is_int_or_long(obj):
return isinstance(obj, (int, long))
try:
types.StringTypes
except AttributeError:
try:
types.StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
types.StringTypes = (types.StringType,)
def is_string(obj):
return type(obj) in types.StringTypes
else:
def is_string(obj):
return isinstance(obj, types.StringTypes)
_active = []
def _cleanup():
for inst in _active[:]:
if inst.poll(_deadstate=sys.maxint) >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return apply(Popen, popenargs, kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = apply(call, popenargs, kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return string.join(result, '')
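# Illustrative sketch (added comment, not in the original file), applying
# the quoting rules above: an argument containing a space gets quoted, and
# an embedded double quote is backslash-escaped:
#
#   list2cmdline(['a b', 'c"d'])   # -> '"a b" c\\"d'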
try:
object
except NameError:
class object:
pass
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not is_int_or_long(bufsize):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows "
"platforms")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
elif self.stderr:
stderr = self.stderr.read()
self.wait()
return (stdout, stderr)
return self._communicate(input)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
# Detach and turn into fd
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif is_int(stdin):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
# Detach and turn into fd
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif is_int(stdout):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
# Detach and turn into fd
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return DuplicateHandle(GetCurrentProcess(), handle,
GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags = creationflags | CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
# must inherit handles to pass std
# handles
1,
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise apply(WindowsError, e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif is_int(stdin):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif is_int(stdout):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if is_string(args):
args = [args]
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
self._set_cloexec_flag(errpipe_write)
self.pid = os.fork()
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite:
os.close(p2cwrite)
if c2pread:
os.close(c2pread)
if errread:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread:
os.dup2(p2cread, 0)
if c2pwrite:
os.dup2(c2pwrite, 1)
if errwrite:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
try:
set
except NameError:
# Fall-back for earlier Python versions, so epydoc
# can use this module directly to execute things.
if p2cread:
os.close(p2cread)
if c2pwrite and c2pwrite not in (p2cread,):
os.close(c2pwrite)
if errwrite and errwrite not in (p2cread, c2pwrite):
os.close(errwrite)
else:
for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
if fd: os.close(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
apply(preexec_fn)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = string.join(exc_lines, '')
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
os.close(errpipe_write)
if p2cread and p2cwrite:
os.close(p2cread)
if c2pwrite and c2pread:
os.close(c2pwrite)
if errwrite and errread:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
os.close(errpipe_read)
if data != "":
os.waitpid(self.pid, 0)
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except os.error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = os.waitpid(self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
rlist, wlist, xlist = select.select(read_set, write_set, [])
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk of
# blocking. POSIX defines PIPE_BUF >= 512
bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
input_offset = input_offset + bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = string.join(stdout, '')
if stderr is not None:
stderr = string.join(stderr, '')
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
sys.stderr.write( "Gosh. No error.\n" )
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mendax-grip/cfdemUtilities | refs/heads/master | ergun/plotExplicitImplicit.py | 1 | ###############################################################################
#
# File : plotExplicitImplicit.py
#
# Run Instructions : python plotExplicitImplicit.py implicitPressureFile explicitPressureFile
#
# Author : Bruno Blais
#
# Description : This script reads two pressure files (implicit and explicit
# momentum exchange), plots the pressure drop over time for
# each, and compares both against the Ergun correlation
#
###############################################################################
#Python imports
#-------------------------------
import os
import math
import numpy
import matplotlib.pyplot as plt
import sys
#-------------------------------
#Figure size and parameters
#-------------------------
plt.rcParams['figure.figsize'] = 10, 7
params = {'backend': 'ps',
'axes.labelsize': 24,
'text.fontsize': 16,
'legend.fontsize': 20,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True,
}
plt.rcParams.update(params)
pdf=True
#=====================
# Main plot
#=====================
if (len(sys.argv)<2) :
print "You need to enter a file argument"
#===================
# Ergun Analysis from Christoph
#===================
rhoG=1000
dp = 0.001 # particle diameter
phip = 1 # sphericity
epsilon = 0.451335 # void fraction
L = 0.0151 # length of bed
nuG = 1.*10**-4 # kinemat Visk in m2/s
muG = nuG*rhoG # dynam visc in Pa s
#==================================
# min fluidization velocity in m/s
#==================================
rhoP = 2000 # particle density in kg/m3
g = 9.81 # gravity m/s2
Umf = dp**2*(rhoP-rhoG)*g/(150*muG)*(epsilon**3*phip**2)/(1-epsilon)
ReMF = Umf*dp*rhoG/muG
if (ReMF>=1000):
Umf = math.sqrt(dp*(rhoP-rhoG)*g/(1.75*rhoG)*epsilon**3*phip)
ReMF = Umf*dp*rhoG/muG
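# Added sanity-check note (not in the original script): with the values
# above (dp = 1 mm, rhoP - rhoG = 1000 kg/m3, muG = 0.1 Pa.s,
# epsilon = 0.451), the laminar branch gives Umf ~ 1.1e-4 m/s and
# ReMF ~ 1.1e-3, so the turbulent (ReMF >= 1000) correction never triggers.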
#=================================
# Output files
#=================================
print "Opening ", sys.argv[1]
pMat1 = numpy.loadtxt(sys.argv[1], unpack=True,comments="#")
t1=pMat1[0,::9]
dP1=(pMat1[1,::9]-pMat1[-1,::9]) * rhoG
print "Opening ", sys.argv[2]
pMat2 = numpy.loadtxt(sys.argv[2], unpack=True,comments="#")
t2=pMat2[0,::9]
dP2=(pMat2[1,::9]-pMat2[-1,::9]) * rhoG
plt.plot(t1,0.99*dP1,'g',label="Implicit momentum exchange",alpha=0.3,linewidth=2)
plt.plot(t2,0.99*dP2,'m',label="Explicit momentum exchange",alpha=0.8,linewidth=2)
dpUmf= numpy.ones(len(t1)) * L * (
150*((1-epsilon)**2/epsilon**3)*((muG*Umf)/(phip*dp)**2)
+1.75*((1-epsilon)/epsilon**3)*((rhoG*Umf**2)/(phip*dp))
);
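# Added note (not in the original script): with the parameters above the
# Ergun correlation evaluates to roughly 81 Pa over the 15.1 mm bed,
# consistent with the 150 Pa y-axis cap set below.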
plt.plot(t1,dpUmf,'k',label="Ergun correlation",linewidth=3)
plt.ylim([-10,150])
plt.ylabel('Pressure drop [Pa]')
plt.xlabel('Time [s]')
plt.legend(loc=4)
if (pdf): plt.savefig("./pressureRuns.pdf")
plt.show()
|
florianjacob/pelican-plugins | refs/heads/master | simple_footnotes/simple_footnotes.py | 17 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from pelican import signals
import html5lib
import six
RAW_FOOTNOTE_CONTAINERS = ["code"]
def getText(node, recursive=False):
"""Get all the text associated with this node.
With recursive == True, all text from child nodes is retrieved."""
L = [u'']
for n in node.childNodes:
if n.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
L.append(n.data)
else:
if not recursive:
return None
L.append(getText(n))
return u''.join(L)
def sequence_gen(genlist):
for gen in genlist:
for elem in gen:
yield elem
def parse_for_footnotes(article_or_page_generator):
all_content = [
getattr(article_or_page_generator, attr, None) \
for attr in [u'articles', u'drafts', u'pages']]
all_content = [x for x in all_content if x is not None]
for article in sequence_gen(all_content):
if u"[ref]" in article._content and u"[/ref]" in article._content:
content = article._content.replace(u"[ref]", u"<x-simple-footnote>").replace(u"[/ref]",
u"</x-simple-footnote>")
parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder(u"dom"))
dom = parser.parse(content)
endnotes = []
count = 0
for footnote in dom.getElementsByTagName(u"x-simple-footnote"):
pn = footnote
leavealone = False
while pn:
if pn.nodeName in RAW_FOOTNOTE_CONTAINERS:
leavealone = True
break
pn = pn.parentNode
if leavealone:
continue
count += 1
fnid = u"sf-%s-%s" % (article.slug, count)
fnbackid = u"%s-back" % (fnid,)
endnotes.append((footnote, fnid, fnbackid))
number = dom.createElement(u"sup")
number.setAttribute(u"id", fnbackid)
numbera = dom.createElement(u"a")
numbera.setAttribute(u"href", u"#%s" % fnid)
numbera.setAttribute(u"class", u"simple-footnote")
numbera.appendChild(dom.createTextNode(six.text_type(count)))
txt = getText(footnote, recursive=True).replace(u"\n", u" ")
numbera.setAttribute(u"title", txt)
number.appendChild(numbera)
footnote.parentNode.insertBefore(number, footnote)
if endnotes:
ol = dom.createElement(u"ol")
ol.setAttribute(u"class", u"simple-footnotes")
for e, fnid, fnbackid in endnotes:
li = dom.createElement(u"li")
li.setAttribute(u"id", fnid)
while e.firstChild:
li.appendChild(e.firstChild)
backlink = dom.createElement(u"a")
backlink.setAttribute(u"href", u"#%s" % fnbackid)
backlink.setAttribute(u"class", u"simple-footnote-back")
backlink.appendChild(dom.createTextNode(u'\u21a9'))
li.appendChild(dom.createTextNode(u" "))
li.appendChild(backlink)
ol.appendChild(li)
e.parentNode.removeChild(e)
dom.getElementsByTagName(u"body")[0].appendChild(ol)
s = html5lib.serializer.HTMLSerializer(omit_optional_tags=False, quote_attr_values='legacy')
output_generator = s.serialize(
html5lib.treewalkers.getTreeWalker(u"dom")(dom.getElementsByTagName(u"body")[0]))
article._content = u"".join(list(output_generator)).replace(
u"<x-simple-footnote>", u"[ref]").replace(u"</x-simple-footnote>", u"[/ref]").replace(
u"<body>", u"").replace(u"</body>", u"")
def register():
signals.article_generator_finalized.connect(parse_for_footnotes)
signals.page_generator_finalized.connect(parse_for_footnotes)
|
cloudera/hue | refs/heads/master | desktop/core/ext-py/Django-1.11.29/tests/syndication_tests/feeds.py | 144 | from __future__ import unicode_literals
from django.contrib.syndication import views
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone
from .models import Article, Entry
class TestRss2Feed(views.Feed):
title = 'My blog'
description = 'A more thorough description of my blog.'
link = '/blog/'
feed_guid = '/foo/bar/1234'
author_name = 'Sally Smith'
author_email = 'test@example.com'
author_link = 'http://www.example.com/'
categories = ('python', 'django')
feed_copyright = 'Copyright (c) 2007, Sally Smith'
ttl = 600
def items(self):
return Entry.objects.all()
def item_description(self, item):
return "Overridden description: %s" % item
def item_pubdate(self, item):
return item.published
def item_updateddate(self, item):
return item.updated
item_author_name = 'Sally Smith'
item_author_email = 'test@example.com'
item_author_link = 'http://www.example.com/'
item_categories = ('python', 'testing')
item_copyright = 'Copyright (c) 2007, Sally Smith'
class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed):
def item_guid_is_permalink(self, item):
return True
class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed):
def item_guid(self, item):
return str(item.pk)
def item_guid_is_permalink(self, item):
return False
class TestRss091Feed(TestRss2Feed):
feed_type = feedgenerator.RssUserland091Feed
class TestNoPubdateFeed(views.Feed):
title = 'Test feed'
link = '/feed/'
def items(self):
return Entry.objects.all()
class TestAtomFeed(TestRss2Feed):
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
class TestLatestFeed(TestRss2Feed):
"""
A feed where the latest entry date is an `updated` element.
"""
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
def items(self):
return Entry.objects.exclude(pk=5)
class ArticlesFeed(TestRss2Feed):
"""
A feed to test no link being defined. Articles have no get_absolute_url()
method, and item_link() is not defined.
"""
def items(self):
return Article.objects.all()
class TestSingleEnclosureRSSFeed(TestRss2Feed):
"""
A feed to test that RSS feeds work with a single enclosure.
"""
def item_enclosure_url(self, item):
return 'http://example.com'
def item_enclosure_size(self, item):
return 0
def item_mime_type(self, item):
return 'image/png'
class TestMultipleEnclosureRSSFeed(TestRss2Feed):
"""
A feed to test that RSS feeds raise an exception with multiple enclosures.
"""
def item_enclosures(self, item):
return [
feedgenerator.Enclosure('http://example.com/hello.png', 0, 'image/png'),
feedgenerator.Enclosure('http://example.com/goodbye.png', 0, 'image/png'),
]
class TemplateFeed(TestRss2Feed):
"""
A feed to test defining item titles and descriptions with templates.
"""
title_template = 'syndication/title.html'
description_template = 'syndication/description.html'
# Defining a template overrides any item_title definition
def item_title(self):
return "Not in a template"
class TemplateContextFeed(TestRss2Feed):
"""
A feed to test custom context data in templates for title or description.
"""
title_template = 'syndication/title_context.html'
description_template = 'syndication/description_context.html'
def get_context_data(self, **kwargs):
context = super(TemplateContextFeed, self).get_context_data(**kwargs)
context['foo'] = 'bar'
return context
class NaiveDatesFeed(TestAtomFeed):
"""
A feed with naive (non-timezone-aware) dates.
"""
def item_pubdate(self, item):
return item.published
class TZAwareDatesFeed(TestAtomFeed):
"""
A feed with timezone-aware dates.
"""
def item_pubdate(self, item):
# Provide a weird offset so that the test can know it's getting this
# specific offset and not accidentally getting on from
# settings.TIME_ZONE.
return item.published.replace(tzinfo=get_fixed_timezone(42))
class TestFeedUrlFeed(TestAtomFeed):
feed_url = 'http://example.com/customfeedurl/'
class MyCustomAtom1Feed(feedgenerator.Atom1Feed):
"""
Test of a custom feed generator class.
"""
def root_attributes(self):
attrs = super(MyCustomAtom1Feed, self).root_attributes()
attrs['django'] = 'rocks'
return attrs
def add_root_elements(self, handler):
super(MyCustomAtom1Feed, self).add_root_elements(handler)
handler.addQuickElement('spam', 'eggs')
def item_attributes(self, item):
attrs = super(MyCustomAtom1Feed, self).item_attributes(item)
attrs['bacon'] = 'yum'
return attrs
def add_item_elements(self, handler, item):
super(MyCustomAtom1Feed, self).add_item_elements(handler, item)
handler.addQuickElement('ministry', 'silly walks')
class TestCustomFeed(TestAtomFeed):
feed_type = MyCustomAtom1Feed
class TestSingleEnclosureAtomFeed(TestAtomFeed):
"""
A feed to test that Atom feeds work with a single enclosure.
"""
def item_enclosure_url(self, item):
return 'http://example.com'
def item_enclosure_size(self, item):
return 0
def item_mime_type(self, item):
return 'image/png'
class TestMultipleEnclosureAtomFeed(TestAtomFeed):
"""
A feed to test that Atom feeds work with multiple enclosures.
"""
def item_enclosures(self, item):
return [
feedgenerator.Enclosure('http://example.com/hello.png', '0', 'image/png'),
feedgenerator.Enclosure('http://example.com/goodbye.png', '0', 'image/png'),
]
|
nickpack/reportlab | refs/heads/master | src/reportlab/graphics/barcode/eanbc.py | 3 | __all__=(
'Ean13BarcodeWidget','isEanString',
)
from reportlab.graphics.shapes import Group, String, Rect
from reportlab.lib import colors
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.validators import isNumber, isColor, isString, Validator, isBoolean
from reportlab.lib.attrmap import *
from reportlab.graphics.charts.areas import PlotArea
from reportlab.lib.units import mm
#work out a list of manufacturer codes....
_eanNumberSystems = [
('00-13', 'USA & Canada'),
('20-29', 'In-Store Functions'),
('30-37', 'France'),
('40-44', 'Germany'),
('45', 'Japan (also 49)'),
('46', 'Russian Federation'),
('471', 'Taiwan'),
('474', 'Estonia'),
('475', 'Latvia'),
('477', 'Lithuania'),
('479', 'Sri Lanka'),
('480', 'Philippines'),
('482', 'Ukraine'),
('484', 'Moldova'),
('485', 'Armenia'),
('486', 'Georgia'),
('487', 'Kazakhstan'),
('489', 'Hong Kong'),
('49', 'Japan (JAN-13)'),
('50', 'United Kingdom'),
('520', 'Greece'),
('528', 'Lebanon'),
('529', 'Cyprus'),
('531', 'Macedonia'),
('535', 'Malta'),
('539', 'Ireland'),
('54', 'Belgium & Luxembourg'),
('560', 'Portugal'),
('569', 'Iceland'),
('57', 'Denmark'),
('590', 'Poland'),
('594', 'Romania'),
('599', 'Hungary'),
('600-601', 'South Africa'),
('609', 'Mauritius'),
('611', 'Morocco'),
('613', 'Algeria'),
('619', 'Tunisia'),
('622', 'Egypt'),
('625', 'Jordan'),
('626', 'Iran'),
('64', 'Finland'),
('690-692', 'China'),
('70', 'Norway'),
('729', 'Israel'),
('73', 'Sweden'),
('740', 'Guatemala'),
('741', 'El Salvador'),
('742', 'Honduras'),
('743', 'Nicaragua'),
('744', 'Costa Rica'),
('746', 'Dominican Republic'),
('750', 'Mexico'),
('759', 'Venezuela'),
('76', 'Switzerland'),
('770', 'Colombia'),
('773', 'Uruguay'),
('775', 'Peru'),
('777', 'Bolivia'),
('779', 'Argentina'),
('780', 'Chile'),
('784', 'Paraguay'),
('785', 'Peru'),
('786', 'Ecuador'),
('789', 'Brazil'),
('80-83', 'Italy'),
('84', 'Spain'),
('850', 'Cuba'),
('858', 'Slovakia'),
('859', 'Czech Republic'),
('860', 'Yugoslavia'),
('869', 'Turkey'),
('87', 'Netherlands'),
('880', 'South Korea'),
('885', 'Thailand'),
('888', 'Singapore'),
('890', 'India'),
('893', 'Vietnam'),
('899', 'Indonesia'),
('90-91', 'Austria'),
('93', 'Australia'),
('94', 'New Zealand'),
('955', 'Malaysia'),
('977', 'International Standard Serial Number for Periodicals (ISSN)'),
('978', 'International Standard Book Numbering (ISBN)'),
('979', 'International Standard Music Number (ISMN)'),
('980', 'Refund receipts'),
('981-982', 'Common Currency Coupons'),
('99', 'Coupons')
]
manufacturerCodes = {}
for (k, v) in _eanNumberSystems:
words = k.split('-')
if len(words)==2:
fromCode = int(words[0])
toCode = int(words[1])
for code in range(fromCode, toCode+1):
manufacturerCodes[code] = v
else:
manufacturerCodes[int(k)] = v
def nDigits(n):
class _ndigits(Validator):
def test(self,x):
return type(x) is str and len(x)<=n and len([c for c in x if c in "0123456789"])==n
return _ndigits()
class Ean13BarcodeWidget(PlotArea):
codeName = "EAN13"
_attrMap = AttrMap(BASE=PlotArea,
value = AttrMapValue(nDigits(12), desc='the number'),
fontName = AttrMapValue(isString, desc='fontName'),
fontSize = AttrMapValue(isNumber, desc='font size'),
x = AttrMapValue(isNumber, desc='x-coord'),
y = AttrMapValue(isNumber, desc='y-coord'),
barFillColor = AttrMapValue(isColor, desc='bar color'),
barHeight = AttrMapValue(isNumber, desc='Height of bars.'),
barWidth = AttrMapValue(isNumber, desc='Width of bars.'),
barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'),
textColor = AttrMapValue(isColor, desc='human readable text color'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
quiet = AttrMapValue(isBoolean, desc='if quiet zone to be used'),
lquiet = AttrMapValue(isBoolean, desc='left quiet zone length'),
rquiet = AttrMapValue(isBoolean, desc='right quiet zone length'),
)
_digits=12
_start_right = 7 #for ean-13 left = [0:7] right=[7:13]
_nbars = 113
barHeight = 25.93*mm #millimeters
barWidth = (37.29/_nbars)*mm
humanReadable = 1
_0csw = 1
_1csw = 3
#Left Hand Digits.
_left = ( ("0001101", "0011001", "0010011", "0111101",
"0100011", "0110001", "0101111", "0111011",
"0110111", "0001011",
), #odd left hand digits
("0100111", "0110011", "0011011", "0100001",
"0011101", "0111001", "0000101", "0010001",
"0001001", "0010111"), #even left hand digits
)
_right = ("1110010", "1100110", "1101100", "1000010",
"1011100", "1001110", "1010000", "1000100",
"1001000", "1110100")
quiet = 1
rquiet = lquiet = None
_tail = "101"
_sep = "01010"
_lhconvert={
"0": (0,0,0,0,0,0),
"1": (0,0,1,0,1,1),
"2": (0,0,1,1,0,1),
"3": (0,0,1,1,1,0),
"4": (0,1,0,0,1,1),
"5": (0,1,1,0,0,1),
"6": (0,1,1,1,0,0),
"7": (0,1,0,1,0,1),
"8": (0,1,0,1,1,0),
"9": (0,1,1,0,1,0)
}
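# In EAN-13 the leading (13th) digit is not drawn as bars; via the
# table above it selects which parity table (_left[0] = odd parity,
# _left[1] = even parity) encodes each of the six left-hand digits.
# E.g. a leading '5' gives the pattern odd,even,even,odd,odd,even.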
fontSize = 8 #points
fontName = 'Helvetica'
textColor = barFillColor = colors.black
barStrokeColor = None
barStrokeWidth = 0
x = 0
y = 0
def __init__(self,value='123456789012',**kw):
self.value=max(self._digits-len(value),0)*'0'+value[:self._digits]
for k, v in kw.items(): # items() works on both Python 2 and 3; iteritems() is Python 2 only
setattr(self, k, v)
width = property(lambda self: self.barWidth*(self._nbars-18+self._calc_quiet(self.lquiet)+self._calc_quiet(self.rquiet)))
def wrap(self,aW,aH):
return self.width,self.barHeight
def _encode_left(self,s,a):
cp = self._lhconvert[s[0]] #convert the left hand numbers
_left = self._left
z = ord('0')
for i,c in enumerate(s[1:self._start_right]):
a(_left[cp[i]][ord(c)-z])
def _short_bar(self,i):
i += 9 - self._lquiet
return self.humanReadable and ((12<i<55) or (57<i<101))
def _calc_quiet(self,v):
if self.quiet:
if v is None:
v = 9
else:
x = float(max(v,0))/self.barWidth
v = int(x)
if x-v>0: v += 1 # round up: int() truncates, so a fractional module bumps the count
else:
v = 0
return v
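# _calc_quiet returns the quiet-zone width in whole modules: with
# quiet on, None defaults to 9 modules, and an explicit length in
# points is divided by barWidth and rounded up; with quiet off it is 0.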
def draw(self):
g = Group()
gAdd = g.add
barWidth = self.barWidth
width = self.width
barHeight = self.barHeight
x = self.x
y = self.y
gAdd(Rect(x,y,width,barHeight,fillColor=None,strokeColor=None,strokeWidth=0))
s = self.value+self._checkdigit(self.value)
self._lquiet = lquiet = self._calc_quiet(self.lquiet)
rquiet = self._calc_quiet(self.rquiet)
b = [lquiet*'0',self._tail] #the signal string
a = b.append
self._encode_left(s,a)
a(self._sep)
z = ord('0')
_right = self._right
for c in s[self._start_right:]:
a(_right[ord(c)-z])
a(self._tail)
a(rquiet*'0')
fontSize = self.fontSize
barFillColor = self.barFillColor
barStrokeWidth = self.barStrokeWidth
barStrokeColor = self.barStrokeColor
fth = fontSize*1.2
b = ''.join(b)
lrect = None
for i,c in enumerate(b):
if c=="1":
dh = self._short_bar(i) and fth or 0
yh = y+dh
if lrect and lrect.y==yh:
lrect.width += barWidth
else:
lrect = Rect(x,yh,barWidth,barHeight-dh,fillColor=barFillColor,strokeWidth=barStrokeWidth,strokeColor=barStrokeColor)
gAdd(lrect)
else:
lrect = None
x += barWidth
if self.humanReadable: self._add_human_readable(s,gAdd)
return g
def _add_human_readable(self,s,gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize*1.2
# draw the num below the line.
c = s[0]
w = stringWidth(c,fontName,fontSize)
x = self.x+barWidth*(self._lquiet-8)
y = self.y + 0.2*fth
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
x = self.x + (33-9+self._lquiet)*barWidth
c = s[1:7]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x += 47*barWidth
c = s[7:]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
def _checkdigit(cls,num):
z = ord('0')
iSum = cls._0csw*sum([(ord(x)-z) for x in num[::2]]) \
+ cls._1csw*sum([(ord(x)-z) for x in num[1::2]])
return chr(z+((10-(iSum%10))%10))
_checkdigit=classmethod(_checkdigit)
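# Worked example, derived from _checkdigit above: for value
# '123456789012' the digits at even string indices (1,3,5,7,9,1) sum
# to 26 with weight 1, and those at odd indices (2,4,6,8,0,2) sum to
# 22 with weight 3, giving 66; the total is 92, so the check digit is
# (10 - (92 % 10)) % 10 = 8 and the widget encodes '1234567890128'.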
class Ean8BarcodeWidget(Ean13BarcodeWidget):
codeName = "EAN8"
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
value = AttrMapValue(nDigits(7), desc='the number'),
)
_start_right = 4 #for ean-8 left = [0:4] right=[4:8]
_nbars = 85
_digits=7
_0csw = 3
_1csw = 1
def _encode_left(self,s,a):
cp = self._lhconvert[s[0]] #convert the left hand numbers
_left = self._left[0]
z = ord('0')
for i,c in enumerate(s[0:self._start_right]):
a(_left[ord(c)-z])
def _short_bar(self,i):
i += 9 - self._lquiet
return self.humanReadable and ((12<i<41) or (43<i<73))
def _add_human_readable(self,s,gAdd):
barWidth = self.barWidth
fontSize = self.fontSize
textColor = self.textColor
fontName = self.fontName
fth = fontSize*1.2
# draw the num below the line.
y = self.y + 0.2*fth
x = (26.5-9+self._lquiet)*barWidth
c = s[0:4]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
x = (59.5-9+self._lquiet)*barWidth
c = s[4:]
gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
class UPCA(Ean13BarcodeWidget):
codeName = "UPCA"
_attrMap = AttrMap(BASE=Ean13BarcodeWidget,
value = AttrMapValue(nDigits(11), desc='the number'),
)
_start_right = 6
_digits = 11
_0csw = 3
_1csw = 1
_nbars = 1+7*11+2*3+5
|
MichaelCoughlinAN/Odds-N-Ends | refs/heads/master | Python/Python Modules/lxml-4.2.0/src/lxml/html/_setmixin.py | 53 | try:
    from collections.abc import MutableSet  # Python 3.3+
except ImportError:
    from collections import MutableSet      # Python 2
class SetMixin(MutableSet):
"""
Mix-in for sets. You must define __iter__, add, remove
"""
def __len__(self):
length = 0
for item in self:
length += 1
return length
def __contains__(self, item):
for has_item in self:
if item == has_item:
return True
return False
issubset = MutableSet.__le__
issuperset = MutableSet.__ge__
union = MutableSet.__or__
intersection = MutableSet.__and__
difference = MutableSet.__sub__
symmetric_difference = MutableSet.__xor__
def copy(self):
return set(self)
def update(self, other):
self |= other
def intersection_update(self, other):
self &= other
def difference_update(self, other):
self -= other
def symmetric_difference_update(self, other):
self ^= other
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
@classmethod
def _from_iterable(cls, it):
return set(it)
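# Illustrative sketch (not part of lxml): a concrete subclass only has
# to provide __iter__, add and remove; SetMixin supplies the rest of
# the MutableSet protocol. Note remove should raise KeyError, as the
# set protocol (and discard above) expects:
#
#     class ListSet(SetMixin):
#         def __init__(self, items=()):
#             self._items = []
#             for item in items:
#                 self.add(item)
#         def __iter__(self):
#             return iter(self._items)
#         def add(self, item):
#             if item not in self._items:
#                 self._items.append(item)
#         def remove(self, item):
#             try:
#                 self._items.remove(item)
#             except ValueError:
#                 raise KeyError(item)
#
#     s = ListSet([1, 2, 2, 3])   # len(s) == 3 and 2 in s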
|
guschmue/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/graph_actions.py | 76 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
from tensorflow.python.util.deprecation import deprecated
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
_graph_action_deprecation = deprecated(
'2017-02-15',
'graph_actions.py will be deleted. Use tf.train.* utilities instead. '
'You can use learn/estimators/estimator.py as an example.')
@_graph_action_deprecation
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
return summary_io.SummaryWriterCache.clear()
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
return summary_io.SummaryWriterCache.get(logdir)
def _make_saver(graph, keep_checkpoint_max=5):
vars_to_save = (graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
if vars_to_save:
return tf_saver.Saver(vars_to_save,
sharded=True,
max_to_keep=keep_checkpoint_max)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
@_graph_action_deprecation
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on
the model and is expected to increment the `global_step_tensor`, a scalar
integer tensor counting training steps. The `loss_op` represents the
objective function of the training. This function uses `Supervisor` to
initialize the graph (from a checkpoint if one is available in `output_dir`),
write summaries defined in the graph, and write regular checkpoints as
defined by `supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or,
if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`, in which case a
`NanLossDuringTrainingError` is raised.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to tf.train.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
`supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. Two calls of fit(steps=100) mean 200 training
iterations; by contrast, with two calls of fit(max_steps=100) the
second call performs no iterations, since the first call already did
all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
while True:
try:
return _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps)
except errors.AbortedError:
# Happens when PS restarts, keep training.
logging.warning('Training got Aborted error. Keep training.')
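# Hedged usage sketch (all names below are illustrative, not part of
# this module): a minimal call wires a loss, a global step and a train
# op into one graph, then hands them to train():
#
#     with ops.Graph().as_default() as g:
#         loss = ...  # scalar loss tensor built from your model
#         global_step = contrib_variables.get_or_create_global_step()
#         train_op = some_optimizer.minimize(loss, global_step=global_step)
#         final_loss = train(g, '/tmp/my_model', train_op, loss, steps=100)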
def _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps):
"""See train."""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
if train_op is None:
raise ValueError('Missing train_op.')
if loss_op is None:
raise ValueError('Missing loss_op.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = load_variable(output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# Add default chief monitors if none were provided.
if not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer) if supervisor_is_chief else []
# TODO(ipolosukhin): Replace all functionality of Supervisor
# with Chief-Exclusive Monitors.
if not supervisor_is_chief:
# Prune list of monitor to the ones runnable on all workers.
monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]
if max_steps is None:
max_steps = (start_step + steps) if steps else None
# Start monitors, can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph, keep_checkpoint_max),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise monitors_lib.NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except StopIteration:
logging.info('Exhausted input iterator.')
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
"""Returns the local init ops to initialize tables and local variables."""
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
logging.info('Saving evaluation summary for step %d: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
@_graph_action_deprecation
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
Args:
graph: A `Graph` to train. It is expected that this graph is not in use
elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
Raises:
ValueError: if `output_dir` is empty.
"""
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_for_local_init_op = _get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.global_variables_initializer())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop session first, before queue runners.
session.close()
# Stop queue runners.
try:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
except (RuntimeError, errors.CancelledError) as e:
logging.warning('Coordinator didn\'t stop cleanly: %s', e)
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
# catch StopIteration which is thrown if the DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Yields:
A sequence of dicts of values read from `output_dict` tensors, one item
yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
session.run(
resources.initialize_resources(resources.shared_resources() +
resources.local_resources()))
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.global_variables_initializer())
session.run(variables.local_variables_initializer())
session.run(lookup_ops.tables_initializer())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
for f in feed_dicts:
yield session.run(output_dict, f)
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
@_graph_action_deprecation
def run_feeds(*args, **kwargs):
"""See run_feeds_iter(). Returns a `list` instead of an iterator."""
return list(run_feeds_iter(*args, **kwargs))
@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
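# Hedged usage sketch (the checkpoint path and tensor names are
# hypothetical): restore a checkpoint and read one batch of outputs.
#
#     results = infer('/tmp/my_model/model.ckpt-100',
#                     {'predictions': predictions_tensor})
#     # results is a dict, e.g. {'predictions': <numpy value>}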
|
costular/sanic-rest | refs/heads/master | tests/test_permissions.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from sanic import views
__author__ = 'costular'
@pytest.fixture
def basic_view():
class BasicView(views.APIView):
async def get(self, request):
return 'Hello'
return BasicView
class TestAPIView:
@pytest.mark.asyncio
async def test_basic_view_get(self, api, basic_view, test_client):
api.register_view('/testingmyself', basic_view)
request, response = test_client.get('/testingmyself')
assert response.status == 200
|
thehyve/variant | refs/heads/master | eggs/django-1.3.1-py2.7.egg/django/db/backends/mysql/creation.py | 311 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
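# For example, a CharField(max_length=100) is rendered by
# interpolating the field's __dict__ into the format string above:
# 'varchar(%(max_length)s)' % {'max_length': 100} == 'varchar(100)'.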
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
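# For instance, with TEST_CHARSET='utf8' and TEST_COLLATION='utf8_general_ci'
# this returns 'CHARACTER SET utf8 COLLATE utf8_general_ci'; with neither
# setting present it returns ''.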
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
asvetlov/motor | refs/heads/master | test/asyncio_tests/test_asyncio_tests.py | 1 | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Motor's asyncio test utilities."""
import asyncio
import contextlib
import io
import os
import unittest
from test.asyncio_tests import AsyncIOTestCase, asyncio_test
def run_test_case(case, suppress_output=True):
suite = unittest.defaultTestLoader.loadTestsFromTestCase(case)
if suppress_output:
stream = io.StringIO()
else:
stream = None
runner = unittest.TextTestRunner(stream=stream)
return runner.run(suite)
@contextlib.contextmanager
def set_environ(name, value):
old_value = os.environ.get(name)
os.environ[name] = value
try:
yield
finally:
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
class TestAsyncIOTests(unittest.TestCase):
def test_basic(self):
class Test(AsyncIOTestCase):
@asyncio_test
def test(self):
pass
result = run_test_case(Test)
self.assertEqual(1, result.testsRun)
self.assertEqual(0, len(result.errors))
def test_decorator_with_no_args(self):
class TestPasses(AsyncIOTestCase):
@asyncio_test()
def test_decorated_with_no_args(self):
pass
result = run_test_case(TestPasses)
self.assertEqual(0, len(result.errors))
class TestFails(AsyncIOTestCase):
@asyncio_test()
def test_decorated_with_no_args(self):
assert False
result = run_test_case(TestFails)
self.assertEqual(1, len(result.failures))
def test_timeout_passed_as_positional(self):
with self.assertRaises(TypeError):
class _(AsyncIOTestCase):
# Should be "timeout=10".
@asyncio_test(10)
def test_decorated_with_no_args(self):
pass
def test_timeout(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.addCleanup(self.loop.close)
self.addCleanup(setattr, self, 'loop', None)
class Test(AsyncIOTestCase):
@asyncio_test(timeout=0.01)
def test_that_is_too_slow(self):
yield from self.middle()
@asyncio.coroutine
def middle(self):
yield from self.inner()
@asyncio.coroutine
def inner(self):
yield from asyncio.sleep(1, loop=self.loop)
with set_environ('ASYNC_TEST_TIMEOUT', '0'):
result = run_test_case(Test)
self.assertEqual(1, len(result.errors))
case, text = result.errors[0]
self.assertTrue('CancelledError' in text)
self.assertTrue('timed out after 0.01 seconds' in text)
# The traceback shows where the coroutine hung.
self.assertTrue('test_that_is_too_slow' in text)
self.assertTrue('middle' in text)
self.assertTrue('inner' in text)
def test_timeout_environment_variable(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.addCleanup(self.loop.close)
self.addCleanup(setattr, self, 'loop', None)
@asyncio_test
def default_timeout(self):
yield from asyncio.sleep(0.1, loop=self.loop)
with set_environ('ASYNC_TEST_TIMEOUT', '0.2'):
# No error, sleeps for 0.1 seconds and the timeout is 0.2 seconds.
default_timeout(self)
@asyncio_test(timeout=0.1)
def custom_timeout(self):
yield from asyncio.sleep(0.2, loop=self.loop)
with set_environ('ASYNC_TEST_TIMEOUT', '0'):
# No error, default timeout of 5 seconds overrides '0'.
default_timeout(self)
with set_environ('ASYNC_TEST_TIMEOUT', '0'):
with self.assertRaises(asyncio.CancelledError):
custom_timeout(self)
with set_environ('ASYNC_TEST_TIMEOUT', '1'):
# No error, 1-second timeout from environment overrides custom
# timeout of 0.1 seconds.
custom_timeout(self)
def test_failure(self):
class Test(AsyncIOTestCase):
@asyncio_test
def test_that_fails(self):
yield from self.middle()
@asyncio.coroutine
def middle(self):
yield from self.inner()
@asyncio.coroutine
def inner(self):
assert False, 'expected error'
result = run_test_case(Test)
self.assertEqual(1, len(result.failures))
case, text = result.failures[0]
self.assertFalse('CancelledError' in text)
self.assertTrue('AssertionError' in text)
self.assertTrue('expected error' in text)
# The traceback shows where the coroutine raised.
self.assertTrue('test_that_fails' in text)
self.assertTrue('middle' in text)
self.assertTrue('inner' in text)
def test_undecorated(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.addCleanup(self.loop.close)
self.addCleanup(setattr, self, 'loop', None)
class Test(AsyncIOTestCase):
def test_that_should_be_decorated(self):
yield from asyncio.sleep(0.01, loop=self.loop)
result = run_test_case(Test)
self.assertEqual(1, len(result.errors))
case, text = result.errors[0]
self.assertFalse('CancelledError' in text)
self.assertTrue('TypeError' in text)
self.assertTrue('should be decorated with @asyncio_test' in text)
def test_other_return(self):
class Test(AsyncIOTestCase):
def test_other_return(self):
return 42
result = run_test_case(Test)
self.assertEqual(len(result.errors), 1)
case, text = result.errors[0]
self.assertIn('Return value from test method ignored', text)
if __name__ == '__main__':
unittest.main()
|
gaddman/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_asm_policy_import.py | 17 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_asm_policy_import import ApiParameters
from library.modules.bigip_asm_policy_import import ModuleParameters
from library.modules.bigip_asm_policy_import import ModuleManager
from library.modules.bigip_asm_policy_import import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_asm_policy_import import ApiParameters
from ansible.modules.network.f5.bigip_asm_policy_import import ModuleParameters
from ansible.modules.network.f5.bigip_asm_policy_import import ModuleManager
from ansible.modules.network.f5.bigip_asm_policy_import import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='fake_policy',
state='present',
source='/var/fake/fake.xml'
)
p = ModuleParameters(params=args)
assert p.name == 'fake_policy'
assert p.source == '/var/fake/fake.xml'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.policy = os.path.join(fixture_path, 'fake_policy.xml')
self.patcher1 = patch('time.sleep')
self.patcher1.start()
try:
self.p1 = patch('library.modules.bigip_asm_policy_import.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_asm_policy_import.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.patcher1.stop()
self.p1.stop()
def test_import_from_file(self, *args):
set_module_args(dict(
name='fake_policy',
source=self.policy,
server='localhost',
password='password',
user='admin',
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)
mm.import_file_to_device = Mock(return_value=True)
mm.remove_temp_policy_from_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['source'] == self.policy
|
konrado0/vosqa | refs/heads/master | forum/settings/basic.py | 1 | import os.path
from base import Setting, SettingSet
from forms import ImageFormWidget
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import Textarea
BASIC_SET = SettingSet('basic', _('Basic settings'), _("The basic settings for your application"), 1)
APP_LOGO = Setting('APP_LOGO', '/upfiles/logo.png', BASIC_SET, dict(
label = _("Application logo"),
help_text = _("Your site main logo."),
widget=ImageFormWidget))
APP_LOGO_EMAIL = Setting('APP_LOGO_EMAIL', '/upfiles/logo.png', BASIC_SET, dict(
label = _("Logo for email"),
help_text = _("Logo used in email notifications."),
widget=ImageFormWidget))
APP_FAVICON = Setting('APP_FAVICON', '/m/default/media/images/favicon.ico', BASIC_SET, dict(
label = _("Favicon"),
help_text = _("Your site favicon."),
widget=ImageFormWidget))
APP_TITLE = Setting('APP_TITLE', u'VOSQA: Open Source Q&A Forum', BASIC_SET, dict(
label = _("Application title"),
help_text = _("The title of your application that will show in the browsers title bar")))
APP_SHORT_NAME = Setting(u'APP_SHORT_NAME', 'VOSQA', BASIC_SET, dict(
label = _("Application short name"),
help_text = "The short name for your application that will show up in many places."))
APP_KEYWORDS = Setting('APP_KEYWORDS', u'OSQA,CNPROG,forum,community', BASIC_SET, dict(
label = _("Application keywords"),
help_text = _("The meta keywords that will be available through the HTML meta tags.")))
APP_DESCRIPTION = Setting('APP_DESCRIPTION', u'Ask and answer questions.', BASIC_SET, dict(
label = _("Application description"),
help_text = _("The description of your application"),
widget=Textarea))
APP_COPYRIGHT = Setting('APP_COPYRIGHT', u'Copyright OSQA, 2010. Some rights reserved under creative commons license.', BASIC_SET, dict(
label = _("Copyright notice"),
help_text = _("The copyright notice visible at the footer of your page.")))
SUPPORT_URL = Setting('SUPPORT_URL', '', BASIC_SET, dict(
label = _("Support URL"),
help_text = _("The URL provided for users to get support. It can be http: or mailto: or whatever your preferred support scheme is."),
required=False))
CONTACT_URL = Setting('CONTACT_URL', '', BASIC_SET, dict(
label = _("Contact URL"),
help_text = _("The URL provided for users to contact you. It can be http: or mailto: or whatever your preferred contact scheme is."),
required=False))
LOAD_RESOURCES_FROM_CDN = Setting('LOAD_RESOURCES_FROM_CDN', True, BASIC_SET, dict(
label=_("Load JS/CSS resources from cdn"),
help_text = _("JS/CSS resources (jquery, tinymce etc) will be loaded from CDN instead of local."),
required=False,
))
EMBEDD_YOUTUBE_FROM_LINKS = Setting('EMBEDD_YOUTUBE_FROM_LINKS', True, BASIC_SET, dict(
label=_("Auto embed youtube videos"),
help_text = _("Try embedding youtube videos that are given as links."),
required=False,
))
EMBEDDED_VIDEO_WIDTH = Setting('EMBEDDED_VIDEO_WIDTH', 480, BASIC_SET, dict(
label = _("Embedded video width"),
help_text = _("Used for auto-embedded videos. Max 580 in current desing.")))
EMBEDDED_VIDEO_HEIGHT = Setting('EMBEDDED_VIDEO_HEIGHT', 360, BASIC_SET, dict(
label = _("Embedded video height"),
help_text = _("Used for auto-embedded videos.")))
EMBEDD_IMG_FROM_LINKS = Setting('EMBEDD_IMG_FROM_LINKS', True, BASIC_SET, dict(
label=_("Auto embed pictures"),
help_text = _("Try embedding pictures that are given as links.."),
required=False,
))
EMBEDDED_IMG_WIDTH = Setting('EMBEDDED_IMG_WIDTH', 480, BASIC_SET, dict(
label = _("Embedded pictures max width"),
help_text = _("Used for auto-embedded pictures. Max 580 in current desing."))) |
Soya93/Extract-Refactoring | refs/heads/master | python/testData/inspections/ChainedComparison5_after.py | 83 | mapsize = 35
def test(x, y):
if 0 <= x < mapsize and 0 <= y < mapsize:
return 1 |
MorellatoAriel/PyGobstones | refs/heads/develop | interpreter/vgbs/lang/judge/gbs_judge.py | 4 | #
# Copyright (C) 2011, 2012 Pablo Barenbaum <foones@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import sys
import zipfile
import tempfile
import shutil
import common.utils
import common.i18n as i18n
import lang.judge
import lang.board.formats
import lang.gbs_board
import lang.bnf_parser
import lang.gbs_parser
import lang.gbs_lint
import lang.gbs_compiler
import lang.gbs_vm
import lang.gbs_vm_serializer
import lang.gbs_builtins
import lang.gbs_board
def read_problem_tree(f):
def level(x):
i = 0
while x[i] == '>':
i += 1
return i
lst0 = common.utils.read_stripped_lines(f)
tree = []
for x in lst0:
lv = level(x)
if lv == 0:
tree[-1].append(x)
else:
while len(tree) >= level(x):
sub = tree.pop()
tree[-1].append(sub)
tree.append([x[lv:]])
while len(tree) > 1:
sub = tree.pop()
tree[-1].append(sub)
return tree[0]
def problem_tree_to_list(tree):
if isinstance(tree, list):
res = []
for x in tree[1:]:
res.extend(problem_tree_to_list(x))
return res
else:
return [tree]
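# Worked example (input shape assumed from the parser above): a file
# containing
#     >Course
#     >>Basics
#     Problem1
#     Problem2
#     >>Loops
#     Problem3
# is parsed by read_problem_tree into
#     ['Course', ['Basics', 'Problem1', 'Problem2'], ['Loops', 'Problem3']]
# and problem_tree_to_list flattens that to
#     ['Problem1', 'Problem2', 'Problem3']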
def read_dict_file(f):
d = {}
for l in common.utils.read_stripped_lines(f):
k, v = l.split(' ')
assert v in ['True', 'False']
if v == 'True':
d[k] = True
else:
d[k] = False
return d
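# Expected file format: one 'key True|False' pair per line. E.g. a
# problem_type.txt containing
#     check_board True
#     check_head False
#     check_result True
# yields {'check_board': True, 'check_head': False, 'check_result': True}.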
def object_for(s):
if s.endswith('.gbs'):
s = s[:-4]
s += '.gbo'
return s
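# e.g. object_for('Solution.gbs') == 'Solution.gbo'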
class GbsRun(object):
def __init__(self, run_name, program_name, board_name):
self._run_name = run_name
self._program_name = program_name
self._board_name = board_name
def id(self):
return '%s_%s' % (self._program_name, self._board_name.replace('/', '.'))
def run_name(self):
return self._run_name
def program_name(self):
return self._program_name
def board_name(self):
return self._board_name
def read_run_file(problem, prefix, f):
res = []
i = 0
for l in common.utils.read_stripped_lines(f):
i += 1
t = l.split(' ')
assert len(t) == 2
name = '%s.%s%u' % (problem, prefix, i)
res.append(GbsRun(name, t[0], t[1]))
return res
def collect_bundles():
res = []
for dr in lang.judge.Bundle_lookup_path:
try:
dr_contents = os.listdir(dr)
except OSError:
continue
for bn in dr_contents:
fn = os.path.join(dr, bn)
if len(fn) >= 3 and fn[-3:].lower() == 'gbz':
try:
zf = zipfile.ZipFile(fn, 'r')
except Exception: # e.g. zipfile.BadZipfile
continue
if '__GBZ__' in zf.namelist():
res.append(fn)
zf.close()
return res
class ProblemBundle(object):
def test_source_names(self, problem):
res = []
for tc in self.test_cases_for(problem):
res.append(tc.program_name())
return common.utils.seq_no_repeats(res)
class SourceProblemBundle(ProblemBundle):
def __init__(self, problem_set):
self._path = os.path.dirname(problem_set)
f = open(problem_set, 'r')
self._problem_tree = read_problem_tree(f)
f.close()
self._problem_list = problem_tree_to_list(self._problem_tree)
def files_for_problem(self, problem):
fs = []
for x in os.listdir(os.path.join(self._path, problem)):
if x != 'Solution.gbs':
fs.append(os.path.join(self._path, problem, x))
return fs
def problems(self):
return self._problem_list
def options(self, problem):
assert problem in self._problem_list
f = open(os.path.join(self._path, problem, 'problem_type.txt'))
opts = read_dict_file(f)
f.close()
return opts
def solution_path_for(self, problem):
return os.path.join(self._path, problem, 'Solution.gbs')
def solution_for(self, problem):
return common.utils.read_file(self.solution_path_for(problem))
def test_cases_for(self, problem):
f = open(os.path.join(self._path, problem, 'tests.txt'))
res = read_run_file(problem, 'test', f)
f.close()
return res
def load_source(self, problem, source_name):
f = open(os.path.join(self._path, problem, source_name), 'r')
src = f.read()
f.close()
return src
def load_board(self, problem, board_name):
fn = os.path.join(self._path, board_name)
fmt = lang.board.formats.format_for(fn)
board = lang.gbs_board.Board((1, 1))
f = open(fn, 'r')
board.load(f, fmt)
f.close()
return board
def common_boards(self):
if not os.path.exists(os.path.join(self._path, '_boards')):
return []
res = []
for bfn in os.listdir(os.path.join(self._path, '_boards')):
res.append(os.path.join(self._path, '_boards', bfn))
return res
def dump_compiled_code(self, problem, source_name, outf):
contents = self.load_source(problem, source_name)
fn = os.path.join(self._path, problem, source_name)
code = compile_source(contents, fn=fn)
lang.gbs_vm_serializer.dump(code, outf, style='compact')
_statement_template = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="generator" content="PyGobstones$VERSION">
<title>$TITLE</title>
<style type="text/css">')
body {
font-family: Verdana, helvetica;
font-size: 10pt;
}
.statement {
width: 50%;
}
p {
text-indent: 1cm;
}
h1 {
border: black solid 1px;
padding: 10px;
}
</style>
</head>
<body>
<h1>$TITLE</h1>
<div class="statement">
$STATEMENT
</div>
</body>
</html>
'''
def zip_path_join(*args):
return '/'.join(args)
def zipfile_stream(zf, filename):
# The body of this function should be conceptually
# return zf.open(filename).
#
# We can't use that directly because zf.open is available
# only in Python >= 2.6.
#
# Also, in Python 3.1, we must wrap the output
# in an io.TextIOWrapper for the lines to be read as
# strings (not as bytes).
#
if common.utils.python_major_version() < 3:
return zf.open(filename)
# For still older Python versions:
# return common.utils.StringIO(zf.read(filename))
else:
import io
return io.TextIOWrapper(zf.open(filename))
class GbzProblemBundle(ProblemBundle):
def __init__(self, zipname):
self._fn = zipname
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, '__GBZ__')
self._problem_tree = read_problem_tree(f)
f.close()
zf.close()
self._problem_list = problem_tree_to_list(self._problem_tree)
def options(self, problem):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, 'problem_type.txt'))
opts = read_dict_file(f)
f.close()
zf.close()
return opts
def problems(self):
return self._problem_list
def problem_tree(self):
return self._problem_tree
def problem_statement(self, problem):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, 'problem.html'))
statement = f.read()
f.close()
zf.close()
template = _statement_template
replacements = {
'$TITLE': problem,
'$STATEMENT': statement,
'$VERSION': common.utils.version_number(),
}
for k, v in replacements.items():
template = template.replace(k, v)
return template
def test_cases_for(self, problem):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, 'tests.txt'))
res = read_run_file(problem, 'test', f)
f.close()
zf.close()
return res
def load_source(self, problem, source_name):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, source_name))
src = f.read()
f.close()
zf.close()
return src
def load_object(self, problem, source_name):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, object_for(source_name)))
compiled_code = lang.gbs_vm_serializer.load(f)
f.close()
zf.close()
return compiled_code
def load_board(self, problem, board_name):
zf = zipfile.ZipFile(self._fn)
fmt = lang.board.formats.format_for(board_name)
board = lang.gbs_board.Board((1, 1))
f = zipfile_stream(zf, board_name)
board.load(f, fmt)
f.close()
zf.close()
return board
def global_fingerprint(self, problem):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, 'fingerprint.txt'))
fgp = f.read()
f.close()
zf.close()
return fgp
def fingerprint_for_test_case(self, problem, test_case):
zf = zipfile.ZipFile(self._fn)
f = zipfile_stream(zf, zip_path_join(problem, 'fingerprint_%s.txt' % (test_case.id(),)))
fgp = f.read()
f.close()
zf.close()
return fgp
def compile_source(contents, fn='...', toplevel_filename=None, log=None):
tree = lang.gbs_parser.parse_string_try_prelude(contents, filename=fn, toplevel_filename=toplevel_filename)
lang.gbs_lint.lint(tree, strictness='lax')
compiled_code = lang.gbs_compiler.compile_program(tree)
return compiled_code
class CodeDictionary(object):
def compiled_code_for(self, source_name):
return self.code_dict[source_name]
class CodeDictionaryFromSolution(CodeDictionary):
def __init__(self, bundle, problem, solution_source, fn='...', log=None):
def log_(msg):
if log is not None:
log(msg)
# Create the Solution.gbs file from the given source
tmpdir = tempfile.mkdtemp()
self._solution_filename = solution_filename = os.path.join(tmpdir, 'Solution.gbs')
f = open(solution_filename, 'w')
f.write(solution_source)
f.close()
# Copy the Prelude.gbs file to the test directory
prelude_fn = lang.gbs_parser.prelude_for_file(fn)
if prelude_fn is not None:
shutil.copyfile(prelude_fn, os.path.join(tmpdir, os.path.basename(prelude_fn)))
code_dict = {}
for source_name in bundle.test_source_names(problem):
if source_name != 'Solution.gbs':
test_filename = os.path.join(tmpdir, source_name)
test_source = bundle.load_source(problem, source_name)
f = open(test_filename, 'w')
f.write(test_source)
f.close()
else:
test_filename = solution_filename
test_source = solution_source
try:
compiled_code = compile_source(test_source, fn=test_filename, toplevel_filename=solution_filename, log=log_)
except common.utils.SourceException as exception:
shutil.rmtree(tmpdir)
self.status = 'FAILED'
self.exception = exception
return
code_dict[source_name] = compiled_code
shutil.rmtree(tmpdir)
self.status = 'OK'
self.code_dict = code_dict
class CodeDictionaryFromObject(CodeDictionary):
def __init__(self, bundle, problem):
code_dict = {}
self._solution_filename = '???'
for source_name in bundle.test_source_names(problem):
try:
code_dict[source_name] = bundle.load_object(problem, source_name)
except lang.gbs_vm_serializer.GbsObjectFormatException as exception:
self.status = 'FAILED'
self.exception = exception
return
self.status = 'OK'
self.code_dict = code_dict
class TestDriver(object):
def __init__(self, bundle, problem, code_dict, fn='...', log=None):
self._bundle = bundle
self._problem = problem
self._code_dict = code_dict
self._test_cases = self._bundle.test_cases_for(self._problem)
self._state = 'start_test_case'
self._current_test_case = 0
self._solutions = {}
self._log = log
def problem(self):
return self._problem
def finished(self):
return self._state == 'end'
def log(self, msg):
if self._log is not None:
self._log(msg)
def run(self):
while True:
r = self.step()
if r == 'END':
break
elif r == 'FAILED':
return 'FAILED'
return 'OK'
def step(self):
if self._state == 'failed':
return 'FAILED'
elif self._state == 'start_test_case':
return self.start_test_case(self._current_test_case)
elif self._state == 'running':
return self.step_vm()
else:
assert False
def start_test_case(self, i):
if i >= len(self._test_cases):
self.log(i18n.i18n('Program execution finished.'))
self._state = 'end'
return 'END'
n_of_m = ' (%s/%s)' % ((i + 1), len(self._test_cases))
self.log(i18n.i18n('Starting test case %s.') % (self._test_cases[i].id(),) + n_of_m)
compiled_code = self._code_dict.compiled_code_for(self._test_cases[i].program_name())
self._test_board = self._bundle.load_board(self._problem, self._test_cases[i].board_name())
try:
self._vm = lang.gbs_vm.GbsVmInterpreter(toplevel_filename=self._code_dict._solution_filename)
self._vm.init_program(compiled_code, self._test_board)
except common.utils.SourceException as exception:
self._error_exception = exception
self._state = 'failed'
return 'FAILED'
self._state = 'running'
return 'CONTINUE'
def step_vm(self):
try:
r = self._vm.step()
if r[0] == 'END':
tc = self._test_cases[self._current_test_case]
self._solutions[tc.id()] = (self._test_board, r[1])
self._state = 'start_test_case'
self._current_test_case += 1
return 'CONTINUE'
return 'CONTINUE'
except common.utils.SourceException as exception:
self._error_exception = exception
self._state = 'failed'
return 'FAILED'
def solution(self, test_case):
return self._solutions[test_case.id()]
def current_test_case(self):
return self._test_cases[self._current_test_case]
def solutions_equal(options, sol1, sol2):
board1, keyval1 = sol1
board2, keyval2 = sol2
if options['check_board']:
if not board1.equal_contents(board2):
return False
if options['check_head']:
if board1.head != board2.head:
return False
if options['check_result']:
if keyval1 != keyval2:
return False
return True
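# Illustrative sketch (not part of the original API): comparing two solutions
# produced by TestDriver.solution(). The option keys match the ones read
# above; the boards and result dicts are hypothetical.
#
#   options = {'check_board': True, 'check_head': False, 'check_result': True}
#   sol1 = (board_a, {'result': 1})
#   sol2 = (board_b, {'result': 1})
#   boards_match = solutions_equal(options, sol1, sol2)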
|
lcy-seso/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_optimizer.py | 4 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
from paddle.fluid.backward import append_backward
class TestOptimizer(unittest.TestCase):
def test_sgd_optimizer(self):
def check_sgd_optimizer(optimizer_attr):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr=optimizer_attr)
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
opts, _ = sgd_optimizer.minimize(mean_out, init_program)
return opts
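# With a per-parameter 'learning_rate' other than 1.0, the optimizer first
# scales the gradient, so three ops are expected (fill_constant,
# elementwise_mul, sgd); with 1.0 the scaling is skipped and only the sgd
# op remains, as the assertions below verify.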
opts = check_sgd_optimizer({'learning_rate': 1.1})
self.assertEqual(len(opts), 3)
self.assertEqual([op.type for op in opts],
["fill_constant", "elementwise_mul", "sgd"])
opts = check_sgd_optimizer({'learning_rate': 1.0})
self.assertEqual(len(opts), 1)
self.assertEqual([op.type for op in opts], ["sgd"])
class TestMomentumOptimizer(unittest.TestCase):
class MockMomentum(optimizer.MomentumOptimizer):
def get_accumulators(self):
return self._accumulators
def get_velocity_str(self):
return self._velocity_acc_str
def test_vanilla_momentum_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
learning_rate = 0.01
momentum_optimizer = self.MockMomentum(
learning_rate=learning_rate, momentum=0.2)
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
opts = momentum_optimizer.create_optimization_pass(
params_grads, mul_out, init_program)
self.assertEqual(len(opts), 3)
momentum_op = opts[-1]
self.assertEqual([op.type for op in opts],
["fill_constant", "elementwise_mul", "momentum"])
self.assertFalse(momentum_op.attr('use_nesterov'))
# Check accumulators
accumulators = momentum_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 1)
self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
self.assertEqual(len(velocity_acc), 1)
self.assertTrue(mul_x.name in velocity_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 2)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
self.assertEqual(init_ops[1].type, "fill_constant")
self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
def test_nesterov_momentum_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
momentum_optimizer = self.MockMomentum(
learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
opts = momentum_optimizer.create_optimization_pass(
params_grads, mul_out, init_program)
self.assertEqual(len(opts), 3)
momentum_op = opts[-1]
self.assertEqual([op.type for op in opts],
["fill_constant", "elementwise_mul", "momentum"])
self.assertTrue(momentum_op.attr('use_nesterov'))
# Check accumulators
accumulators = momentum_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 1)
self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
self.assertEqual(len(velocity_acc), 1)
self.assertTrue(mul_x.name in velocity_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 2)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
self.assertEqual(init_ops[1].type, "fill_constant")
self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
class TestAdagradOptimizer(unittest.TestCase):
class MockAdagrad(optimizer.AdagradOptimizer):
def get_accumulators(self):
return self._accumulators
def get_moment_str(self):
return self._moment_acc_str
def test_adagrad_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adagrad_optimizer = self.MockAdagrad(
learning_rate=learning_rate, epsilon=1.0e-6)
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
init_program)
self.assertEqual(len(opts), 3)
self.assertEqual([op.type for op in opts],
["fill_constant", "elementwise_mul", "adagrad"])
# Check accumulators
accumulators = adagrad_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 1)
self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators)
moment_acc = accumulators[adagrad_optimizer.get_moment_str()]
self.assertEqual(len(moment_acc), 1)
self.assertTrue(mul_x.name in moment_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 2)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
self.assertEqual(init_ops[1].type, "fill_constant")
self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
class TestAdamOptimizer(unittest.TestCase):
class MockAdam(optimizer.AdamOptimizer):
def get_accumulators(self):
return self._accumulators
def get_moment1_str(self):
return self._moment1_acc_str
def get_moment2_str(self):
return self._moment2_acc_str
def test_adam_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adam_optimizer = self.MockAdam(
learning_rate=learning_rate, beta1=0.9, beta2=0.999)
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
init_program)
self.assertEqual(len(opts), 5)
self.assertEqual(
[op.type for op in opts],
["fill_constant", "elementwise_mul", "adam", "scale", "scale"])
# Check accumulators
accumulators = adam_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 2)
self.assertTrue(adam_optimizer.get_moment1_str() in accumulators)
self.assertTrue(adam_optimizer.get_moment2_str() in accumulators)
moment1_acc = accumulators[adam_optimizer.get_moment1_str()]
moment2_acc = accumulators[adam_optimizer.get_moment2_str()]
self.assertEqual(len(moment1_acc), 1)
self.assertEqual(len(moment2_acc), 1)
self.assertTrue(mul_x.name in moment1_acc)
self.assertTrue(mul_x.name in moment2_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 5)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
class TestAdamaxOptimizer(unittest.TestCase):
class MockAdamax(optimizer.AdamaxOptimizer):
def get_accumulators(self):
return self._accumulators
def get_moment_str(self):
return self._moment_acc_str
def get_inf_norm_str(self):
return self._inf_norm_acc_str
def test_adamax_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adamax_optimizer = self.MockAdamax(
learning_rate=learning_rate, beta1=0.9, beta2=0.999)
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
init_program)
self.assertEqual(len(opts), 4)
self.assertEqual(
[op.type for op in opts],
["fill_constant", "elementwise_mul", "adamax", "scale"])
# Check accumulators
accumulators = adamax_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 2)
self.assertTrue(adamax_optimizer.get_moment_str() in accumulators)
self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators)
moment_acc = accumulators[adamax_optimizer.get_moment_str()]
inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()]
self.assertEqual(len(moment_acc), 1)
self.assertEqual(len(inf_norm_acc), 1)
self.assertTrue(mul_x.name in moment_acc)
self.assertTrue(mul_x.name in inf_norm_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 4)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
class TestDecayedAdagradOptimizer(unittest.TestCase):
class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer):
def get_accumulators(self):
return self._accumulators
def get_moment_str(self):
return self._moment_acc_str
def test_decayed_adagrad_optimizer(self):
init_program = framework.Program()
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
optimize_attr={'learning_rate': 1.1})
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
decayed_adagrad_optimizer = self.MockDecayedAdagrad(
learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
opts = decayed_adagrad_optimizer.create_optimization_pass(
params_grads, mul_out, init_program)
self.assertEqual(len(opts), 3)
self.assertEqual(
[op.type for op in opts],
["fill_constant", "elementwise_mul", "decayed_adagrad"])
# Check accumulators
accumulators = decayed_adagrad_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 1)
self.assertTrue(
decayed_adagrad_optimizer.get_moment_str() in accumulators)
moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()]
self.assertEqual(len(moment_acc), 1)
self.assertTrue(mul_x.name in moment_acc)
# Check init_program
init_ops = init_program.global_block().ops
self.assertEqual(len(init_ops), 2)
self.assertEqual(init_ops[0].type, "fill_constant")
self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
self.assertEqual(init_ops[1].type, "fill_constant")
self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
if __name__ == '__main__':
unittest.main()
|
willthames/ansible-lint | refs/heads/master | test/custom_rules/example_com/ExampleComRule.py | 2 | # Copyright (c) 2020, Ansible Project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""A dummy custom rule module #2."""
import ansiblelint.rules.AlwaysRunRule
class ExampleComRule(ansiblelint.rules.AlwaysRunRule.AlwaysRunRule):
"""A dummy custom rule class."""
id = '100002'
|
hachreak/invenio-redirector | refs/heads/master | docs/conf.py | 4 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import print_function
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
import ultramock
ultramock.activate()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invenio-Redirector'
copyright = u'2015, CERN'
author = u'CERN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('..', 'invenio_redirector', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only set the theme when we are not on RTD
if not on_rtd:
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
print("`sphinx_rtd_theme` not found, pip install it", file=sys.stderr)
html_theme = 'alabaster'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-redirector_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'invenio-redirector.tex', u'invenio-redirector Documentation',
u'CERN', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invenio-redirector', u'invenio-redirector Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'invenio-redirector', u'Invenio-Redirector Documentation',
author, 'invenio-redirector', 'Invenio module that adds custom redirection handlers.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
Amechi101/concepteur-market-app | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/auth/admin.py | 29 | from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class GroupAdmin(admin.ModelAdmin):
search_fields = ('name',)
ordering = ('name',)
filter_horizontal = ('permissions',)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == 'permissions':
qs = kwargs.get('queryset', db_field.rel.to.objects)
# Avoid a major performance hit resolving permission names which
# triggers a content_type load:
kwargs['queryset'] = qs.select_related('content_type')
return super(GroupAdmin, self).formfield_for_manytomany(
db_field, request=request, **kwargs)
class UserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2')}
),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(UserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(UserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
from django.conf.urls import patterns
return patterns('',
(r'^(\d+)/password/$',
self.admin_site.admin_view(self.user_change_password))
) + super(UserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(UserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(UserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = get_object_or_404(self.get_queryset(request), pk=id)
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, request.user, change_message)
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': IS_POPUP_VAR in request.REQUEST,
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
return TemplateResponse(request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context, current_app=self.admin_site.name)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST['_continue'] = 1
return super(UserAdmin, self).response_add(request, obj,
post_url_continue)
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
|
efortuna/AndroidSDKClone | refs/heads/master | ndk/prebuilt/linux-x86_64/lib/python2.7/encodings/hz.py | 817 | #
# hz.py: Python Unicode Codec for HZ
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('hz')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='hz',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
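# Illustrative usage (not part of this module): once registered through the
# encodings package, the codec works via the normal string machinery, e.g.:
#
#   u'abc'.encode('hz')        # plain ASCII passes through unchanged
#   '~{VP~}'.decode('hz')      # GB2312 bytes escaped between ~{ and ~}
#
# The exact escaped byte values are HZ/GB2312 details; treat them as examples.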
|
usmanantharikta/workshop_bekup | refs/heads/master | node-v4.6.1-linux-x64/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that even a small per-copy
overhead ends up taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
'or expand simple_copy support.' % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
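# Minimal usage sketch (mirrors copy.deepcopy for the supported types only):
#
#   original = {'targets': [{'name': 'a'}, {'name': 'b'}]}
#   clone = deepcopy(original)
#   assert clone == original and clone is not original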
|
loafbaker/django_ecommerce2 | refs/heads/master | django_ecommerce2/wsgi.py | 1 | """
WSGI config for django_ecommerce2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_ecommerce2.settings")
application = get_wsgi_application()
|
jarshwah/django | refs/heads/master | django/contrib/gis/db/backends/mysql/base.py | 58 | from django.db.backends.mysql.base import \
DatabaseWrapper as MySQLDatabaseWrapper
from .features import DatabaseFeatures
from .introspection import MySQLIntrospection
from .operations import MySQLOperations
from .schema import MySQLGISSchemaEditor
class DatabaseWrapper(MySQLDatabaseWrapper):
SchemaEditorClass = MySQLGISSchemaEditor
# Classes instantiated in __init__().
features_class = DatabaseFeatures
introspection_class = MySQLIntrospection
ops_class = MySQLOperations
|
yury-s/v8-inspector | refs/heads/master | Source/chrome/tools/telemetry/telemetry/timeline/event_unittest.py | 95 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import event
class TimelineEventTest(unittest.TestCase):
def testHasThreadTimestamps(self):
# No thread_start and no thread_duration
event_1 = event.TimelineEvent('test', 'foo', 0, 10)
# Has thread_start but no thread_duration
event_2 = event.TimelineEvent('test', 'foo', 0, 10, 2)
# Has thread_duration but no thread_start
event_3 = event.TimelineEvent('test', 'foo', 0, 10, None, 4)
# Has thread_start and thread_duration
event_4 = event.TimelineEvent('test', 'foo', 0, 10, 2, 4)
self.assertFalse(event_1.has_thread_timestamps)
self.assertFalse(event_2.has_thread_timestamps)
self.assertFalse(event_3.has_thread_timestamps)
self.assertTrue(event_4.has_thread_timestamps)
|
Sajid3/orp | refs/heads/master | third-party/qemu-orp/scripts/qapi.py | 66 | #
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import re
from ordereddict import OrderedDict
import os
import sys
builtin_types = [
'str', 'int', 'number', 'bool',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'
]
builtin_type_qtypes = {
'str': 'QTYPE_QSTRING',
'int': 'QTYPE_QINT',
'number': 'QTYPE_QFLOAT',
'bool': 'QTYPE_QBOOL',
'int8': 'QTYPE_QINT',
'int16': 'QTYPE_QINT',
'int32': 'QTYPE_QINT',
'int64': 'QTYPE_QINT',
'uint8': 'QTYPE_QINT',
'uint16': 'QTYPE_QINT',
'uint32': 'QTYPE_QINT',
'uint64': 'QTYPE_QINT',
}
def error_path(parent):
res = ""
while parent:
res = ("In file included from %s:%d:\n" % (parent['file'],
parent['line'])) + res
parent = parent['parent']
return res
class QAPISchemaError(Exception):
def __init__(self, schema, msg):
self.input_file = schema.input_file
self.msg = msg
self.col = 1
self.line = schema.line
for ch in schema.src[schema.line_pos:schema.pos]:
if ch == '\t':
self.col = (self.col + 7) % 8 + 1
else:
self.col += 1
self.info = schema.parent_info
def __str__(self):
return error_path(self.info) + \
"%s:%d:%d: %s" % (self.input_file, self.line, self.col, self.msg)
class QAPIExprError(Exception):
def __init__(self, expr_info, msg):
self.info = expr_info
self.msg = msg
def __str__(self):
return error_path(self.info['parent']) + \
"%s:%d: %s" % (self.info['file'], self.info['line'], self.msg)
class QAPISchema:
def __init__(self, fp, input_relname=None, include_hist=[],
previously_included=[], parent_info=None):
""" include_hist is a stack used to detect inclusion cycles
previously_included is a global state used to avoid multiple
inclusions of the same file"""
input_fname = os.path.abspath(fp.name)
if input_relname is None:
input_relname = fp.name
self.input_dir = os.path.dirname(input_fname)
self.input_file = input_relname
self.include_hist = include_hist + [(input_relname, input_fname)]
previously_included.append(input_fname)
self.parent_info = parent_info
self.src = fp.read()
if self.src == '' or self.src[-1] != '\n':
self.src += '\n'
self.cursor = 0
self.line = 1
self.line_pos = 0
self.exprs = []
self.accept()
while self.tok != None:
expr_info = {'file': input_relname, 'line': self.line, 'parent': self.parent_info}
expr = self.get_expr(False)
if isinstance(expr, dict) and "include" in expr:
if len(expr) != 1:
raise QAPIExprError(expr_info, "Invalid 'include' directive")
include = expr["include"]
if not isinstance(include, str):
raise QAPIExprError(expr_info,
'Expected a file name (string), got: %s'
% include)
include_path = os.path.join(self.input_dir, include)
for elem in self.include_hist:
if include_path == elem[1]:
raise QAPIExprError(expr_info, "Inclusion loop for %s"
% include)
# skip multiple includes of the same file
if include_path in previously_included:
continue
try:
fobj = open(include_path, 'r')
except IOError, e:
raise QAPIExprError(expr_info,
'%s: %s' % (e.strerror, include))
exprs_include = QAPISchema(fobj, include, self.include_hist,
previously_included, expr_info)
self.exprs.extend(exprs_include.exprs)
else:
expr_elem = {'expr': expr,
'info': expr_info}
self.exprs.append(expr_elem)
def accept(self):
while True:
self.tok = self.src[self.cursor]
self.pos = self.cursor
self.cursor += 1
self.val = None
if self.tok == '#':
self.cursor = self.src.find('\n', self.cursor)
elif self.tok in ['{', '}', ':', ',', '[', ']']:
return
elif self.tok == "'":
string = ''
esc = False
while True:
ch = self.src[self.cursor]
self.cursor += 1
if ch == '\n':
raise QAPISchemaError(self,
'Missing terminating "\'"')
if esc:
string += ch
esc = False
elif ch == "\\":
esc = True
elif ch == "'":
self.val = string
return
else:
string += ch
elif self.tok == '\n':
if self.cursor == len(self.src):
self.tok = None
return
self.line += 1
self.line_pos = self.cursor
elif not self.tok.isspace():
raise QAPISchemaError(self, 'Stray "%s"' % self.tok)
def get_members(self):
expr = OrderedDict()
if self.tok == '}':
self.accept()
return expr
if self.tok != "'":
raise QAPISchemaError(self, 'Expected string or "}"')
while True:
key = self.val
self.accept()
if self.tok != ':':
raise QAPISchemaError(self, 'Expected ":"')
self.accept()
if key in expr:
raise QAPISchemaError(self, 'Duplicate key "%s"' % key)
expr[key] = self.get_expr(True)
if self.tok == '}':
self.accept()
return expr
if self.tok != ',':
raise QAPISchemaError(self, 'Expected "," or "}"')
self.accept()
if self.tok != "'":
raise QAPISchemaError(self, 'Expected string')
def get_values(self):
expr = []
if self.tok == ']':
self.accept()
return expr
if not self.tok in [ '{', '[', "'" ]:
raise QAPISchemaError(self, 'Expected "{", "[", "]" or string')
while True:
expr.append(self.get_expr(True))
if self.tok == ']':
self.accept()
return expr
if self.tok != ',':
raise QAPISchemaError(self, 'Expected "," or "]"')
self.accept()
def get_expr(self, nested):
if self.tok != '{' and not nested:
raise QAPISchemaError(self, 'Expected "{"')
if self.tok == '{':
self.accept()
expr = self.get_members()
elif self.tok == '[':
self.accept()
expr = self.get_values()
elif self.tok == "'":
expr = self.val
self.accept()
else:
raise QAPISchemaError(self, 'Expected "{", "[" or string')
return expr
def find_base_fields(base):
base_struct_define = find_struct(base)
if not base_struct_define:
return None
return base_struct_define['data']
# Return the discriminator enum define if discriminator is specified as an
# enum type, otherwise return None.
def discriminator_find_enum_define(expr):
base = expr.get('base')
discriminator = expr.get('discriminator')
if not (discriminator and base):
return None
base_fields = find_base_fields(base)
if not base_fields:
return None
discriminator_type = base_fields.get(discriminator)
if not discriminator_type:
return None
return find_enum(discriminator_type)
def check_event(expr, expr_info):
params = expr.get('data')
if params:
for argname, argentry, optional, structured in parse_args(params):
if structured:
raise QAPIExprError(expr_info,
"Nested structure define in event is not "
"supported, event '%s', argname '%s'"
% (expr['event'], argname))
def check_union(expr, expr_info):
name = expr['union']
base = expr.get('base')
discriminator = expr.get('discriminator')
members = expr['data']
# If the object has a member 'base', its value must name a complex type.
if base:
base_fields = find_base_fields(base)
if not base_fields:
raise QAPIExprError(expr_info,
"Base '%s' is not a valid type"
% base)
# If the union object has no member 'discriminator', it's an
# ordinary union.
if not discriminator:
enum_define = None
# Else if the value of member 'discriminator' is {}, it's an
# anonymous union.
elif discriminator == {}:
enum_define = None
# Else, it's a flat union.
else:
# The object must have a member 'base'.
if not base:
raise QAPIExprError(expr_info,
"Flat union '%s' must have a base field"
% name)
# The value of member 'discriminator' must name a member of the
# base type.
discriminator_type = base_fields.get(discriminator)
if not discriminator_type:
raise QAPIExprError(expr_info,
"Discriminator '%s' is not a member of base "
"type '%s'"
% (discriminator, base))
enum_define = find_enum(discriminator_type)
# Do not allow string discriminator
if not enum_define:
raise QAPIExprError(expr_info,
"Discriminator '%s' must be of enumeration "
"type" % discriminator)
# Check every branch
for (key, value) in members.items():
# If this named member's value names an enum type, then all members
# of 'data' must also be members of the enum type.
if enum_define and not key in enum_define['enum_values']:
raise QAPIExprError(expr_info,
"Discriminator value '%s' is not found in "
"enum '%s'" %
(key, enum_define["enum_name"]))
# TODO: add checking for values. Keys are checked as above; values could
# also be checked here, but we need more functions to handle the array case.
def check_exprs(schema):
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('union'):
check_union(expr, expr_elem['info'])
if expr.has_key('event'):
check_event(expr, expr_elem['info'])
def parse_schema(input_file):
try:
schema = QAPISchema(open(input_file, "r"))
except (QAPISchemaError, QAPIExprError), e:
print >>sys.stderr, e
exit(1)
exprs = []
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('enum'):
add_enum(expr['enum'], expr['data'])
elif expr.has_key('union'):
add_union(expr)
elif expr.has_key('type'):
add_struct(expr)
exprs.append(expr)
# Try again for hidden UnionKind enum
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('union'):
if not discriminator_find_enum_define(expr):
add_enum('%sKind' % expr['union'])
try:
check_exprs(schema)
except QAPIExprError, e:
print >>sys.stderr, e
exit(1)
return exprs
def parse_args(typeinfo):
if isinstance(typeinfo, basestring):
struct = find_struct(typeinfo)
assert struct != None
typeinfo = struct['data']
for member in typeinfo:
argname = member
argentry = typeinfo[member]
optional = False
structured = False
if member.startswith('*'):
argname = member[1:]
optional = True
if isinstance(argentry, OrderedDict):
structured = True
yield (argname, argentry, optional, structured)
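# Illustrative example with a hypothetical typeinfo: a leading '*' marks an
# optional member, and an OrderedDict value marks a structured one:
#
#   list(parse_args({'*name': 'str'})) == [('name', 'str', True, False)]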
def de_camel_case(name):
new_name = ''
for ch in name:
if ch.isupper() and new_name:
new_name += '_'
if ch == '-':
new_name += '_'
else:
new_name += ch.lower()
return new_name
def camel_case(name):
new_name = ''
first = True
for ch in name:
if ch in ['_', '-']:
first = True
elif first:
new_name += ch.upper()
first = False
else:
new_name += ch.lower()
return new_name
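# Illustrative examples; the two helpers are roughly inverse in spirit:
#
#   de_camel_case('CamelCase')  == 'camel_case'
#   de_camel_case('virtio-blk') == 'virtio_blk'
#   camel_case('camel_case')    == 'CamelCase'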
def c_var(name, protect=True):
# ANSI X3J11/88-090, 3.1.1
c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
'default', 'do', 'double', 'else', 'enum', 'extern', 'float',
'for', 'goto', 'if', 'int', 'long', 'register', 'return',
'short', 'signed', 'sizeof', 'static', 'struct', 'switch',
'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'])
# ISO/IEC 9899:1999, 6.4.1
c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
# ISO/IEC 9899:2011, 6.4.1
c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
'_Static_assert', '_Thread_local'])
# GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
# excluding _.*
gcc_words = set(['asm', 'typeof'])
# C++ ISO/IEC 14882:2003 2.11
cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',
'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
'namespace', 'new', 'operator', 'private', 'protected',
'public', 'reinterpret_cast', 'static_cast', 'template',
'this', 'throw', 'true', 'try', 'typeid', 'typename',
'using', 'virtual', 'wchar_t',
# alternative representations
'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
# namespace pollution:
polluted_words = set(['unix', 'errno'])
if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words):
return "q_" + name
return name.replace('-', '_').lstrip("*")
def c_fun(name, protect=True):
return c_var(name, protect).replace('.', '_')
def c_list_type(name):
return '%sList' % name
def type_name(name):
if type(name) == list:
return c_list_type(name[0])
return name
enum_types = []
struct_types = []
union_types = []
def add_struct(definition):
global struct_types
struct_types.append(definition)
def find_struct(name):
global struct_types
for struct in struct_types:
if struct['type'] == name:
return struct
return None
def add_union(definition):
global union_types
union_types.append(definition)
def find_union(name):
global union_types
for union in union_types:
if union['union'] == name:
return union
return None
def add_enum(name, enum_values = None):
global enum_types
enum_types.append({"enum_name": name, "enum_values": enum_values})
def find_enum(name):
global enum_types
for enum in enum_types:
if enum['enum_name'] == name:
return enum
return None
def is_enum(name):
return find_enum(name) != None
eatspace = '\033EATSPACE.'
# A special suffix is added in c_type() for pointer types, and it's
# stripped in mcgen(). So please notice this when you check the return
# value of c_type() outside mcgen().
def c_type(name, is_param=False):
if name == 'str':
if is_param:
return 'const char *' + eatspace
return 'char *' + eatspace
elif name == 'int':
return 'int64_t'
elif (name == 'int8' or name == 'int16' or name == 'int32' or
name == 'int64' or name == 'uint8' or name == 'uint16' or
name == 'uint32' or name == 'uint64'):
return name + '_t'
elif name == 'size':
return 'uint64_t'
elif name == 'bool':
return 'bool'
elif name == 'number':
return 'double'
elif type(name) == list:
return '%s *%s' % (c_list_type(name[0]), eatspace)
elif is_enum(name):
return name
elif name == None or len(name) == 0:
return 'void'
elif name == name.upper():
return '%sEvent *%s' % (camel_case(name), eatspace)
else:
return '%s *%s' % (name, eatspace)
def is_c_ptr(name):
suffix = "*" + eatspace
return c_type(name).endswith(suffix)
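# Example of the eatspace convention (values shown before mcgen() strips the
# marker):
#
#   c_type('str')                == 'char *' + eatspace
#   c_type('str', is_param=True) == 'const char *' + eatspace
#   is_c_ptr('str')              == True
#   is_c_ptr('int')              == False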
def genindent(count):
ret = ""
for i in range(count):
ret += " "
return ret
indent_level = 0
def push_indent(indent_amount=4):
global indent_level
indent_level += indent_amount
def pop_indent(indent_amount=4):
global indent_level
indent_level -= indent_amount
def cgen(code, **kwds):
indent = genindent(indent_level)
lines = code.split('\n')
lines = map(lambda x: indent + x, lines)
return '\n'.join(lines) % kwds + '\n'
def mcgen(code, **kwds):
raw = cgen('\n'.join(code.split('\n')[1:-1]), **kwds)
return re.sub(re.escape(eatspace) + ' *', '', raw)
def basename(filename):
return filename.split("/")[-1]
def guardname(filename):
guard = basename(filename).rsplit(".", 1)[0]
for substr in [".", " ", "-"]:
guard = guard.replace(substr, "_")
return guard.upper() + '_H'
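# e.g. guardname('qapi-types.h') == 'QAPI_TYPES_H'
#      guardname('qapi-visit.h') == 'QAPI_VISIT_H'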
def guardstart(name):
return mcgen('''
#ifndef %(name)s
#define %(name)s
''',
name=guardname(name))
def guardend(name):
return mcgen('''
#endif /* %(name)s */
''',
name=guardname(name))
# ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1
# ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2
# ENUM24_Name -> ENUM24_NAME
def _generate_enum_string(value):
c_fun_str = c_fun(value, False)
if value.isupper():
return c_fun_str
new_name = ''
l = len(c_fun_str)
for i in range(l):
c = c_fun_str[i]
# When c is upper and no "_" appears before, do more checks
if c.isupper() and (i > 0) and c_fun_str[i - 1] != "_":
# Case 1: next string is lower
# Case 2: previous string is digit
if (i < (l - 1) and c_fun_str[i + 1].islower()) or \
c_fun_str[i - 1].isdigit():
new_name += '_'
new_name += c
return new_name.lstrip('_').upper()
def generate_enum_full_value(enum_name, enum_value):
abbrev_string = _generate_enum_string(enum_name)
value_string = _generate_enum_string(enum_value)
return "%s_%s" % (abbrev_string, value_string)
|
kalxas/OWSLib | refs/heads/master | owslib/map/__init__.py | 5 | # -*- coding: ISO-8859-15 -*-
|
vvv1559/intellij-community | refs/heads/master | python/helpers/pycharm/teamcity/diff_tools.py | 2 | import base64
import pprint
import sys
import unittest
_PY2K = sys.version_info < (3,)
_PRIMITIVES = [int, str, bool]
if _PY2K:
# Not available in py3
# noinspection PyUnresolvedReferences
_PRIMITIVES.append(unicode) # noqa
# Not available in py3
# noinspection PyUnresolvedReferences
_STR_F = unicode # noqa
else:
_STR_F = str
def patch_unittest_diff():
"""
Patches "assertEquals" to throw DiffError
"""
if sys.version_info < (2, 7):
return
old = unittest.TestCase.assertEqual
def _patched_equals(self, first, second, msg=None):
if first != second:
error = EqualsAssertionError(first, second, msg)
if error.is_too_big():
old(self, first, second, msg)
else:
raise error
unittest.TestCase.assertEqual = _patched_equals
def _format_and_convert(val):
# No need to pretty-print primitives
return val if any(x for x in _PRIMITIVES if isinstance(val, x)) else pprint.pformat(val)
class EqualsAssertionError(AssertionError):
def __init__(self, expected, actual, msg=None, preformated=False):
super(AssertionError, self).__init__()
self.expected = expected
self.actual = actual
self.msg = msg
if not preformated:
self.expected = _format_and_convert(self.expected)
self.actual = _format_and_convert(self.actual)
self.msg = msg if msg else ""
self.expected = _STR_F(self.expected)
self.actual = _STR_F(self.actual)
def is_too_big(self):
return len(self.actual) + len(self.expected) > 10000
def __str__(self):
return self._serialize()
def __unicode__(self):
return self._serialize()
def _serialize(self):
def fix_type(msg):
return msg if _PY2K else bytes(str(msg), "utf-8")
encoded_fields = [base64.b64encode(fix_type(x)) for x in [self.expected, self.actual, self.msg]]
if not _PY2K:
encoded_fields = [bytes.decode(x) for x in encoded_fields]
return "|".join(encoded_fields)
def deserialize_error(serialized_message):
parts = [base64.b64decode(x) for x in str(serialized_message).split("|")]
if not _PY2K:
parts = [bytes.decode(x) for x in parts]
    return EqualsAssertionError(parts[0], parts[1], parts[2], preformatted=True)
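# Hypothetical round-trip demo, not part of the original module: the
# serialized form is three base64 fields joined by '|', which
# deserialize_error() reverses.
if __name__ == '__main__':
    error = EqualsAssertionError([1, 2], [1, 3], msg='lists differ')
    restored = deserialize_error(str(error))
    assert restored.expected == error.expected
    assert restored.actual == error.actual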
|
MichaLasry/ImageTalke | refs/heads/master | functions/node_modules/firebase-admin/node_modules/grpc/third_party/boringssl/third_party/googletest/scripts/fuse_gtest_files.py | 346 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print('ERROR: Cannot find %s in directory %s.' % (relative_path,
directory))
print('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print('ABORTED.')
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print(__doc__)
sys.exit(1)
if __name__ == '__main__':
main()
|
ptesarik/crash-python | refs/heads/master | crash/kdump/__init__.py | 9 | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
mangelajo/python-neutronclient | refs/heads/master | neutronclient/neutron/v2_0/securitygroup.py | 7 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from neutronclient.common import exceptions
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20
def _get_remote(rule):
if rule['remote_ip_prefix']:
remote = '%s (CIDR)' % rule['remote_ip_prefix']
elif rule['remote_group_id']:
remote = '%s (group)' % rule['remote_group_id']
else:
remote = None
return remote
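# Illustrative behavior for a few hypothetical rule dicts (not part of the
# original client code):
#     {'remote_ip_prefix': '10.0.0.0/24', 'remote_group_id': None}
#         -> '10.0.0.0/24 (CIDR)'
#     {'remote_ip_prefix': None, 'remote_group_id': 'web-servers'}
#         -> 'web-servers (group)'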
def _get_protocol_port(rule):
proto = rule['protocol']
port_min = rule['port_range_min']
port_max = rule['port_range_max']
if proto in ('tcp', 'udp'):
if (port_min and port_min == port_max):
protocol_port = '%s/%s' % (port_min, proto)
elif port_min:
protocol_port = '%s-%s/%s' % (port_min, port_max, proto)
else:
protocol_port = proto
elif proto == 'icmp':
icmp_opts = []
if port_min is not None:
icmp_opts.append('type:%s' % port_min)
if port_max is not None:
icmp_opts.append('code:%s' % port_max)
if icmp_opts:
protocol_port = 'icmp (%s)' % ', '.join(icmp_opts)
else:
protocol_port = 'icmp'
elif proto is not None:
# port_range_min/max are not recognized for protocol
# other than TCP, UDP and ICMP.
protocol_port = proto
else:
protocol_port = None
return protocol_port
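# Illustrative outputs for a few hypothetical rules (not part of the
# original client code):
#     {'protocol': 'tcp', 'port_range_min': 80, 'port_range_max': 80}
#         -> '80/tcp'
#     {'protocol': 'tcp', 'port_range_min': 8000, 'port_range_max': 8080}
#         -> '8000-8080/tcp'
#     {'protocol': 'icmp', 'port_range_min': 8, 'port_range_max': 0}
#         -> 'icmp (type:8, code:0)'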
def _format_sg_rule(rule):
formatted = []
for field in ['direction',
'ethertype',
('protocol_port', _get_protocol_port),
'remote_ip_prefix',
'remote_group_id']:
if isinstance(field, tuple):
field, get_method = field
data = get_method(rule)
else:
data = rule[field]
if not data:
continue
if field in ('remote_ip_prefix', 'remote_group_id'):
data = '%s: %s' % (field, data)
formatted.append(data)
return ', '.join(formatted)
def _format_sg_rules(secgroup):
try:
return '\n'.join(sorted([_format_sg_rule(rule) for rule
in secgroup['security_group_rules']]))
except Exception:
return ''
class ListSecurityGroup(neutronV20.ListCommand):
"""List security groups that belong to a given tenant."""
resource = 'security_group'
list_columns = ['id', 'name', 'security_group_rules']
_formatters = {'security_group_rules': _format_sg_rules}
pagination_support = True
sorting_support = True
class ShowSecurityGroup(neutronV20.ShowCommand):
"""Show information of a given security group."""
resource = 'security_group'
allow_names = True
json_indent = 5
class CreateSecurityGroup(neutronV20.CreateCommand):
"""Create a security group."""
resource = 'security_group'
def add_known_arguments(self, parser):
parser.add_argument(
'name', metavar='NAME',
help=_('Name of security group.'))
parser.add_argument(
'--description',
help=_('Description of security group.'))
def args2body(self, parsed_args):
body = {'security_group': {
'name': parsed_args.name}}
if parsed_args.description:
body['security_group'].update(
{'description': parsed_args.description})
if parsed_args.tenant_id:
body['security_group'].update({'tenant_id': parsed_args.tenant_id})
return body
class DeleteSecurityGroup(neutronV20.DeleteCommand):
"""Delete a given security group."""
resource = 'security_group'
allow_names = True
class UpdateSecurityGroup(neutronV20.UpdateCommand):
"""Update a given security group."""
resource = 'security_group'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help=_('Name of security group.'))
parser.add_argument(
'--description',
help=_('Description of security group.'))
def args2body(self, parsed_args):
body = {'security_group': {}}
if parsed_args.name:
body['security_group'].update(
{'name': parsed_args.name})
if parsed_args.description:
body['security_group'].update(
{'description': parsed_args.description})
return body
class ListSecurityGroupRule(neutronV20.ListCommand):
"""List security group rules that belong to a given tenant."""
resource = 'security_group_rule'
list_columns = ['id', 'security_group_id', 'direction',
'ethertype', 'protocol/port', 'remote']
# replace_rules: key is an attribute name in Neutron API and
# corresponding value is a display name shown by CLI.
replace_rules = {'security_group_id': 'security_group',
'remote_group_id': 'remote_group'}
digest_fields = {
'remote': {
'method': _get_remote,
'depends_on': ['remote_ip_prefix', 'remote_group_id']},
'protocol/port': {
'method': _get_protocol_port,
'depends_on': ['protocol', 'port_range_min', 'port_range_max']}}
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListSecurityGroupRule, self).get_parser(prog_name)
parser.add_argument(
'--no-nameconv', action='store_true',
help=_('Do not convert security group ID to its name.'))
return parser
@staticmethod
def replace_columns(cols, rules, reverse=False):
if reverse:
rules = dict((rules[k], k) for k in rules.keys())
return [rules.get(col, col) for col in cols]
def get_required_fields(self, fields):
fields = self.replace_columns(fields, self.replace_rules, reverse=True)
for field, digest_fields in self.digest_fields.items():
if field in fields:
fields += digest_fields['depends_on']
fields.remove(field)
return fields
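    # Illustrative trace (hypothetical input, not from the original code):
    # requesting ['security_group', 'protocol/port'] first maps the display
    # name back to 'security_group_id', then expands 'protocol/port' into
    # its dependencies, giving
    # ['security_group_id', 'protocol', 'port_range_min', 'port_range_max'].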
def retrieve_list(self, parsed_args):
parsed_args.fields = self.get_required_fields(parsed_args.fields)
return super(ListSecurityGroupRule, self).retrieve_list(parsed_args)
def _get_sg_name_dict(self, data, page_size, no_nameconv):
"""Get names of security groups referred in the retrieved rules.
:return: a dict from secgroup ID to secgroup name
"""
if no_nameconv:
return {}
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'name']}
if self.pagination_support:
if page_size:
search_opts.update({'limit': page_size})
sec_group_ids = set()
for rule in data:
for key in self.replace_rules:
if rule.get(key):
sec_group_ids.add(rule[key])
sec_group_ids = list(sec_group_ids)
def _get_sec_group_list(sec_group_ids):
search_opts['id'] = sec_group_ids
return neutron_client.list_security_groups(
**search_opts).get('security_groups', [])
try:
secgroups = _get_sec_group_list(sec_group_ids)
except exceptions.RequestURITooLong as uri_len_exc:
# Length of a query filter on security group rule id
# id=<uuid>& (with len(uuid)=36)
sec_group_id_filter_len = 40
# The URI is too long because of too many sec_group_id filters
# Use the excess attribute of the exception to know how many
# sec_group_id filters can be inserted into a single request
sec_group_count = len(sec_group_ids)
max_size = ((sec_group_id_filter_len * sec_group_count) -
uri_len_exc.excess)
chunk_size = max_size // sec_group_id_filter_len
secgroups = []
for i in range(0, sec_group_count, chunk_size):
secgroups.extend(
_get_sec_group_list(sec_group_ids[i: i + chunk_size]))
return dict([(sg['id'], sg['name'])
for sg in secgroups if sg['name']])
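    # Worked example of the chunking above (hypothetical numbers): with 300
    # security group IDs, a per-ID filter length of 40 bytes, and an excess
    # of 2000 bytes reported by the exception, max_size = 40 * 300 - 2000 =
    # 10000 and chunk_size = 10000 // 40 = 250, so the IDs are fetched in
    # two requests of 250 and 50.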
@staticmethod
    def _has_fields(rule, required_fields):
        return all([key in rule for key in required_fields])
def extend_list(self, data, parsed_args):
sg_dict = self._get_sg_name_dict(data, parsed_args.page_size,
parsed_args.no_nameconv)
for rule in data:
# Replace security group UUID with its name.
for key in self.replace_rules:
if key in rule:
rule[key] = sg_dict.get(rule[key], rule[key])
for field, digest_rule in self.digest_fields.items():
                if self._has_fields(rule, digest_rule['depends_on']):
rule[field] = digest_rule['method'](rule) or 'any'
def setup_columns(self, info, parsed_args):
# Translate the specified columns from the command line
# into field names used in "info".
parsed_args.columns = self.replace_columns(parsed_args.columns,
self.replace_rules,
reverse=True)
# NOTE(amotoki): 2nd element of the tuple returned by setup_columns()
        # is a generator, so if you need to build a list from the generator
        # object, you need to recreate a generator so the list displays as
        # expected.
info = super(ListSecurityGroupRule, self).setup_columns(info,
parsed_args)
cols = info[0]
if not parsed_args.no_nameconv:
# Replace column names in the header line (in info[0])
cols = self.replace_columns(info[0], self.replace_rules)
parsed_args.columns = cols
return (cols, info[1])
class ShowSecurityGroupRule(neutronV20.ShowCommand):
"""Show information of a given security group rule."""
resource = 'security_group_rule'
allow_names = False
class CreateSecurityGroupRule(neutronV20.CreateCommand):
"""Create a security group rule."""
resource = 'security_group_rule'
def add_known_arguments(self, parser):
parser.add_argument(
'security_group_id', metavar='SECURITY_GROUP',
help=_('Security group name or ID to add rule.'))
parser.add_argument(
'--direction',
default='ingress', choices=['ingress', 'egress'],
help=_('Direction of traffic: ingress/egress.'))
parser.add_argument(
'--ethertype',
default='IPv4',
help=_('IPv4/IPv6'))
parser.add_argument(
'--protocol',
help=_('Protocol of packet.'))
parser.add_argument(
'--port-range-min',
help=_('Starting port range.'))
parser.add_argument(
'--port_range_min',
help=argparse.SUPPRESS)
parser.add_argument(
'--port-range-max',
help=_('Ending port range.'))
parser.add_argument(
'--port_range_max',
help=argparse.SUPPRESS)
parser.add_argument(
'--remote-ip-prefix',
help=_('CIDR to match on.'))
parser.add_argument(
'--remote_ip_prefix',
help=argparse.SUPPRESS)
parser.add_argument(
'--remote-group-id', metavar='REMOTE_GROUP',
help=_('Remote security group name or ID to apply rule.'))
parser.add_argument(
'--remote_group_id',
help=argparse.SUPPRESS)
def args2body(self, parsed_args):
_security_group_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group', parsed_args.security_group_id)
body = {'security_group_rule': {
'security_group_id': _security_group_id,
'direction': parsed_args.direction,
'ethertype': parsed_args.ethertype}}
if parsed_args.protocol:
body['security_group_rule'].update(
{'protocol': parsed_args.protocol})
if parsed_args.port_range_min:
body['security_group_rule'].update(
{'port_range_min': parsed_args.port_range_min})
if parsed_args.port_range_max:
body['security_group_rule'].update(
{'port_range_max': parsed_args.port_range_max})
if parsed_args.remote_ip_prefix:
body['security_group_rule'].update(
{'remote_ip_prefix': parsed_args.remote_ip_prefix})
if parsed_args.remote_group_id:
_remote_group_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group',
parsed_args.remote_group_id)
body['security_group_rule'].update(
{'remote_group_id': _remote_group_id})
if parsed_args.tenant_id:
body['security_group_rule'].update(
{'tenant_id': parsed_args.tenant_id})
return body
class DeleteSecurityGroupRule(neutronV20.DeleteCommand):
"""Delete a given security group rule."""
resource = 'security_group_rule'
allow_names = False
|
lowitty/server | refs/heads/master | libsLinux/twisted/internet/test/__init__.py | 84 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet}.
"""
|
antoinecarme/pyaf | refs/heads/master | tests/artificial/transf_Logit/trend_MovingMedian/cycle_7/ar_12/test_artificial_1024_Logit_MovingMedian_7_12_0.py | 1 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 0, ar_order = 12); |
abhishekgahlot/youtube-dl | refs/heads/master | youtube_dl/extractor/foxgay.py | 146 | from __future__ import unicode_literals
from .common import InfoExtractor
class FoxgayIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml'
_TEST = {
'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml',
'md5': '80d72beab5d04e1655a56ad37afe6841',
'info_dict': {
'id': '2582',
'ext': 'mp4',
'title': 'md5:6122f7ae0fc6b21ebdf59c5e083ce25a',
'description': 'md5:5e51dc4405f1fd315f7927daed2ce5cf',
'age_limit': 18,
'thumbnail': 're:https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>(?P<title>.*?)</title>',
webpage, 'title', fatal=False)
description = self._html_search_regex(
r'<div class="ico_desc"><h2>(?P<description>.*?)</h2>',
webpage, 'description', fatal=False)
# Find the URL for the iFrame which contains the actual video.
iframe = self._download_webpage(
self._html_search_regex(r'iframe src="(?P<frame>.*?)"', webpage, 'video frame'),
video_id)
video_url = self._html_search_regex(
r"v_path = '(?P<vid>http://.*?)'", iframe, 'url')
thumb_url = self._html_search_regex(
r"t_path = '(?P<thumb>http://.*?)'", iframe, 'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'description': description,
'thumbnail': thumb_url,
'age_limit': 18,
}
|
longde123/MultiversePlatform | refs/heads/master | server/bin/who.py | 1 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
proxyPlugin = Engine.getPlugin("Proxy1")
playerNames = proxyPlugin.getPlayerNames()
for name in playerNames:
print name
print str(len(playerNames)) + " players logged in currently."
|
openhatch/oh-mainline | refs/heads/master | vendor/packages/Django/tests/regressiontests/file_uploads/uploadhandler.py | 151 | """
Upload handlers to test the upload API.
"""
from django.core.files.uploadhandler import FileUploadHandler, StopUpload
class QuotaUploadHandler(FileUploadHandler):
"""
This test upload handler terminates the connection if more than a quota
(5MB) is uploaded.
"""
QUOTA = 5 * 2**20 # 5 MB
def __init__(self, request=None):
super(QuotaUploadHandler, self).__init__(request)
self.total_upload = 0
def receive_data_chunk(self, raw_data, start):
self.total_upload += len(raw_data)
if self.total_upload >= self.QUOTA:
raise StopUpload(connection_reset=True)
return raw_data
def file_complete(self, file_size):
return None
class CustomUploadError(Exception):
pass
class ErroringUploadHandler(FileUploadHandler):
"""A handler that raises an exception."""
def receive_data_chunk(self, raw_data, start):
raise CustomUploadError("Oops!")
|
ghber/My-Django-Nonrel | refs/heads/master | django/contrib/localflavor/cz/cz_regions.py | 514 | """
Czech regions, translations taken from http://www.crwflags.com/fotw/Flags/cz-re.html
"""
from django.utils.translation import ugettext_lazy as _
REGION_CHOICES = (
('PR', _('Prague')),
('CE', _('Central Bohemian Region')),
('SO', _('South Bohemian Region')),
('PI', _('Pilsen Region')),
('CA', _('Carlsbad Region')),
('US', _('Usti Region')),
('LB', _('Liberec Region')),
('HK', _('Hradec Region')),
('PA', _('Pardubice Region')),
('VY', _('Vysocina Region')),
('SM', _('South Moravian Region')),
('OL', _('Olomouc Region')),
('ZL', _('Zlin Region')),
('MS', _('Moravian-Silesian Region')),
)
|
litchfield/django | refs/heads/master | tests/utils_tests/test_simplelazyobject.py | 434 | from __future__ import unicode_literals
import pickle
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import six
from django.utils.functional import SimpleLazyObject
class TestUtilsSimpleLazyObjectDjangoTestCase(TestCase):
def test_pickle_py2_regression(self):
# See ticket #20212
user = User.objects.create_user('johndoe', 'john@example.com', 'pass')
x = SimpleLazyObject(lambda: user)
# This would fail with "TypeError: can't pickle instancemethod objects",
# only on Python 2.X.
pickle.dumps(x)
# Try the variant protocol levels.
pickle.dumps(x, 0)
pickle.dumps(x, 1)
pickle.dumps(x, 2)
if six.PY2:
import cPickle
# This would fail with "TypeError: expected string or Unicode object, NoneType found".
cPickle.dumps(x)
|
rdhyee/modular-file-renderer | refs/heads/develop | mfr/server/app.py | 2 | import asyncio
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
import logging
from raven.contrib.tornado import AsyncSentryClient
import mfr
from mfr import settings
from mfr.server import settings as server_settings
from mfr.server.handlers.export import ExportHandler
from mfr.server.handlers.render import RenderHandler
from mfr.server.handlers.status import StatusHandler
from mfr.server.handlers.core import ExtensionsStaticFileHandler
logger = logging.getLogger(__name__)
def make_app(debug):
app = tornado.web.Application(
[
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': server_settings.STATIC_PATH}),
(r'/assets/(.*?)/(.*\..*)', ExtensionsStaticFileHandler),
(r'/export', ExportHandler),
(r'/render', RenderHandler),
(r'/status', StatusHandler),
],
debug=debug,
)
app.sentry_client = AsyncSentryClient(settings.SENTRY_DSN, release=mfr.__version__)
return app
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(server_settings.DEBUG)
ssl_options = None
if server_settings.SSL_CERT_FILE and server_settings.SSL_KEY_FILE:
ssl_options = {
'certfile': server_settings.SSL_CERT_FILE,
'keyfile': server_settings.SSL_KEY_FILE,
}
app.listen(
server_settings.PORT,
address=server_settings.ADDRESS,
xheaders=server_settings.XHEADERS,
max_buffer_size=server_settings.MAX_BUFFER_SIZE,
ssl_options=ssl_options,
)
logger.info("Listening on {0}:{1}".format(server_settings.ADDRESS, server_settings.PORT))
asyncio.get_event_loop().set_debug(server_settings.DEBUG)
asyncio.get_event_loop().run_forever()
|
40223249-1/-w16b_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_loader.py | 738 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
        loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
        # We're going to try to load this module as a side-effect, so it
        # had better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
            # module should now be loaded, thanks to loadTestsFromNames()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
        def reversed_cmp(x, y):
            # cmp()-style comparator, negated so names sort in reverse order
            return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
|
vFense/vFenseAgent-nix | refs/heads/development | agent/deps/mac/Python-2.7.5/lib/python2.7/test/test_io.py | 37 | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test class that
# refers to the type it is testing as an attribute, then providing custom
# subclasses to test both implementations. This file has lots of examples.
################################################################################
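# A minimal, hypothetical sketch of that pattern (names invented here for
# illustration; the real cases appear throughout this file):
#
#     class FrobTest(unittest.TestCase):
#         # Subclasses set `tp` to the implementation under test.
#         def test_frob(self):
#             self.assertIsNotNone(self.tp)
#
#     class CFrobTest(FrobTest):
#         tp = io.BytesIO       # C implementation
#
#     class PyFrobTest(FrobTest):
#         tp = pyio.BytesIO     # Python implementation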
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes  # emulate the py3k bytes() constructor on Python 2
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
        return 0  # wrong, but we have to return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
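# Illustrative sketch, not part of the original file: the default
# RawIOBase.read() allocates a buffer and delegates to readinto(), which is
# exactly what the read()-less mocks above are meant to exercise.  The
# helper name below is invented for this example.
def _demo_default_read_via_readinto():
    raw = CMockRawIOWithoutRead((b"ab", b"c"))
    assert raw.read(2) == b"ab"  # first stacked chunk, served via readinto()
    assert raw.read(2) == b"c"   # shorter second chunk
    assert raw.read(2) == b""    # stack exhausted: EOF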
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
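# Illustrative sketch, not part of the original file: write() on the
# non-blocking mock accepts bytes up to the blocker char, or returns None
# to signal "would block, nothing written" (clearing the blocker).  The
# helper name below is invented for this example.
def _demo_non_blocking_write():
    raw = CMockNonBlockWriterIO()
    raw.block_on(b"c")
    assert raw.write(b"abc") == 2      # b"ab" accepted, blocked at b"c"
    assert raw.write(b"cde") is None   # blocker hit first: would block
    assert raw.write(b"cde") == 3      # blocker was cleared by the None return
    assert raw.pop_written() == b"abcde"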
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it
        # takes a long time to build the >2GB file and needs >2GB of disk
        # space, so the largefile resource must be enabled to run it.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
if not support.is_resource_enabled("largefile"):
print("\nTesting large file ops skipped on %s." % sys.platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_no_fileno(self):
        # XXX will we always have a fileno() function? If so, kill
        # this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
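    # Worked example (added commentary): after reset(), i == o == 1, so
    # i ^ 1 == o ^ 1 == 0 and getstate() reports flags == 0; with i == 2 and
    # o == 6 the flags value is (2 ^ 1) * 100 + (6 ^ 1) == 307, and setstate()
    # applies the same XOR to recover i and o.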
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
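    # Walk-through of the sixth case (added commentary): 'i.' switches to
    # variable-length input (I=0), 'o3.' then sets O=3 and 'i6.' sets I=6;
    # the remaining bytes split into 'abcdef', 'ghijkl' and, at EOF, 'mnop',
    # each truncated to three characters plus a period: 'abc.ghi.mno.'.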
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
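    # Byte-level example (added commentary): with 'utf-8-sig' the first write
    # produces b'\xef\xbb\xbfaaa'; reopening in append mode must emit only
    # b'xxx', so the file ends up as 'aaaxxx'.encode(charset), with the BOM
    # included exactly once.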
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
                        msg = bytes(bytearray([i % 26 + 97])) * N  # cycle a-z; bytes([...]) would be the list's repr on Python 2
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
    def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
    def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
    def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
    def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
glaubitz/fs-uae-debian | refs/heads/master | launcher/OpenGL/raw/GLES2/ARM/shader_framebuffer_fetch_depth_stencil.py | 8 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_ARM_shader_framebuffer_fetch_depth_stencil'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_ARM_shader_framebuffer_fetch_depth_stencil',error_checker=_errors._error_checker)
|
vivianli32/TravelConnect | refs/heads/master | flask/lib/python3.4/site-packages/jinja2/nodes.py | 623 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import next, izip, with_metaclass, text_type, \
method_type, function_type
#: the types we support for context functions
_context_function_types = (function_type, method_type)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
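# Illustrative note (added commentary, not part of the original module): these
# tables drive compile-time constant folding in the as_const() methods below,
# e.g. _binop_to_func['+'](40, 2) == 42, _uaop_to_func['-'](5) == -5 and
# _cmpop_to_func['in'](1, [1, 2]) is True.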
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
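# Field-inheritance example (added commentary): given
#     class Base(Node): fields = ('a',)
#     class Child(Base): fields = ('b',)
# the metaclass makes Child.fields == ('a', 'b'), and the layout-conflict
# assertion rejects a subclass that redeclares an inherited field name.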
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
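    # Usage example (added commentary): fields are positional and attributes
    # are keyword-only, so Name('foo', 'load', lineno=1) sets name='foo',
    # ctx='load' and lineno=1, while an unknown keyword such as Name(bogus=1)
    # raises TypeError('unknown attribute ...').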
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` holds) are supported.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
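    # Example (added commentary): Const.from_untrusted(42) returns Const(42)
    # because eval(repr(42)) == 42, whereas from_untrusted(object()) raises
    # Impossible since a plain object has no safe literal representation.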
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable were called filter, 2to3 would wrap the
# call in a list because it assumes we are talking about the
# builtin filter function here, which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
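# A minimal sketch of filter folding, assuming the default environment:
# plain filters such as ``upper`` can be applied at compile time, while
# context filters raise Impossible (the template string is illustrative):
#
#   from jinja2 import Environment
#   env = Environment()
#   node = env.parse('{{ "abc"|upper }}').body[0].nodes[0]  # a Filter node
#   assert node.as_const() == 'ABC'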
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
|
programadorjc/django | refs/heads/master | tests/field_defaults/tests.py | 405 | from datetime import datetime
from django.test import TestCase
from django.utils import six
from .models import Article
class DefaultTests(TestCase):
def test_field_defaults(self):
a = Article()
now = datetime.now()
a.save()
self.assertIsInstance(a.id, six.integer_types)
self.assertEqual(a.headline, "Default headline")
self.assertLess((now - a.pub_date).seconds, 5)
|
badge/doll | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
"""DoLL project"""
from setuptools import find_packages, setup
setup(name='doll',
version='0.3',
description="Database of Latin Lexicon.",
long_description="A database of the Latin lexicon, generated from the input files for Whitaker's Words.",
author="Matthew Badger",
url="https://github.com/badge/doll",
license="Apache",
entry_points={
'console_scripts': [
'doll = doll.__main__:main'
]},
packages=find_packages()
)
|
runjmc/maraschino | refs/heads/master | lib/flask/app.py | 14 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _tojson_filter, _endpoint_from_view_func, \
find_package
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext
from .globals import _request_ctx_stack, request
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Enable the deprecated module support? This is active by default
#: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints.
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'MAX_CONTENT_LENGTH': None,
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
instance_path=None, instance_relative_config=False):
_PackageBoundObject.__init__(self, import_name,
template_folder=template_folder)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
'static_url_path'), stacklevel=2)
static_url_path = static_path
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError('If an instance path is provided it must be '
'absolute. A relative path was given instead.')
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
# Prepare the deferred setup of the logger.
self._logger = None
self.logger_name = self.import_name
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
# support for the now deprecated `error_handlers` attribute. The
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
#: A dictionary of all registered error handlers. The key is `None`
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
#: where they key is the status code of the http exception. The
#: special key `None` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {None: self._error_handlers}
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
#: the blueprint this function is active for, `None` for all requests.
#: This can for example be used to open database connections or
#: getting hold of the currently logged in user. To register a
#: function here, use the :meth:`before_request` decorator.
self.before_request_funcs = {}
#: A lists of functions that should be called at the beginning of the
#: first request to this instance. To register a function here, use
#: the :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, `None` for all requests. This can for
#: example be used to open database connections or getting hold of the
#: currently logged in user. To register a function here, use the
#: :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: `None` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A dictionary with lists of functions that can be used as URL
#: value processor functions. Whenever a URL is built these functions
#: are called to modify the dictionary of values in place. The key
#: `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. The key `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
#: view function. For each function registered this one should also
#: provide a :meth:`url_defaults` function that adds the parameters
#: automatically again that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the `flaskext` module. For example in
#: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
if self.has_static_folder:
self.add_url_rule(self.static_url_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
'new error_handler_spec attribute instead.'), stacklevel=1)
return self._error_handlers
def _set_error_handlers(self, value):
self._error_handlers = value
self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is ``__main__``. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
"""A :class:`logging.Logger` object for this application. The
default configuration is to log to stderr if the application is
in debug mode. This logger can be used to (surprise) log messages.
Here are some examples::
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred (%d apples)', 42)
app.logger.error('An error occurred')
.. versionadded:: 0.3
"""
if self._logger and self._logger.name == self.logger_name:
return self._logger
with _logger_lock:
if self._logger and self._logger.name == self.logger_name:
return self._logger
from flask.logging import create_logger
self._logger = rv = create_logger(self)
return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
rv = self.create_jinja_environment()
# Hack to support the init_jinja_globals method which is supported
# until 1.0 but has an API deficiency.
if getattr(self.init_jinja_globals, 'im_func', None) is not \
Flask.init_jinja_globals.im_func:
from warnings import warn
warn(DeprecationWarning('This flask class uses a customized '
'init_jinja_globals() method which is deprecated. '
'Move the code from that method into the '
'create_jinja_environment() method instead.'))
self.__dict__['jinja_env'] = rv
self.init_jinja_globals()
return rv
@property
def got_first_request(self):
"""This attribute is set to `True` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages
)
rv.filters['tojson'] = _tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
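# A small sketch of the extension-based default above (the app name and
# template names are illustrative):
#
#   app = Flask(__name__)
#   assert app.select_jinja_autoescape('index.html')
#   assert not app.select_jinja_autoescape('mail.txt')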
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it possible to
# easier add new variables in context processors without breaking
# existing views.
context.update(orig_ctx)
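# A short sketch of the "original values win" rule above (names are
# illustrative): a context processor cannot override a value passed
# explicitly to the template:
#
#   @app.context_processor
#   def inject_title():
#       return {'title': 'from processor'}
#
#   # render_template('page.html', title='explicit') keeps 'explicit'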
def run(self, host='127.0.0.1', port=5000, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to `True` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. set this to ``'0.0.0.0'``
to have the server available externally as well.
:param port: the port of the webserver
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without the reloader and the like from an interactive shell.
self._got_first_request = False
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
methods = tuple(methods) + ('OPTIONS',)
provide_automatic_options = True
else:
provide_automatic_options = False
# due to a werkzeug bug we need to make sure that the defaults are
# None if they are an empty dictionary. This should not be necessary
# with Werkzeug 0.7
options['defaults'] = options.get('defaults') or None
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
self.view_functions[endpoint] = view_func
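# A minimal sketch of add_url_rule with an explicit methods option (the
# rule, endpoint and view below are illustrative):
#
#   def submit():
#       return 'ok'
#   app.add_url_rule('/submit', 'submit', submit, methods=['POST'])
#   # equivalent decorator form:
#   #   @app.route('/submit', methods=['POST'])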
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code_or_exception: the code as integer for the handler
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, (int, long)):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.jinja_env.filters[name or f.__name__] = f
return f
return decorator
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
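# A brief sketch of short-circuiting from a before_request function: a
# non-None return value is used in place of the view's return value (the
# session key and 'login' endpoint are illustrative):
#
#   @app.before_request
#   def require_login():
#       if not session.get('user'):
#           return redirect(url_for('login'))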
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
must take one parameter, a :attr:`response_class` object and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if no
actual request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context is removed from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally teardown functions must take every necessary step to avoid
failing. If they do execute code that might fail, they
will have to surround the execution of that code with try/except
statements and log any occurring errors.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
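# A small sketch of a teardown function: it receives the exception that
# occurred (or None) and its return value is ignored (db_session is a
# hypothetical database session):
#
#   @app.teardown_request
#   def remove_db_session(exc):
#       db_session.remove()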
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
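# Sketch of enabling the trapping described above (app name illustrative);
# a failing request.form['key'] lookup then surfaces as a traceback in the
# debugger instead of a generic 400 response:
#
#   app.config['TRAP_BAD_REQUEST_ERRORS'] = True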
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case are
:class:`~werkzeug.exception.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
raise exc_type, exc_value, tb
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
raise exc_type, exc_value, tb
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception, e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` exactly once per
application instance (which usually means per process).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
self._got_first_request = True
for func in self.before_first_request_funcs:
func()
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behaviour of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed, e:
methods = e.valid_methods
except HTTPException, e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
:class:`tuple` the response object is created with the
contents of the tuple as arguments
a WSGI function the function is called as WSGI application
and buffered as response object
======================= ===========================================
:param rv: the return value from the view function
"""
if rv is None:
raise ValueError('View function did not return a response')
if isinstance(rv, self.response_class):
return rv
if isinstance(rv, basestring):
return self.response_class(rv)
if isinstance(rv, tuple):
return self.response_class(*rv)
return self.response_class.force_type(rv, request.environ)
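# Editorial sketch of the conversions documented above; assumes an
# application object named app already exists.
with app.test_request_context():
    r1 = app.make_response('hello')         # str -> response body
    r2 = app.make_response(('hello', 202))  # tuple -> response_class(*rv)
    print r1.status_code, r2.status_code    # 200 202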
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
"""
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.split('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def preprocess_request(self):
"""Called before the actual request dispatching and will
call every as :meth:`before_request` decorated function.
If any of these function returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
This also triggers the :meth:`url_value_processor` functions before
the actualy :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
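# Editorial sketch of the short-circuit behaviour described above: when a
# before_request function returns a value, it is treated as the view's
# return value and dispatching is skipped. Assumes an app object exists.
@app.before_request
def require_token():
    if request.headers.get('X-Token') != 'secret':
        return 'forbidden', 403  # stops further request handling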
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
        :return: a new response object or the same; it has to be an
                 instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
funcs = ()
if bp is not None and bp in self.after_request_funcs:
funcs = reversed(self.after_request_funcs[bp])
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
return response
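# Editorial sketch of the reverse-order rule from the docstring above:
# the handler registered last runs first. Assumes an app object exists.
@app.after_request
def trace_a(response):
    # registered first, runs last
    response.headers['X-Trace'] = response.headers.get('X-Trace', '') + ' a'
    return response
@app.after_request
def trace_b(response):
    # registered second, runs first, so the final header reads ' b a'
    response.headers['X-Trace'] = response.headers.get('X-Trace', '') + ' b'
    return response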
def do_teardown_request(self):
"""Called after the actual request dispatching and will
call every as :meth:`teardown_request` decorated function. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have a
tighter control over certain resources under testing environments.
"""
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
exc = sys.exc_info()[1]
for func in funcs:
rv = func(exc)
if rv is not None:
return rv
request_tearing_down.send(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and `with` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:func:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
        always execute at the end of the request, independent of whether an
        error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
with self.request_context(environ):
try:
response = self.full_dispatch_request()
except Exception, e:
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
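# Editorial sketch of the wrapping pattern recommended in the docstring
# above; LoggingMiddleware is a hypothetical example middleware.
class LoggingMiddleware(object):
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app
    def __call__(self, environ, start_response):
        print 'request for', environ.get('PATH_INFO')
        return self.wsgi_app(environ, start_response)
# app.wsgi_app = LoggingMiddleware(app.wsgi_app)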
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
|
taedla01/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/core/info.py | 103 | __doc__ = """Defines a multi-dimensional array and useful procedures for Numerical computation.
Functions
- array - NumPy Array construction
- zeros - Return an array of all zeros
- empty - Return an uninitialized array
- shape - Return shape of sequence or array
- rank - Return number of dimensions
- size - Return number of elements in entire array or a
certain dimension
- fromstring - Construct array from (byte) string
- take - Select sub-arrays using sequence of indices
- put - Set sub-arrays using sequence of 1-D indices
- putmask - Set portion of arrays using a mask
- reshape - Return array with new shape
- repeat - Repeat elements of array
- choose - Construct new array from indexed array tuple
- correlate - Correlate two 1-d arrays
- searchsorted - Search for element in 1-d array
- sum - Total sum over a specified dimension
- average - Average, possibly weighted, over axis or array.
- cumsum - Cumulative sum over a specified dimension
- product - Total product over a specified dimension
- cumproduct - Cumulative product over a specified dimension
- alltrue - Logical and over an entire axis
- sometrue - Logical or over an entire axis
- allclose - Tests if sequences are essentially equal
More Functions:
- arange - Return regularly spaced array
- asarray - Guarantee NumPy array
- convolve - Convolve two 1-d arrays
- swapaxes - Exchange axes
- concatenate - Join arrays together
- transpose - Permute axes
- sort - Sort elements of array
- argsort - Indices of sorted array
- argmax - Index of largest value
- argmin - Index of smallest value
- inner - Inner product of two arrays
- dot - Dot product (matrix multiplication)
- outer - Outer product of two arrays
- resize - Return array with arbitrary new shape
- indices - Tuple of indices
- fromfunction - Construct array from universal function
- diagonal - Return diagonal array
- trace - Trace of array
- dump - Dump array to file object (pickle)
- dumps - Return pickled string representing data
- load - Return array stored in file object
- loads - Return array from pickled string
- ravel - Return array as 1-D
- nonzero - Indices of nonzero elements for 1-D array
- shape - Shape of array
- where - Construct array from binary result
- compress - Elements of array where condition is true
- clip - Clip array between two values
- ones - Array of all ones
- identity - 2-D identity array (matrix)
(Universal) Math Functions
add logical_or exp
subtract logical_xor log
multiply logical_not log10
divide maximum sin
divide_safe minimum sinh
conjugate bitwise_and sqrt
power bitwise_or tan
absolute bitwise_xor tanh
negative invert ceil
greater left_shift fabs
greater_equal right_shift floor
less arccos arctan2
less_equal arcsin fmod
equal arctan hypot
not_equal cos around
logical_and cosh sign
arccosh arcsinh arctanh
"""
depends = ['testing']
global_symbols = ['*']
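# Editorial sketch exercising a few of the functions listed above;
# assumes a standard numpy installation.
import numpy as np
a = np.arange(12).reshape(3, 4)  # regularly spaced values, new shape
print np.sum(a, axis=0)          # column sums: [12 15 18 21]
print np.argmax(a)               # flat index of the largest value: 11
print np.allclose(a, a + 1e-12)  # essentially equal: True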
|
maxicecilia/simple_classroom | refs/heads/master | simple_classroom/apps/core/admin.py | 1 | # -*- coding: utf-8 -*-
from django.contrib import admin
from site_news.admin import NewsItemSimplifiedAdmin
from site_news.models import NewsItem
from simple_classroom.apps.core.models import ExtendedSite
from contact_us.admin import ContactUsAdmin
from contact_us.models import SimpleContact
admin.site.register(NewsItem, NewsItemSimplifiedAdmin)
admin.site.register(ExtendedSite)
admin.site.register(SimpleContact, ContactUsAdmin)
|
KlubJagiellonski/pola-backend | refs/heads/master | ai_pics/urls.py | 1 | from django.urls import path
from . import views
urlpatterns = [
path('', views.AIPicsPageView.as_view(), name="list"),
path('<int:pk>', views.AIPicsDetailView.as_view(), name="detail"),
path('api/set-api-pic-state', views.ApiSetAiPicStateView.as_view(), name='set-api-pic-state'),
path('api/delete-api-pic', views.ApiDeleteAiPicsView.as_view(), name='delete-api-pic'),
path('api/delete-attachment', views.ApiDeleteAttachmentView.as_view(), name='delete-attachment'),
]
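# Editorial sketch: these named routes can be reversed elsewhere in the
# project; the 'ai_pics' namespace is an assumption about how this
# urlconf is include()d, and the resulting URL prefix depends on that too.
from django.urls import reverse
detail_url = reverse('ai_pics:detail', kwargs={'pk': 42})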
|
tentwatch/tentwatch | refs/heads/master | tentwatch/imports/models.py | 1 | from django.db import models
class OtherTent(models.Model):
name = models.CharField(max_length=140)
url = models.URLField(max_length=1024)
lat = models.FloatField(default=0.0, editable=False)
lon = models.FloatField(default=0.0, editable=False)
def __unicode__(self):
return self.name
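# Editorial sketch of typical ORM usage for this model, assuming
# migrations have been applied; the values are illustrative only.
tent = OtherTent.objects.create(
    name='Base camp', url='http://example.com/tents/base-camp')
nearby = OtherTent.objects.filter(name__icontains='camp')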
|
Altiscale/pig | refs/heads/trunk | bin/pig.py | 16 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The Pig command script
#
# Environment Variables
#
# JAVA_HOME The java implementation to use. Overrides the system default.
#
# PIG_CLASSPATH Extra Java CLASSPATH entries.
#
# HADOOP_HOME/HADOOP_PREFIX Environment HADOOP_HOME/HADOOP_PREFIX(0.20.205)
#
# HADOOP_CONF_DIR Hadoop conf dir
#
# PIG_HEAPSIZE The maximum amount of heap to use, in MB.
# Default is 1000.
#
# PIG_OPTS Extra Java runtime options.
#
# PIG_CONF_DIR Alternate conf dir. Default is ${PIG_HOME}/conf.
#
# HBASE_CONF_DIR - Optionally, the HBase configuration to run against
# when using HBaseStorage
import sys
import os
import glob
import subprocess
debug = False
restArgs = []
includeHCatalog = False
additionalJars = ""
for arg in sys.argv:
if arg == __file__:
continue
if arg == "-secretDebugCmd":
debug = True
elif arg == "-useHCatalog":
includeHCatalog = True
elif arg.split("=")[0] == "-D.pig.additional.jars":
if includeHCatalog == True:
additionalJars = arg.split("=")[1]
else:
restArgs.append(arg)
else:
restArgs.append(arg)
# Determine our absolute path, resolving any symbolic links
this = os.path.realpath(sys.argv[0])
bindir = os.path.dirname(this) + os.path.sep
# the root of the pig installation
os.environ['PIG_HOME'] = os.path.join(bindir, os.path.pardir)
if 'PIG_CONF_DIR' not in os.environ:
pigPropertiesPath = os.path.join(os.environ['PIG_HOME'], 'conf', 'pig.properties')
if os.path.exists(pigPropertiesPath):
try:
fhdl = open(pigPropertiesPath, 'r')
fhdl.close()
os.environ['PIG_CONF_DIR'] = os.path.join(os.environ['PIG_HOME'], 'conf')
except:
# in the small window after checking for file, if file is deleted,
# we should fail if we hit an exception
sys.exit('Failed to access file %s' % pigPropertiesPath)
elif os.path.exists(os.path.join(os.path.sep, 'etc', 'pig')):
os.environ['PIG_CONF_DIR'] = os.path.join(os.path.sep, 'etc', 'pig')
else:
sys.exit('Cannot determine PIG_CONF_DIR. Please set it to the directory containing pig.properties')
# Hack to get to read a shell script and the changes to the environment it makes
# This is potentially bad because we could execute arbitrary code
try:
importScript = os.path.join(os.environ['PIG_CONF_DIR'], 'runPigEnv.sh')
fd = open(importScript, 'w')
fd.write(". " + os.path.join(os.environ['PIG_CONF_DIR'], 'pig-env.sh'))
fd.write("\n")
fd.write("set")
fd.close()
outFd = open(os.path.join(os.environ['PIG_CONF_DIR'], 'pigStartPython.out'), 'w')
output = subprocess.Popen(importScript, shell=True, stdout=outFd)
output.wait()
outFd.close()
outFd = open(os.path.join(os.environ['PIG_CONF_DIR'], 'pigStartPython.out'), 'r')
for line in outFd:
        # keep only simple KEY=value lines; anything containing spaces is not one
        if len(line.split(' ')) > 1:
            continue
        envSplit = line.split('=')
        if len(envSplit) == 2:
            os.environ[envSplit[0]] = envSplit[1].strip()
outFd.close()
except:
pass
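# Editorial sketch of the same "source a shell script, capture its
# environment" trick with a pipe instead of temp files; it assumes a
# POSIX sh and that no environment value spans multiple lines.
def source_env(script):
    out = subprocess.Popen(
        ['sh', '-c', '. %s >/dev/null 2>&1; env' % script],
        stdout=subprocess.PIPE).communicate()[0]
    for line in out.splitlines():
        key, sep, value = line.partition('=')
        if sep:
            os.environ[key] = value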
# functionality similar to the shell script. This executes a pig script instead
try:
    if os.path.exists(os.path.join(os.environ['PIG_CONF_DIR'], 'pig.conf')):
pigConf = os.path.join(os.environ['PIG_CONF_DIR'], 'pig.conf')
__import__(pigConf)
except:
pass
if 'JAVA_HOME' not in os.environ:
sys.exit('Error: JAVA_HOME is not set')
if 'HADOOP_HOME' not in os.environ:
os.environ['HADOOP_HOME'] = os.path.sep + 'usr'
java = os.path.join(os.environ['JAVA_HOME'], 'bin', 'java')
javaHeapMax = "-Xmx1000m"
if 'PIG_HEAPSIZE' in os.environ:
javaHeapMax = '-Xmx' + os.environ['PIG_HEAPSIZE'] + 'm'
classpath = os.environ['PIG_CONF_DIR']
classpath += os.pathsep + os.path.join(os.environ['JAVA_HOME'], 'lib', 'tools.jar')
if 'PIG_CLASSPATH' in os.environ:
classpath += os.pathsep + os.environ['PIG_CLASSPATH']
if 'HADOOP_CONF_DIR' in os.environ:
classpath += os.pathsep + os.environ['HADOOP_CONF_DIR']
pigLibJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "*.jar"))
for jar in pigLibJars:
classpath += os.pathsep + jar
######### if hcatalog is to be included, add hcatalog and its required jars
if includeHCatalog == True:
# adding the hive jars required by hcatalog
hiveJarLoc = ""
if 'HIVE_HOME' in os.environ:
hiveJarLoc = os.path.join(os.environ['HIVE_HOME'], "lib")
else:
        if os.path.exists(os.path.join(os.path.sep, 'usr', 'lib', 'hive')):
            hiveJarLoc = os.path.join(os.path.sep, 'usr', 'lib', 'hive', 'lib')
else:
sys.exit("Please initialize HIVE_HOME to the hive install directory")
allHiveJars = ["hive-metastore-*.jar", "libthrift-*.jar", "hive-exec-*.jar", "libfb303-*.jar", "jdo*-api-*.jar", "slf4j-api-*.jar", "hive-hbase-handler-*.jar"]
for jarName in allHiveJars:
jar = glob.glob(os.path.join(hiveJarLoc, jarName))
        if len(jar) != 0 and os.path.exists(jar[0]):
classpath += os.pathsep + jar[0]
else:
sys.exit("Failed to find the jar %s" % os.path.join(hiveJarLoc, jarName))
# done with adding the hive jars required by hcatalog
# adding the hcat jars
hcatHome = ""
if 'HCAT_HOME' in os.environ:
hcatHome = os.environ['HCAT_HOME']
else:
if os.path.exists(os.path.join(os.path.sep + "usr", "lib", "hcatalog")):
hcatHome = os.path.join(os.path.sep + "usr", "lib", "hcatalog")
else:
sys.exit("Please initialize HCAT_HOME to the hcatalog install directory")
hcatJars = glob.glob(os.path.join(hcatHome, "share", "hcatalog", "*hcatalog-*.jar"))
found = False
for hcatJar in hcatJars:
if hcatJar.find("server") != -1:
found = True
classpath += os.pathsep + hcatJar
break
if found == False:
sys.exit("Failed to find the hcatalog server jar in %s" % (os.path.join(hcatHome, "share", "hcatalog")))
hcatHBaseJar = glob.glob(os.path.join(hcatHome, "lib", "hbase-storage-handler-*.jar"))
try:
classpath += os.pathsep + hcatHBaseJar[0]
except:
pass
# done with adding the hcat jars
# now also add the additional jars passed through the command line
classpath += os.pathsep + additionalJars
# done adding the additional jars from the command line
######### done with adding hcatalog and related jars
######### Add the jython jars to classpath
jythonJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "jython*.jar"))
if len(jythonJars) == 1:
classpath += os.pathsep + jythonJars[0]
else:
jythonJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "build", "ivy", "lib", "Pig", "jython*.jar"))
if len(jythonJars) == 1:
classpath += os.pathsep + jythonJars[0]
######### Done adding the jython jars to classpath
######### Add the jruby jars to classpath
jrubyJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "jruby-complete-*.jar"))
if len(jrubyJars) == 1:
classpath += os.pathsep + jrubyJars[0]
else:
jrubyJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "build", "ivy", "lib", "Pig", "jruby-complete-*.jar"))
if len(jrubyJars) == 1:
classpath += os.pathsep + jrubyJars[0]
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "share", "pig", "lib", "*.jar"))
for jar in pigJars:
classpath += os.pathsep + jar
######### Done adding jruby jars to classpath
######### Add hadoop and hbase conf directories
hadoopConfDir = os.path.join(os.environ['PIG_HOME'], "etc", "hadoop")
if os.path.exists(hadoopConfDir):
classpath += os.pathsep + hadoopConfDir
if 'HBASE_CONF_DIR' in os.environ:
classpath += os.pathsep + os.environ['HBASE_CONF_DIR']
else:
hbaseConfDir = os.path.join(os.path.sep + "etc", "hbase")
if os.path.exists(hbaseConfDir):
classpath += os.pathsep + hbaseConfDir
######### Done adding hadoop and hbase conf directories
######### Locate and add Zookeeper jars if they exist
zkDir = ""
if 'ZOOKEEPER_HOME' in os.environ:
zkDir = os.environ['ZOOKEEPER_HOME']
else:
zkDir = os.path.join(os.environ['PIG_HOME'], "share", "zookeeper")
if os.path.exists(zkDir):
    zkJars = glob.glob(os.path.join(zkDir, "zookeeper-*.jar"))
for jar in zkJars:
classpath += os.pathsep + jar
######### Done adding zookeeper jars
######### Locate and add hbase jars if they exist
hbaseDir = ""
if 'HBASE_HOME' in os.environ:
hbaseDir = os.environ['HBASE_HOME']
else:
hbaseDir = os.path.join(os.environ['PIG_HOME'], "share", "hbase")
if os.path.exists(hbaseDir):
hbaseJars = glob.glob(os.path.join(hbaseDir, "hbase-*.jar"))
for jar in hbaseJars:
classpath += os.pathsep + jar
######### Done adding hbase jars
######### set the log directory and logfile if they don't exist
pigLogDir = os.environ.get('PIG_LOG_DIR',
                           os.path.join(os.environ['PIG_HOME'], "logs"))
pigLogFile = os.environ.get('PIG_LOGFILE', 'pid.log')
######### Done setting the logging directory and logfile
pigOpts = ""
try:
pigOpts = os.environ['PIG_OPTS']
except:
pass
pigOpts += " -Dpig.log.dir=" + pigLogDir
pigOpts += " -Dpig.log.file=" + pigLogFile
pigOpts += " -Dpig.home.dir=" + os.environ['PIG_HOME']
pigJar = ""
hadoopBin = ""
print "HADOOP_HOME: %s" % os.path.expandvars(os.environ['HADOOP_HOME'])
if (os.environ.get('HADOOP_PREFIX') is not None):
print "Found a hadoop prefix"
hadoopPrefixPath = os.path.expandvars(os.environ['HADOOP_PREFIX'])
if os.path.exists(os.path.join(hadoopPrefixPath, "bin", "hadoop")):
hadoopBin = os.path.join(hadoopPrefixPath, "bin", "hadoop")
if (os.environ.get('HADOOP_HOME') is not None):
print "Found a hadoop home"
hadoopHomePath = os.path.expandvars(os.environ['HADOOP_HOME'])
print "Hadoop home path: %s" % hadoopHomePath
if os.path.exists(os.path.join(hadoopHomePath, "bin", "hadoop")):
hadoopBin = os.path.join(hadoopHomePath, "bin", "hadoop")
if hadoopBin == "":
if os.path.exists(os.path.join(os.path.sep + "usr", "bin", "hadoop")):
hadoopBin = os.path.join(os.path.sep + "usr", "bin", "hadoop")
# find out the HADOOP_HOME in order to find hadoop jar
# we use the name of hadoop jar to decide if user is using
# hadoop 1 or hadoop 2
if (hadoopHomePath is None and hadoopPrefixPath is not None):
hadoopHomePath = hadoopPrefixPath
if (os.environ.get('HADOOP_HOME') is None and hadoopBin != ""):
hadoopHomePath = os.path.join(hadoopBin, "..")
hadoopCoreJars = glob.glob(os.path.join(hadoopHomePath, "hadoop-core*.jar"))
if len(hadoopCoreJars) == 0:
hadoopVersion = 2
else:
hadoopVersion = 1
if hadoopBin != "":
if debug == True:
print "Find hadoop at %s" % hadoopBin
if os.path.exists(os.path.join(os.environ['PIG_HOME'], "pig-core-h$hadoopVersion.jar")):
pigJar = os.path.join(os.environ['PIG_HOME'], "pig-core-h$hadoopVersion.jar")
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "pig-*-core-h" + str(hadoopVersion) + ".jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
elif len(pigJars) > 1:
print "Ambiguity with pig jars found the following jars"
print pigJars
sys.exit("Please remove irrelavant jars from %s" % os.path.join(os.environ['PIG_HOME'], "pig-*-core-h" + str(hadoopVersion) + ".jar"))
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "share", "pig", "pig-*-core-h" + str(hadoopVersion) + ".jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
else:
if hadoopVersion == 1:
sys.exit("Cannot locate pig-core-h1.jar do 'ant jar', and try again")
else:
sys.exit("Cannot locate pig-core-h2.jar do 'ant -Dhadoopversion=23 jar', and try again")
pigLibJars = glob.glob(os.path.join(os.environ['PIG_HOME']+"/lib", "h" + str(hadoopVersion), "*.jar"))
for jar in pigLibJars:
classpath += os.pathsep + jar
if 'HADOOP_CLASSPATH' in os.environ:
os.environ['HADOOP_CLASSPATH'] += os.pathsep + classpath
else:
os.environ['HADOOP_CLASSPATH'] = classpath
os.environ['HADOOP_CLASSPATH'] += os.pathsep + pigJar
if debug == True:
print "dry run:"
print "HADOOP_CLASSPATH: %s" % os.environ['HADOOP_CLASSPATH']
try:
print "HADOOP_OPTS: %s" % os.environ['HADOOP_OPTS']
except:
pass
print "%s jar %s %s" % (hadoopBin, pigJar, ' '.join(restArgs))
else:
cmdLine = hadoopBin + ' jar ' + pigJar + ' ' + ' '.join(restArgs)
subprocess.call(cmdLine, shell=True)
else:
# fall back to use fat pig.jar
if debug == True:
print "Cannot find local hadoop installation, using bundled hadoop 1"
if os.path.exists(os.path.join(os.environ['PIG_HOME'], "pig-core-h1.jar")):
pigJar = os.path.join(os.environ['PIG_HOME'], "pig-core-h1.jar")
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "pig-*-core-h1.jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
elif len(pigJars) > 1:
print "Ambiguity with pig jars found the following jars"
print pigJars
sys.exit("Please remove irrelavant jars from %s" % os.path.join(os.environ['PIG_HOME'], "pig-core-h1.jar"))
else:
sys.exit("Cannot locate pig-core-h1.jar. do 'ant jar' and try again")
pigLibJars = glob.glob(os.path.join(os.environ['PIG_HOME']+"/lib", "h1", "*.jar"))
for jar in pigLibJars:
classpath += os.pathsep + jar
pigLibJars = glob.glob(os.path.join(os.environ['PIG_HOME']+"/lib", "hadoop1-runtime", "*.jar"))
for jar in pigLibJars:
classpath += os.pathsep + jar
classpath += os.pathsep + pigJar
pigClass = "org.apache.pig.Main"
if debug == True:
print "dry runXXX:"
print "%s %s %s -classpath %s %s %s" % (java, javaHeapMax, pigOpts, classpath, pigClass, ' '.join(restArgs))
else:
cmdLine = java + ' ' + javaHeapMax + ' ' + pigOpts
cmdLine += ' ' + '-classpath ' + classpath + ' ' + pigClass + ' ' + ' '.join(restArgs)
subprocess.call(cmdLine, shell=True)
|
aboganas/frappe | refs/heads/develop | frappe/core/page/background_jobs/background_jobs.py | 6 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from rq import Queue, Worker
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils import format_datetime, cint
colors = {
'queued': 'orange',
'failed': 'red',
'started': 'green'
}
@frappe.whitelist()
def get_info(show_failed=False):
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
def add_job(j, name):
        # show only jobs that belong to the current site
        if j.kwargs.get('site') == frappe.local.site:
jobs.append({
'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
or str(j.kwargs.get('job_name')),
'status': j.status, 'queue': name,
'creation': format_datetime(j.created_at),
'color': colors[j.status]
})
if j.exc_info:
jobs[-1]['exc_info'] = j.exc_info
for w in workers:
j = w.get_current_job()
if j:
add_job(j, w.name)
for q in queues:
if q.name != 'failed':
for j in q.get_jobs(): add_job(j, q.name)
if cint(show_failed):
for q in queues:
if q.name == 'failed':
for j in q.get_jobs()[:10]: add_job(j, q.name)
	return jobs
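# Editorial sketch: the whitelisted method above can also be called
# directly from a bench console for debugging, assuming a site context
# is active:
#
# >>> from frappe.core.page.background_jobs.background_jobs import get_info
# >>> [j['job_name'] for j in get_info(show_failed=True)]
 |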