code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
import sys
import importlib

# Dependency reloader for Emmet plugin.
# The original idea is borrowed from
# https://github.com/wbond/sublime_package_control/blob/master/package_control/reloader.py

# Collect the names of all currently-loaded Emmet modules.
# Iterate over a snapshot of sys.modules: the reload loop below may
# mutate sys.modules while it runs.
reload_mods = []
for mod in list(sys.modules):
    if mod.startswith('emmet') and sys.modules[mod] is not None:
        reload_mods.append(mod)

# Modules must be reloaded in dependency order so that dependants pick
# up the freshly reloaded versions of their dependencies.
mods_load_order = [
    'emmet.semver',
    'emmet.pyv8loader',
    'emmet_completions.trackers',
    'emmet_completions.meta',
    'emmet_completions',
    'emmet.file',
    'emmet.context'
]

for mod in mods_load_order:
    if mod in reload_mods:
        m = sys.modules[mod]
        # Give the module a chance to clean up before being reloaded.
        if 'on_module_reload' in m.__dict__:
            m.on_module_reload()
        # Originally used imp.reload(); the imp module is deprecated since
        # Python 3.4 and removed in 3.12 -- importlib.reload() is the
        # drop-in replacement.
        importlib.reload(sys.modules[mod])
|
sergeche/emmet-sublime
|
emmet/reloader.py
|
Python
|
mit
| 659
|
"""Tests for elpy.ropebackend."""
import os
import shutil
import sys
import tempfile
import mock
from elpy import ropebackend
from elpy import rpc
from elpy.tests import compat
from elpy.tests.support import BackendTestCase
from elpy.tests.support import RPCGetCompletionsTests
from elpy.tests.support import RPCGetCompletionDocstringTests
from elpy.tests.support import RPCGetCompletionLocationTests
from elpy.tests.support import RPCGetDefinitionTests
from elpy.tests.support import RPCGetCalltipTests
from elpy.tests.support import RPCGetDocstringTests
class RopeBackendTestCase(BackendTestCase):
    """Base test case that provisions a RopeBackend for a fresh project root."""

    def setUp(self):
        super(RopeBackendTestCase, self).setUp()
        # Backend under test, rooted at the temporary project directory
        # created by BackendTestCase.setUp().
        self.backend = ropebackend.RopeBackend(self.project_root)
class ShouldCallValidateTest(object):
    """Mixin: asserts that the RPC entry point under test calls validate().

    Host classes must provide ``self.backend`` and ``self.rpc`` (the RPC
    method being exercised).
    """

    def test_should_call_validate(self):
        with mock.patch.object(self.backend, 'validate') as validate:
            self.rpc(None, "", 0)
            self.assertTrue(validate.called)
class TestInit(RopeBackendTestCase):
    """Tests for RopeBackend construction."""

    def test_should_have_rope_as_name(self):
        self.assertEqual(self.backend.name, "rope")

    def test_should_patch_project_files(self):
        # Files in subdirectories without __init__.py ("baddir") should be
        # excluded by the patched file listing; only root-level foo.txt stays.
        self.project_file("foo.txt", "")
        self.project_file("baddir/file.py", "")
        self.backend.project.validate()
        actual = [f.real_path for f in
                  self.backend.project.file_list.get_files()]
        self.assertEqual([os.path.join(self.project_root, "foo.txt")],
                         actual)

    def test_should_fail_for_inexisting_project_root(self):
        with self.assertRaises(rpc.Fault):
            ropebackend.RopeBackend("/does/not/exist/")
class TestValidate(RopeBackendTestCase):
    """Tests for RopeBackend.validate() rate limiting."""

    def test_should_call_validate_after_timeout(self):
        with mock.patch("time.time") as t:
            # First call stamps the time; after VALIDATE_EVERY_SECONDS the
            # next call must revalidate the project.
            t.return_value = 10
            self.backend.validate()
            with mock.patch.object(self.backend, 'project') as project:
                t.return_value = 10 + ropebackend.VALIDATE_EVERY_SECONDS + 1
                self.backend.validate()
                self.assertTrue(project.validate.called)

    def test_should_not_call_validate_before_timeout(self):
        with mock.patch("time.time") as t:
            t.return_value = 10
            self.backend.validate()
            with mock.patch.object(self.backend, 'project') as project:
                # Just inside the rate-limit window: no revalidation.
                t.return_value = 10 + ropebackend.VALIDATE_EVERY_SECONDS - 1
                self.backend.validate()
                self.assertFalse(project.validate.called)

    def test_should_not_fail_if_root_vanishes(self):
        # Bug #353: validate() must survive the project root being deleted.
        tmpdir = tempfile.mkdtemp(prefix="elpy-test-validate-")
        try:
            backend = ropebackend.RopeBackend(tmpdir)
        finally:
            shutil.rmtree(tmpdir)
        backend.validate()
class TestRPCGetCompletions(RPCGetCompletionsTests,
                            RopeBackendTestCase):
    """Run the shared completion test suite against the rope backend."""
    pass
class TestRPCGetCompletionDocstring(RPCGetCompletionDocstringTests,
                                    RopeBackendTestCase):
    """Run the shared completion-docstring tests against the rope backend."""
    pass
class TestRPCGetCompletionLocation(RPCGetCompletionLocationTests,
                                   RopeBackendTestCase):
    """Run the shared completion-location tests against the rope backend."""
    pass
class TestRPCGetDefinition(RPCGetDefinitionTests,
                           ShouldCallValidateTest,
                           RopeBackendTestCase):
    """Run the shared get-definition tests (plus validate check) on rope."""
    pass
class TestRPCGetCalltip(RPCGetCalltipTests,
                        ShouldCallValidateTest,
                        RopeBackendTestCase):
    """Run the shared calltip tests against the rope backend."""

    # Expected calltip strings as rope renders them.
    ADD_CALLTIP = 'Add.add(a, b)'
    RADIX_CALLTIP = "Decimal.radix()"
    # threading.Thread's signature differs between Python 2 and 3 (the
    # daemon keyword and keyword-only marker are Python 3 additions).
    if compat.PYTHON3:
        THREAD_CALLTIP = (
            "threading.Thread(group=None, target=None, "
            "name=None, args=(), kwargs=None, daemon=None, *)"
        )
        KEYS_CALLTIP = "builtins.keys()"
    else:
        THREAD_CALLTIP = (
            "threading.Thread(group=None, target=None, "
            "name=None, args=(), kwargs=None, verbose=None)"
        )
        KEYS_CALLTIP = "__builtin__.keys()"
class TestRPCGetDocstring(RPCGetDocstringTests,
                          ShouldCallValidateTest,
                          RopeBackendTestCase):
    """Run the shared docstring tests against the rope backend."""

    # json.loads grew the object_pairs_hook parameter in Python 2.7.
    if sys.version_info < (2, 7):
        JSON_LOADS_DOCSTRING = (
            'loads(s, encoding=None, cls=None, object_hook=None, '
            'parse_float=None, parse_int=None, parse_constant=None, '
            '**kw):'
        )
    else:
        JSON_LOADS_DOCSTRING = (
            'loads(s, encoding=None, cls=None, object_hook=None, '
            'parse_float=None, parse_int=None, parse_constant=None, '
            'object_pairs_hook=None, **kw):'
        )
class TestGetPythonProjectFiles(RopeBackendTestCase):
    """Tests for ropebackend.get_python_project_files()."""

    def test(self):
        # Files under a directory without __init__.py ("baddir") must be
        # excluded; root-level files and proper packages are kept.
        self.project_file("foo.txt", "")
        self.project_file("gooddir/__init__.py", "")
        self.project_file("gooddir/file.py", "")
        self.project_file("baddir/file.py", "")
        expected = set(os.path.join(self.project_root, name)
                       for name in ["foo.txt", "gooddir/__init__.py",
                                    "gooddir/file.py"])
        project = self.backend.project
        actual = set(resource.real_path
                     for resource
                     in ropebackend.get_python_project_files(project))
        self.assertEqual(expected, actual)
class TestPatchProjectFiles(RopeBackendTestCase):
    """Tests that the backend patches project.get_files() the same way."""

    def test(self):
        self.project_file("foo.txt", "")
        self.project_file("gooddir/__init__.py", "")
        self.project_file("gooddir/file.py", "")
        self.project_file("baddir/file.py", "")
        expected = set(os.path.join(self.project_root, name)
                       for name in ["foo.txt", "gooddir/__init__.py",
                                    "gooddir/file.py"])
        actual = set(resource.real_path
                     for resource
                     in self.backend.project.get_files())
        self.assertEqual(expected, actual)
class TestCallRope(RopeBackendTestCase):
    """Tests for RopeBackend.call_rope()."""

    def test_should_return_value(self):
        func = mock.MagicMock()
        func.return_value = 23
        actual = self.backend.call_rope(
            func, "foo.py", "", 0
        )
        self.assertEqual(23, actual)

    def test_should_raise_fault_with_data_on_exception(self):
        func = mock.MagicMock()
        func.side_effect = RuntimeError("Stuff!")
        func.__module__ = "rope.test"
        func.__name__ = "test_function"
        try:
            self.backend.call_rope(
                func, "foo.py", "", 0
            )
        except rpc.Fault as e:
            self.assertEqual(500, e.code)
            self.assertEqual("Stuff!", e.message)
            self.assertIn("traceback", e.data)
            self.assertIn("rope_debug_info", e.data)
            self.assertEqual("rope.test.test_function",
                             e.data["rope_debug_info"]["function_name"])
        else:
            # Bug fix: previously, if call_rope failed to raise, this test
            # silently passed. Force a failure in that case.
            self.fail("call_rope() did not raise rpc.Fault")
|
robin-liu-1983/emacs-robin.d
|
emacs.d/elpa/elpy-20160131.118/elpy/tests/test_ropebackend.py
|
Python
|
mit
| 6,871
|
"""HTML form handling for web clients.
ClientForm is a Python module for handling HTML forms on the client
side, useful for parsing HTML forms, filling them in and returning the
completed forms to the server. It has developed from a port of Gisle
Aas' Perl module HTML::Form, from the libwww-perl library, but the
interface is not the same.
The most useful docstring is the one for HTMLForm.
RFC 1866: HTML 2.0
RFC 1867: Form-based File Upload in HTML
RFC 2388: Returning Values from Forms: multipart/form-data
HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX)
HTML 4.01 Specification, W3C Recommendation 24 December 1999
Copyright 2002-2006 John J. Lee <jjl@pobox.com>
Copyright 2005 Gary Poster
Copyright 2005 Zope Corporation
Copyright 1998-2000 Gisle Aas.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX
# add an __all__
# Remove parser testing hack
# safeUrl()-ize action
# Switch to unicode throughout (would be 0.3.x)
# See Wichert Akkerman's 2004-01-22 message to c.l.py.
# Add charset parameter to Content-type headers? How to find value??
# Add some more functional tests
# Especially single and multiple file upload on the internet.
# Does file upload work when name is missing? Sourceforge tracker form
# doesn't like it. Check standards, and test with Apache. Test
# binary upload with Apache.
# mailto submission & enctype text/plain
# I'm not going to fix this unless somebody tells me what real servers
# that want this encoding actually expect: If enctype is
# application/x-www-form-urlencoded and there's a FILE control present.
# Strictly, it should be 'name=data' (see HTML 4.01 spec., section
# 17.13.2), but I send "name=" ATM. What about multiple file upload??
# Would be nice, but I'm not going to do it myself:
# -------------------------------------------------
# Maybe a 0.4.x?
# Replace by_label etc. with moniker / selector concept. Allows, eg.,
# a choice between selection by value / id / label / element
# contents. Or choice between matching labels exactly or by
# substring. Etc.
# Remove deprecated methods.
# ...what else?
# Work on DOMForm.
# XForms? Don't know if there's a need here.
# Compatibility shims for very old Pythons that predate the True/False
# constants and the bool() builtin.
try: True
except NameError:
    True = 1
    False = 0

try: bool
except NameError:
    def bool(expr):
        # minimal bool(): coerce any expression to the True/False constants
        if expr: return True
        else: return False
# Optional debug logging: debug() is a cheap no-op unless the logging
# module is available AND _show_debug_messages() has been called.
try:
    import logging
except ImportError:
    def debug(msg, *args, **kwds):
        # logging unavailable on this Python: swallow debug messages
        pass
else:
    _logger = logging.getLogger("ClientForm")
    OPTIMIZATION_HACK = True

    def debug(msg, *args, **kwds):
        # Fast path: skip the expensive frame introspection below until
        # debugging is switched on via _show_debug_messages().
        if OPTIMIZATION_HACK:
            return

        # Raise/catch purely to obtain a traceback object so we can walk
        # two frames up and prepend the caller's function name to the
        # log message.
        try:
            raise Exception()
        except:
            caller_name = (
                sys.exc_info()[2].tb_frame.f_back.f_back.f_code.co_name)
        extended_msg = '%%s %s' % msg
        extended_args = (caller_name,)+args
        # NOTE(review): the (None) result of _logger.debug() is bound to a
        # local named "debug"; presumably only the call was intended --
        # harmless, but confirm against upstream.
        debug = _logger.debug(extended_msg, *extended_args, **kwds)

    def _show_debug_messages():
        # Enable debug() output, routed to stdout.
        global OPTIMIZATION_HACK
        OPTIMIZATION_HACK = False
        _logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.DEBUG)
        _logger.addHandler(handler)
import sys, urllib, urllib2, types, mimetools, copy, urlparse, \
htmlentitydefs, re, random
from cStringIO import StringIO
import sgmllib
# monkeypatch to fix http://www.python.org/sf/803422 :-(
# Widen sgmllib's character-reference pattern so that hexadecimal
# references ("&#x2014;") are recognized, not just decimal ones.
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
# HTMLParser.HTMLParser is recent, so live without it if it's not available
# (also, sgmllib.SGMLParser is much more tolerant of bad HTML)
try:
    import HTMLParser
except ImportError:
    HAVE_MODULE_HTMLPARSER = False
else:
    HAVE_MODULE_HTMLPARSER = True
# deprecation(): emit a DeprecationWarning, or do nothing on ancient
# Pythons that lack the warnings module.
try:
    import warnings
except ImportError:
    def deprecation(message):
        pass
else:
    def deprecation(message):
        # stacklevel=2 attributes the warning to the deprecated API's caller
        warnings.warn(message, DeprecationWarning, stacklevel=2)
VERSION = "0.2.7"

CHUNK = 1024  # size of chunks fed to parser, in bytes

# Default character encoding for numeric character references.
DEFAULT_ENCODING = "latin-1"

# Marker class (used as a sentinel value elsewhere in the module --
# definition here carries no behavior of its own).
class Missing: pass
# Pattern matching any run of whitespace characters.
_compress_re = re.compile(r"\s+")

def compress_text(text):
    """Strip *text* and collapse each internal whitespace run to one space."""
    stripped = text.strip()
    return _compress_re.sub(" ", stripped)
def normalize_line_endings(text):
    """Return *text* with every line ending converted to CRLF ("\\r\\n").

    A bare LF (not preceded by CR) or a bare CR (not followed by LF) is
    rewritten; existing CRLF pairs are left untouched.
    """
    bare_ending = re.compile(r"(?:(?<!\r)\n)|(?:\r(?!\n))")
    return bare_ending.sub("\r\n", text)
# This version of urlencode is from my Python 1.5.2 back-port of the
# Python 2.1 CVS maintenance branch of urllib. It will accept a sequence
# of pairs instead of a mapping -- the 2.0 version only accepts a mapping.
def urlencode(query,doseq=False,):
    """Encode a sequence of two-element tuples or dictionary into a URL query \
string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """
    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            x = len(query)
            # non-empty strings will fail this
            if len(query) and type(query[0]) != types.TupleType:
                raise TypeError()
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty,va,tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence or mapping "
                            "object", tb)

    l = []
    if not doseq:
        # preserve old behavior: every value is stringified and quoted
        for k, v in query:
            k = urllib.quote_plus(str(k))
            v = urllib.quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = urllib.quote_plus(str(k))
            if type(v) == types.StringType:
                v = urllib.quote_plus(v)
                l.append(k + '=' + v)
            elif type(v) == types.UnicodeType:
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = urllib.quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence: stringify and quote as a single value
                    v = urllib.quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence: one k=elt pair per element
                    for elt in v:
                        l.append(k + '=' + urllib.quote_plus(str(elt)))
    return '&'.join(l)
def unescape(data, entities, encoding=DEFAULT_ENCODING):
    """Replace HTML entity and numeric character references in *data*.

    entities: mapping from entity reference text (e.g. "&amp;") to its
    replacement. Returns *data* unchanged when it is None or contains
    no "&".
    """
    if data is None or "&" not in data:
        return data

    def replace_entities(match, entities=entities, encoding=encoding):
        ent = match.group()
        if ent[1] == "#":
            # numeric character reference, e.g. "&#38;" or "&#x26;"
            return unescape_charref(ent[2:-1], encoding)

        repl = entities.get(ent)
        if repl is not None:
            if type(repl) != type(""):
                # unicode replacement: encode it; keep the raw entity
                # text if it cannot be represented in the target encoding
                try:
                    repl = repl.encode(encoding)
                except UnicodeError:
                    repl = ent
        else:
            # unknown entity: leave it as-is
            repl = ent
        return repl

    return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
    """Decode the body of a numeric character reference ("38" or "x26").

    Returns a unicode character when encoding is None, otherwise the
    character encoded in *encoding* (or the reference text itself when
    the character is not representable in that encoding).
    """
    name, base = data, 10
    if name.startswith("x"):
        # hexadecimal reference, e.g. "x26"
        name, base = name[1:], 16
    uc = unichr(int(name, base))
    if encoding is None:
        return uc
    else:
        try:
            repl = uc.encode(encoding)
        except UnicodeError:
            # not representable: reconstruct the reference text
            repl = "&#%s;" % data
        return repl
def get_entitydefs():
    """Build a mapping of "&name;" -> unicode character for HTML entities."""
    import htmlentitydefs
    from codecs import latin_1_decode
    entitydefs = {}
    try:
        htmlentitydefs.name2codepoint
    except AttributeError:
        # Python < 2.3: only entitydefs is available, whose values are
        # latin-1 strings or "&#...;" reference strings.
        entitydefs = {}
        for name, char in htmlentitydefs.entitydefs.items():
            uc = latin_1_decode(char)[0]
            if uc.startswith("&#") and uc.endswith(";"):
                uc = unescape_charref(uc[2:-1], None)
            entitydefs["&%s;" % name] = uc
    else:
        for name, codepoint in htmlentitydefs.name2codepoint.items():
            entitydefs["&%s;" % name] = unichr(codepoint)
    return entitydefs
def issequence(x):
    """Duck-type check: does *x* support indexing by integer position?

    Empty sequences (which raise IndexError on x[0]) still count as
    sequences; mappings typically raise KeyError and are rejected.
    """
    try:
        x[0]
        return True
    except IndexError:
        # indexable but empty -- still a sequence
        return True
    except (TypeError, KeyError):
        return False
def isstringlike(x):
    """Return True if *x* supports concatenation with a string (duck test)."""
    try: x+""
    # Was a bare "except:", which also swallowed KeyboardInterrupt and
    # SystemExit; any ordinary failure of the concatenation simply means
    # "not string-like".
    except Exception: return False
    else: return True
def choose_boundary():
    """Return a string usable as a multipart boundary."""
    # follow IE and firefox: 27 dashes plus a random numeric nonce
    # NOTE(review): sys.maxint is Python 2 only (removed in Python 3);
    # "for i in 0,1,2" inside a list comprehension is Python 2 syntax too.
    nonce = "".join([str(random.randint(0, sys.maxint-1)) for i in 0,1,2])
    return "-"*27 + nonce
# This cut-n-pasted MimeWriter from standard library is here so can add
# to HTTP headers rather than message body when appropriate. It also uses
# \r\n in place of \n. This is a bit nasty.
class MimeWriter:

    """Generic MIME writer.

    Methods:

    __init__()
    addheader()
    flushheaders()
    startbody()
    startmultipartbody()
    nextpart()
    lastpart()

    A MIME writer is much more primitive than a MIME parser.  It
    doesn't seek around on the output file, and it doesn't use large
    amounts of buffer space, so you have to write the parts in the
    order they should occur on the output file.  It does buffer the
    headers you add, allowing you to rearrange their order.

    General usage is:

    f = <open the output file>
    w = MimeWriter(f)
    ...call w.addheader(key, value) 0 or more times...

    followed by either:

    f = w.startbody(content_type)
    ...call f.write(data) for body data...

    or:

    w.startmultipartbody(subtype)
    for each part:
        subwriter = w.nextpart()
        ...use the subwriter's methods to create the subpart...
    w.lastpart()

    The subwriter is another MimeWriter instance, and should be
    treated in the same way as the toplevel MimeWriter.  This way,
    writing recursive body parts is easy.

    Warning: don't forget to call lastpart()!

    XXX There should be more state so calls made in the wrong order
    are detected.

    Some special cases:

    - startbody() just returns the file passed to the constructor;
      but don't use this knowledge, as it may be changed.

    - startmultipartbody() actually returns a file as well;
      this can be used to write the initial 'if you can read this your
      mailer is not MIME-aware' message.

    - If you call flushheaders(), the headers accumulated so far are
      written out (and forgotten); this is useful if you don't need a
      body part at all, e.g. for a subpart of type message/rfc822
      that's (mis)used to store some header-like information.

    - Passing a keyword argument 'prefix=<flag>' to addheader(),
      start*body() affects where the header is inserted; 0 means
      append at the end, 1 means insert at the start; default is
      append for addheader(), but insert for start*body(), which use
      it to determine where the Content-type header goes.
    """

    def __init__(self, fp, http_hdrs=None):
        # fp: output file-like object.
        # http_hdrs: optional list that receives (key, value) pairs
        # destined for HTTP headers rather than the message body.
        self._http_hdrs = http_hdrs
        self._fp = fp
        self._headers = []
        self._boundary = []
        self._first_part = True

    def addheader(self, key, value, prefix=0,
                  add_to_http_hdrs=0):
        """
        prefix is ignored if add_to_http_hdrs is true.
        """
        lines = value.split("\r\n")
        # drop leading and trailing empty lines
        while lines and not lines[-1]: del lines[-1]
        while lines and not lines[0]: del lines[0]
        if add_to_http_hdrs:
            value = "".join(lines)
            self._http_hdrs.append((key, value))
        else:
            # indent continuation lines (header folding)
            for i in range(1, len(lines)):
                lines[i] = " " + lines[i].strip()
            value = "\r\n".join(lines) + "\r\n"
            line = key + ": " + value
            if prefix:
                self._headers.insert(0, line)
            else:
                self._headers.append(line)

    def flushheaders(self):
        # Write out buffered headers and forget them.
        self._fp.writelines(self._headers)
        self._headers = []

    def startbody(self, ctype=None, plist=[], prefix=1,
                  add_to_http_hdrs=0, content_type=1):
        """
        prefix is ignored if add_to_http_hdrs is true.
        """
        # NOTE(review): plist=[] is a mutable default argument; safe here
        # because it is only iterated, never mutated.
        if content_type and ctype:
            for name, value in plist:
                ctype = ctype + ';\r\n %s=%s' % (name, value)
            self.addheader("Content-type", ctype, prefix=prefix,
                           add_to_http_hdrs=add_to_http_hdrs)
        self.flushheaders()
        # blank line separates headers from the body (not for HTTP headers)
        if not add_to_http_hdrs: self._fp.write("\r\n")
        self._first_part = True
        return self._fp

    def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1,
                           add_to_http_hdrs=0, content_type=1):
        # Start a multipart body; a boundary is generated when not supplied.
        boundary = boundary or choose_boundary()
        self._boundary.append(boundary)
        return self.startbody("multipart/" + subtype,
                              [("boundary", boundary)] + plist,
                              prefix=prefix,
                              add_to_http_hdrs=add_to_http_hdrs,
                              content_type=content_type)

    def nextpart(self):
        # Emit the boundary introducing a new part; return a sub-writer
        # for that part.
        boundary = self._boundary[-1]
        if self._first_part:
            self._first_part = False
        else:
            self._fp.write("\r\n")
        self._fp.write("--" + boundary + "\r\n")
        return self.__class__(self._fp)

    def lastpart(self):
        # Emit the closing boundary for the current multipart body.
        if self._first_part:
            self.nextpart()
        boundary = self._boundary.pop()
        self._fp.write("\r\n--" + boundary + "--\r\n")
class LocateError(ValueError):
    """Base class for errors locating a form control or item."""

class AmbiguityError(LocateError):
    """More than one control or item matched the description."""

class ControlNotFoundError(LocateError):
    """No control matched the description."""

class ItemNotFoundError(LocateError):
    """No item matched the description."""

class ItemCountError(ValueError):
    """An operation received an unexpected number of items."""
# for backwards compatibility, ParseError derives from exceptions that were
# raised by versions of ClientForm <= 0.2.5
if HAVE_MODULE_HTMLPARSER:
    SGMLLIB_PARSEERROR = sgmllib.SGMLParseError
    class ParseError(sgmllib.SGMLParseError,
                     HTMLParser.HTMLParseError,
                     ):
        pass
else:
    if hasattr(sgmllib, "SGMLParseError"):
        SGMLLIB_PARSEERROR = sgmllib.SGMLParseError
        class ParseError(sgmllib.SGMLParseError):
            pass
    else:
        # very old sgmllib raised RuntimeError instead of a dedicated class
        SGMLLIB_PARSEERROR = RuntimeError
        class ParseError(RuntimeError):
            pass
class _AbstractFormParser:
    """forms attribute contains HTMLForm instances on completion.

    Parser mixin: subclasses combine this with an sgmllib/HTMLParser base
    class. Collected form data is stored as (name, action, method, enctype),
    attrs-dict, controls-list triples in self.forms.
    """
    # thanks to Moshe Zadka for an example of sgmllib/htmllib usage
    def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
        if entitydefs is None:
            entitydefs = get_entitydefs()
        self._entitydefs = entitydefs
        self._encoding = encoding

        self.base = None
        self.forms = []
        self.labels = []
        # parser state: the element currently being accumulated, if any
        self._current_label = None
        self._current_form = None
        self._select = None
        self._optgroup = None
        self._option = None
        self._textarea = None

        # forms[0] will contain all controls that are outside of any form
        # self._global_form is an alias for self.forms[0]
        self._global_form = None
        self.start_form([])
        self.end_form()
        self._current_form = self._global_form = self.forms[0]

    def do_base(self, attrs):
        # <BASE HREF=...>: remember the document's base URI
        debug("%s", attrs)
        for key, value in attrs:
            if key == "href":
                self.base = value

    def end_body(self):
        # close any label/form left dangling at the end of the document
        debug("")
        if self._current_label is not None:
            self.end_label()
        if self._current_form is not self._global_form:
            self.end_form()

    def start_form(self, attrs):
        debug("%s", attrs)
        if self._current_form is not self._global_form:
            raise ParseError("nested FORMs")
        name = None
        action = None
        enctype = "application/x-www-form-urlencoded"
        method = "GET"
        d = {}
        for key, value in attrs:
            if key == "name":
                name = value
            elif key == "action":
                action = value
            elif key == "method":
                method = value.upper()
            elif key == "enctype":
                enctype = value.lower()
            d[key] = value
        controls = []
        self._current_form = (name, action, method, enctype), d, controls

    def end_form(self):
        debug("")
        if self._current_label is not None:
            self.end_label()
        if self._current_form is self._global_form:
            raise ParseError("end of FORM before start")
        self.forms.append(self._current_form)
        self._current_form = self._global_form

    def start_select(self, attrs):
        debug("%s", attrs)
        if self._select is not None:
            raise ParseError("nested SELECTs")
        if self._textarea is not None:
            raise ParseError("SELECT inside TEXTAREA")
        d = {}
        for key, val in attrs:
            d[key] = val

        self._select = d
        self._add_label(d)

        # record the bare SELECT itself, so it is seen even with no OPTIONs
        self._append_select_control({"__select": d})

    def end_select(self):
        debug("")
        if self._current_form is self._global_form:
            return

        if self._select is None:
            raise ParseError("end of SELECT before start")

        if self._option is not None:
            self._end_option()

        self._select = None

    def start_optgroup(self, attrs):
        debug("%s", attrs)
        if self._select is None:
            raise ParseError("OPTGROUP outside of SELECT")
        d = {}
        for key, val in attrs:
            d[key] = val

        self._optgroup = d

    def end_optgroup(self):
        debug("")
        if self._optgroup is None:
            raise ParseError("end of OPTGROUP before start")
        self._optgroup = None

    def _start_option(self, attrs):
        debug("%s", attrs)
        if self._select is None:
            raise ParseError("OPTION outside of SELECT")
        if self._option is not None:
            # OPTION end tags are optional: a new OPTION closes the previous
            self._end_option()

        d = {}
        for key, val in attrs:
            d[key] = val

        self._option = {}
        self._option.update(d)
        # a disabled OPTGROUP disables its contained OPTIONs
        if (self._optgroup and self._optgroup.has_key("disabled") and
            not self._option.has_key("disabled")):
            self._option["disabled"] = None

    def _end_option(self):
        debug("")
        if self._option is None:
            raise ParseError("end of OPTION before start")

        contents = self._option.get("contents", "").strip()
        self._option["contents"] = contents
        # value and label default to the element's text contents
        if not self._option.has_key("value"):
            self._option["value"] = contents
        if not self._option.has_key("label"):
            self._option["label"] = contents
        # stuff dict of SELECT HTML attrs into a special private key
        # (gets deleted again later)
        self._option["__select"] = self._select
        self._append_select_control(self._option)
        self._option = None

    def _append_select_control(self, attrs):
        debug("%s", attrs)
        controls = self._current_form[2]
        name = self._select.get("name")
        controls.append(("select", name, attrs))

    def start_textarea(self, attrs):
        debug("%s", attrs)
        if self._textarea is not None:
            raise ParseError("nested TEXTAREAs")
        if self._select is not None:
            raise ParseError("TEXTAREA inside SELECT")
        d = {}
        for key, val in attrs:
            d[key] = val
        self._add_label(d)

        self._textarea = d

    def end_textarea(self):
        debug("")
        if self._current_form is self._global_form:
            return

        if self._textarea is None:
            raise ParseError("end of TEXTAREA before start")
        controls = self._current_form[2]
        name = self._textarea.get("name")
        controls.append(("textarea", name, self._textarea))
        self._textarea = None

    def start_label(self, attrs):
        debug("%s", attrs)
        if self._current_label:
            # LABEL end tags are optional: a new LABEL closes the previous
            self.end_label()
        d = {}
        for key, val in attrs:
            d[key] = val
        taken = bool(d.get("for"))  # empty id is invalid
        d["__text"] = ""
        d["__taken"] = taken
        if taken:
            self.labels.append(d)
        self._current_label = d

    def end_label(self):
        debug("")
        label = self._current_label
        if label is None:
            # something is ugly in the HTML, but we're ignoring it
            return
        self._current_label = None
        # NOTE(review): self-assignment is a no-op as written; possibly a
        # post-processing step was intended here -- confirm against upstream.
        label["__text"] = label["__text"]
        # if it is staying around, it is True in all cases
        del label["__taken"]

    def _add_label(self, d):
        #debug("%s", d)
        # Associate the currently-open LABEL (if any, and not yet taken)
        # with control attributes d.
        if self._current_label is not None:
            if self._current_label["__taken"]:
                self.end_label()  # be fuzzy
            else:
                self._current_label["__taken"] = True
                d["__label"] = self._current_label

    def handle_data(self, data):
        debug("%s", data)

        # according to http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.1
        # line break immediately after start tags or immediately before end
        # tags must be ignored, but real browsers only ignore a line break
        # after a start tag, so we'll do that.
        if data[0:2] == "\r\n":
            data = data[2:]
        if data[0:1] in ["\n", "\r"]:
            data = data[1:]

        if self._option is not None:
            # self._option is a dictionary of the OPTION element's HTML
            # attributes, but it has two special keys, one of which is the
            # special "contents" key contains text between OPTION tags (the
            # other is the "__select" key: see the end_option method)
            map = self._option
            key = "contents"
        elif self._textarea is not None:
            map = self._textarea
            key = "value"
            data = normalize_line_endings(data)
        # not if within option or textarea
        elif self._current_label is not None:
            map = self._current_label
            key = "__text"
        else:
            return

        if not map.has_key(key):
            map[key] = data
        else:
            # accumulate: handle_data may be called multiple times per element
            map[key] = map[key] + data

    def do_button(self, attrs):
        debug("%s", attrs)
        d = {}
        d["type"] = "submit"  # default
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]

        type = d["type"]
        name = d.get("name")
        # we don't want to lose information, so use a type string that
        # doesn't clash with INPUT TYPE={SUBMIT,RESET,BUTTON}
        # e.g. type for BUTTON/RESET is "resetbutton"
        # (type for INPUT/RESET is "reset")
        type = type+"button"
        self._add_label(d)
        controls.append((type, name, d))

    def do_input(self, attrs):
        debug("%s", attrs)
        d = {}
        d["type"] = "text"  # default
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]

        type = d["type"]
        name = d.get("name")
        self._add_label(d)
        controls.append((type, name, d))

    def do_isindex(self, attrs):
        debug("%s", attrs)
        d = {}
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]

        self._add_label(d)
        # isindex doesn't have type or name HTML attributes
        controls.append(("isindex", None, d))

    def handle_entityref(self, name):
        #debug("%s", name)
        self.handle_data(unescape(
            '&%s;' % name, self._entitydefs, self._encoding))

    def handle_charref(self, name):
        #debug("%s", name)
        self.handle_data(unescape_charref(name, self._encoding))

    def unescape_attr(self, name):
        #debug("%s", name)
        return unescape(name, self._entitydefs, self._encoding)

    def unescape_attrs(self, attrs):
        #debug("%s", attrs)
        escaped_attrs = {}
        for key, val in attrs.items():
            try:
                val.items
            except AttributeError:
                escaped_attrs[key] = self.unescape_attr(val)
            else:
                # e.g. "__select" -- yuck!
                # nested attribute dict: unescape recursively
                escaped_attrs[key] = self.unescape_attrs(val)
        return escaped_attrs

    def unknown_entityref(self, ref): self.handle_data("&%s;" % ref)
    def unknown_charref(self, ref): self.handle_data("&#%s;" % ref)
if not HAVE_MODULE_HTMLPARSER:
    class XHTMLCompatibleFormParser:
        # Placeholder that fails loudly when HTMLParser is unavailable.
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            raise ValueError("HTMLParser could not be imported")
else:
    class XHTMLCompatibleFormParser(_AbstractFormParser, HTMLParser.HTMLParser):
        """Good for XHTML, bad for tolerance of incorrect HTML."""
        # thanks to Michael Howitz for this!
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            HTMLParser.HTMLParser.__init__(self)
            _AbstractFormParser.__init__(self, entitydefs, encoding)

        def feed(self, data):
            # wrap HTMLParser errors in this module's ParseError
            try:
                HTMLParser.HTMLParser.feed(self, data)
            except HTMLParser.HTMLParseError, exc:
                raise ParseError(exc)

        # adapt _AbstractFormParser's private option handlers to the
        # start_/end_ naming HTMLParser dispatch expects
        def start_option(self, attrs):
            _AbstractFormParser._start_option(self, attrs)

        def end_option(self):
            _AbstractFormParser._end_option(self)

        def handle_starttag(self, tag, attrs):
            # dispatch to start_<tag> (container element) or fall back to
            # do_<tag> (empty element); silently ignore unknown tags
            try:
                method = getattr(self, "start_" + tag)
            except AttributeError:
                try:
                    method = getattr(self, "do_" + tag)
                except AttributeError:
                    pass  # unknown tag
                else:
                    method(attrs)
            else:
                method(attrs)

        def handle_endtag(self, tag):
            try:
                method = getattr(self, "end_" + tag)
            except AttributeError:
                pass  # unknown tag
            else:
                method()

        def unescape(self, name):
            # Use the entitydefs passed into constructor, not
            # HTMLParser.HTMLParser's entitydefs.
            return self.unescape_attr(name)

        def unescape_attr_if_required(self, name):
            return name  # HTMLParser.HTMLParser already did it

        def unescape_attrs_if_required(self, attrs):
            return attrs  # ditto
class _AbstractSgmllibParser(_AbstractFormParser):
    """Adapts _AbstractFormParser to sgmllib-style tag dispatch."""

    def do_option(self, attrs):
        # sgmllib reports OPTION via do_option
        _AbstractFormParser._start_option(self, attrs)

    if sys.version_info[:2] >= (2,5):
        # we override this attr to decode hex charrefs
        entity_or_charref = re.compile(
            '&(?:([a-zA-Z][-.a-zA-Z0-9]*)|#(x?[0-9a-fA-F]+))(;?)')
        def convert_entityref(self, name):
            return unescape("&%s;" % name, self._entitydefs, self._encoding)
        def convert_charref(self, name):
            return unescape_charref("%s" % name, self._encoding)
        def unescape_attr_if_required(self, name):
            return name  # sgmllib already did it
        def unescape_attrs_if_required(self, attrs):
            return attrs  # ditto
    else:
        # pre-2.5 sgmllib does not unescape attributes itself
        def unescape_attr_if_required(self, name):
            return self.unescape_attr(name)
        def unescape_attrs_if_required(self, attrs):
            return self.unescape_attrs(attrs)
class FormParser(_AbstractSgmllibParser, sgmllib.SGMLParser):
    """Good for tolerance of incorrect HTML, bad for XHTML."""
    def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
        sgmllib.SGMLParser.__init__(self)
        _AbstractFormParser.__init__(self, entitydefs, encoding)

    def feed(self, data):
        # wrap sgmllib parse errors in this module's ParseError
        try:
            sgmllib.SGMLParser.feed(self, data)
        except SGMLLIB_PARSEERROR, exc:
            raise ParseError(exc)
# sigh, must support mechanize by allowing dynamic creation of classes based on
# its bundled copy of BeautifulSoup (which was necessary because of dependency
# problems)
def _create_bs_classes(bs,
                       icbinbs,
                       ):
    """Build form-parser classes on top of BeautifulSoup base classes.

    bs / icbinbs: BeautifulSoup and ICantBelieveItsBeautifulSoup classes
    (passed in so mechanize can supply its bundled copies).
    Returns (RobustFormParser, NestingRobustFormParser).
    """
    class _AbstractBSFormParser(_AbstractSgmllibParser):
        # concrete subclasses set this to their BeautifulSoup base class
        bs_base_class = None
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            _AbstractFormParser.__init__(self, entitydefs, encoding)
            self.bs_base_class.__init__(self)
        def handle_data(self, data):
            # feed character data to both the form collector and the soup
            _AbstractFormParser.handle_data(self, data)
            self.bs_base_class.handle_data(self, data)
        def feed(self, data):
            try:
                self.bs_base_class.feed(self, data)
            except SGMLLIB_PARSEERROR, exc:
                raise ParseError(exc)

    class RobustFormParser(_AbstractBSFormParser, bs):
        """Tries to be highly tolerant of incorrect HTML."""
        pass
    RobustFormParser.bs_base_class = bs

    class NestingRobustFormParser(_AbstractBSFormParser, icbinbs):
        """Tries to be highly tolerant of incorrect HTML.

        Different from RobustFormParser in that it more often guesses nesting
        above missing end tags (see BeautifulSoup docs).
        """
        pass
    NestingRobustFormParser.bs_base_class = icbinbs

    return RobustFormParser, NestingRobustFormParser
# Define Robust*FormParser only when BeautifulSoup is importable (and the
# Python is new enough to run it).
try:
    if sys.version_info[:2] < (2, 2):
        raise ImportError  # BeautifulSoup uses generators
    import BeautifulSoup
except ImportError:
    # BeautifulSoup unavailable: the robust parser classes are not defined
    pass
else:
    RobustFormParser, NestingRobustFormParser = _create_bs_classes(
        BeautifulSoup.BeautifulSoup, BeautifulSoup.ICantBelieveItsBeautifulSoup
        )
#FormParser = XHTMLCompatibleFormParser # testing hack
#FormParser = RobustFormParser # testing hack
def ParseResponseEx(response,
                    select_default=False,
                    form_parser_class=FormParser,
                    request_class=urllib2.Request,
                    entitydefs=None,
                    encoding=DEFAULT_ENCODING,
                    # private
                    _urljoin=urlparse.urljoin,
                    _urlparse=urlparse.urlparse,
                    _urlunparse=urlparse.urlunparse,
                    ):
    """Identical to ParseResponse, except that:

    1. The returned list contains an extra item.  The first form in the list
    contains all controls not contained in any FORM element.

    2. The arguments ignore_errors and backwards_compat have been removed.

    3. Backwards-compatibility mode (backwards_compat=True) is not available.
    """
    # NOTE(review): the two literal False positional arguments presumably
    # correspond to the removed ignore_errors / backwards_compat flags of
    # _ParseFileEx -- confirm against its definition (not shown here).
    return _ParseFileEx(response, response.geturl(),
                        select_default,
                        False,
                        form_parser_class,
                        request_class,
                        entitydefs,
                        False,
                        encoding,
                        _urljoin=_urljoin,
                        _urlparse=_urlparse,
                        _urlunparse=_urlunparse,
                        )
def ParseFileEx(file, base_uri,
                select_default=False,
                form_parser_class=FormParser,
                request_class=urllib2.Request,
                entitydefs=None,
                encoding=DEFAULT_ENCODING,
                # private
                _urljoin=urlparse.urljoin,
                _urlparse=urlparse.urlparse,
                _urlunparse=urlparse.urlunparse,
                ):
    """Identical to ParseFile, except that:

    1. The returned list contains an extra item.  The first form in the list
    contains all controls not contained in any FORM element.

    2. The arguments ignore_errors and backwards_compat have been removed.

    3. Backwards-compatibility mode (backwards_compat=True) is not available.
    """
    # NOTE(review): the two literal False positional arguments presumably
    # correspond to the removed ignore_errors / backwards_compat flags of
    # _ParseFileEx -- confirm against its definition (not shown here).
    return _ParseFileEx(file, base_uri,
                        select_default,
                        False,
                        form_parser_class,
                        request_class,
                        entitydefs,
                        False,
                        encoding,
                        _urljoin=_urljoin,
                        _urlparse=_urlparse,
                        _urlunparse=_urlunparse,
                        )
def ParseResponse(response, *args, **kwds):
    """Parse HTTP response and return a list of HTMLForm instances.

    The return value of urllib2.urlopen can be conveniently passed to this
    function as the response parameter.

    ClientForm.ParseError is raised on parse errors.

    response: file-like object (supporting read() method) with a method
     geturl(), returning the URI of the HTTP response
    select_default: for multiple-selection SELECT controls and RADIO controls,
     pick the first item as the default if none are selected in the HTML
    form_parser_class: class to instantiate and use to pass
    request_class: class to return from .click() method (default is
     urllib2.Request)
    entitydefs: mapping like {"&amp;": "&", ...} containing HTML entity
     definitions (a sensible default is used)
    encoding: character encoding used for encoding numeric character references
     when matching link text.  ClientForm does not attempt to find the encoding
     in a META HTTP-EQUIV attribute in the document itself (mechanize, for
     example, does do that and will pass the correct value to ClientForm using
     this parameter).

    backwards_compat: boolean that determines whether the returned HTMLForm
     objects are backwards-compatible with old code.  If backwards_compat is
     true:

     - ClientForm 0.1 code will continue to work as before.

     - Label searches that do not specify a nr (number or count) will always
       get the first match, even if other controls match.  If
       backwards_compat is False, label searches that have ambiguous results
       will raise an AmbiguityError.

     - Item label matching is done by strict string comparison rather than
       substring matching.

     - De-selecting individual list items is allowed even if the Item is
       disabled.

    The backwards_compat argument will be deprecated in a future release.

    Pass a true value for select_default if you want the behaviour specified
    by RFC 1866 (the HTML 2.0 standard): select the first item of a RADIO or
    multiple-selection SELECT control when the HTML left all items
    unselected.  Most browsers (including Microsoft Internet Explorer (IE)
    and Netscape Navigator) instead leave all items unselected in these
    cases, and that is the default behaviour here.  The W3C HTML 4.0 standard
    leaves this undefined for multiple-selection SELECT controls, but insists
    that at least one RADIO button should be checked at all times, in
    contradiction to browser behaviour.

    There is a choice of parsers.  ClientForm.XHTMLCompatibleFormParser (uses
    HTMLParser.HTMLParser) works best for XHTML, ClientForm.FormParser (uses
    sgmllib.SGMLParser) (the default) works better for ordinary grubby HTML.
    Note that HTMLParser is only available in Python 2.2 and later.  You can
    pass your own class in here as a hack to work around bad HTML, but at
    your own risk: there is no well-defined interface.

    """
    # _ParseFileEx prepends a synthetic form holding controls found outside
    # any FORM element; the public API drops it.
    all_forms = _ParseFileEx(response, response.geturl(), *args, **kwds)
    return all_forms[1:]
def ParseFile(file, base_uri, *args, **kwds):
    """Parse HTML and return a list of HTMLForm instances.

    ClientForm.ParseError is raised on parse errors.

    file: file-like object (supporting read() method) containing HTML with
     zero or more forms to be parsed
    base_uri: the URI of the document (note that the base URI used to submit
     the form will be that given in the BASE element if present, not that of
     the document)

    For the other arguments and further details, see ParseResponse.__doc__.

    """
    # _ParseFileEx prepends a synthetic form holding controls found outside
    # any FORM element; the public API drops it.
    all_forms = _ParseFileEx(file, base_uri, *args, **kwds)
    return all_forms[1:]
def _ParseFileEx(file, base_uri,
                 select_default=False,
                 ignore_errors=False,
                 form_parser_class=FormParser,
                 request_class=urllib2.Request,
                 entitydefs=None,
                 backwards_compat=True,
                 encoding=DEFAULT_ENCODING,
                 _urljoin=urlparse.urljoin,
                 _urlparse=urlparse.urlparse,
                 _urlunparse=urlparse.urlunparse,
                 ):
    """Internal workhorse behind the Parse* functions.

    Returns a list of HTMLForm instances whose first element is a synthetic
    form containing every control found outside any FORM element; the public
    ParseResponse / ParseFile wrappers slice that first form off, the *Ex
    variants keep it.
    """
    if backwards_compat:
        deprecation("operating in backwards-compatibility mode")
    fp = form_parser_class(entitydefs, encoding)
    # NOTE(review): assumes `file` is seekable -- confirm for the response
    # objects callers pass in
    file.seek(0)
    # feed the parser in CHUNK-sized pieces; a short read signals EOF
    while 1:
        data = file.read(CHUNK)
        try:
            fp.feed(data)
        except ParseError, e:
            # annotate the error with the document URI before re-raising
            e.base_uri = base_uri
            raise
        if len(data) != CHUNK: break
    if fp.base is not None:
        # HTML BASE element takes precedence over document URI
        base_uri = fp.base
    # build Label objects and an index from "for" attribute to labels
    labels = [] # all Label instances found in the document
    id_to_labels = {}
    for l in fp.labels:
        label = Label(l)
        labels.append(label)
        for_id = l["for"]
        coll = id_to_labels.get(for_id)
        if coll is None:
            id_to_labels[for_id] = [label]
        else:
            coll.append(label)
    forms = []
    for (name, action, method, enctype), attrs, controls in fp.forms:
        if action is None:
            action = base_uri
        else:
            # resolve the form action against the (possibly BASE-overridden)
            # document URI
            action = _urljoin(base_uri, action)
        action = fp.unescape_attr_if_required(action)
        name = fp.unescape_attr_if_required(name)
        attrs = fp.unescape_attrs_if_required(attrs)
        # would be nice to make HTMLForm class (form builder) pluggable
        form = HTMLForm(
            action, method, enctype, name, attrs, request_class,
            forms, labels, id_to_labels, backwards_compat)
        form._urlparse = _urlparse
        form._urlunparse = _urlunparse
        for ii in range(len(controls)):
            type, name, attrs = controls[ii]
            attrs = fp.unescape_attrs_if_required(attrs)
            name = fp.unescape_attr_if_required(name)
            # index=ii*10 allows ImageControl to return multiple ordered pairs
            form.new_control(type, name, attrs, select_default=select_default,
                             index=ii*10)
        forms.append(form)
    # ListControls pick their default selections only once all items are in
    for form in forms:
        form.fixup()
    return forms
class Label:
    """A LABEL element, linked to a control or item via its "for" attribute.

    Public attributes:

    id: value of the label's "for" HTML attribute (may be None)
    text: the label text -- whitespace-compressed, unless the owning form is
     in backwards-compatibility mode (read-only)
    attrs: dictionary of the label's HTML attributes

    """
    def __init__(self, attrs):
        self.id = attrs.get("for")
        self._text = attrs.get("__text").strip()
        self._ctext = compress_text(self._text)
        self.attrs = attrs
        self._backwards_compat = False # maintained by HTMLForm
    def __getattr__(self, name):
        # "text" is computed on access: raw text in backwards-compat mode,
        # whitespace-compressed otherwise
        if name == "text":
            if self._backwards_compat:
                return self._text
            else:
                return self._ctext
        return getattr(Label, name)
    def __setattr__(self, name, value):
        if name == "text":
            # don't see any need for this, so make it read-only
            raise AttributeError("text attribute is read-only")
        self.__dict__[name] = value
    def __str__(self):
        return "<Label(id=%r, text=%r)>" % (self.id, self.text)
def _get_label(attrs):
text = attrs.get("__label")
if text is not None:
return Label(text)
else:
return None
class Control:
    """An HTML form control.

    An HTMLForm contains a sequence of Controls.  The Controls in an HTMLForm
    are accessed using the HTMLForm.find_control method or the
    HTMLForm.controls attribute.

    Control instances are usually constructed using the ParseFile /
    ParseResponse functions, and are only properly initialised after the
    fixup method has been called.  In practice this matters only for
    ListControl instances: they are built up from ListControls each holding a
    single item, so their initial value(s) can only be known once the
    sequence is complete.

    The types and values that are acceptable for assignment to the value
    attribute are defined by subclasses.

    A true `disabled` attribute corresponds to the state browsers usually
    render 'greyed out': assigning to value raises AttributeError, and the
    control is never 'successful' as defined by the W3C HTML 4 standard --
    ie. it contributes no data to the return value of the HTMLForm.click*
    methods.  Set disabled to a false value to enable the control again.

    A true `readonly` attribute also makes value assignment raise
    AttributeError; set it to a false value to make the control writable.

    All controls have the disabled and readonly attributes, not only those
    that may have the HTML attributes of the same names.

    On assignment to the value attribute, the following exceptions are
    raised: TypeError, AttributeError (if the value attribute should not be
    assigned to, because the control is disabled, for example) and
    ValueError.

    If the name or value attributes are None, or the value is an empty list,
    or if the control is disabled, the control is not successful.

    Public attributes:

    type: string describing type of control (see the keys of the
     HTMLForm.type2class dictionary for the allowable values) (readonly)
    name: name of control (readonly)
    value: current value of control (subclasses may allow a single value, a
     sequence of values, or either)
    disabled: disabled state
    readonly: readonly state
    id: value of id HTML attribute

    """
    def __init__(self, type, name, attrs, index=None):
        """
        type: string describing type of control (see the keys of the
         HTMLForm.type2class dictionary for the allowable values)
        name: control name
        attrs: HTML attributes of control's HTML element
        """
        raise NotImplementedError()

    def add_to_form(self, form):
        # register this control with its owning form
        self._form = form
        form.controls.append(self)

    def fixup(self):
        # post-parse initialisation hook; no-op by default
        pass

    def is_of_kind(self, kind):
        raise NotImplementedError()

    def clear(self):
        raise NotImplementedError()

    def __getattr__(self, name):
        raise NotImplementedError()
    def __setattr__(self, name, value):
        raise NotImplementedError()

    def pairs(self):
        """Return list of (key, value) pairs suitable for passing to urlencode.
        """
        result = []
        for _index, key, value in self._totally_ordered_pairs():
            result.append((key, value))
        return result

    def _totally_ordered_pairs(self):
        """Return list of (key, value, index) tuples.

        Like pairs, but allows preserving correct ordering even where several
        controls are involved.

        """
        raise NotImplementedError()

    def _write_mime_data(self, mw, name, value):
        """Write data for a subitem of this control to a MimeWriter."""
        # called by HTMLForm
        part = mw.nextpart()
        part.addheader("Content-disposition",
                       'form-data; name="%s"' % name, 1)
        body = part.startbody(prefix=0)
        body.write(value)

    def __str__(self):
        raise NotImplementedError()

    def get_labels(self):
        """Return all labels (Label instances) for this control.

        If the control was surrounded by a <label> tag, that will be the first
        label; all other labels, connected by 'for' and 'id', are in the order
        that appear in the HTML.

        """
        labels = []
        if self._label:
            labels.append(self._label)
        if self.id:
            labels.extend(self._form._id_to_labels.get(self.id, ()))
        return labels
#---------------------------------------------------
class ScalarControl(Control):
    """Control whose value is not restricted to one of a prescribed set.

    Some ScalarControls don't accept any value attribute.  Otherwise, takes a
    single value, which must be string-like.

    Additional read-only public attribute:

    attrs: dictionary mapping the names of original HTML attributes of the
     control to their values

    """
    def __init__(self, type, name, attrs, index=None):
        """
        type: control type string (stored lowercased)
        name: control name, or None for a nameless control
        attrs: dictionary of the control element's HTML attributes
        index: document-order index, used to keep submission order stable
        """
        self._index = index
        self._label = _get_label(attrs)
        # name and type are read-only: stored directly in __dict__ so that
        # __setattr__ can refuse later assignment
        self.__dict__["type"] = type.lower()
        self.__dict__["name"] = name
        self._value = attrs.get("value")
        # dict.has_key() is deprecated (and absent in Python 3); the `in`
        # operator is equivalent on Python 2 dictionaries
        self.disabled = "disabled" in attrs
        self.readonly = "readonly" in attrs
        self.id = attrs.get("id")

        self.attrs = attrs.copy()

        self._clicked = False

        self._urlparse = urlparse.urlparse
        self._urlunparse = urlparse.urlunparse

    def __getattr__(self, name):
        # "value" reads the private _value slot; anything else not found the
        # normal way is a genuine missing attribute
        if name == "value":
            return self.__dict__["_value"]
        else:
            raise AttributeError("%s instance has no attribute '%s'" %
                                 (self.__class__.__name__, name))

    def __setattr__(self, name, value):
        if name == "value":
            # enforce the Control contract: string-like values only, and no
            # assignment while readonly or disabled
            if not isstringlike(value):
                raise TypeError("must assign a string")
            elif self.readonly:
                raise AttributeError("control '%s' is readonly" % self.name)
            elif self.disabled:
                raise AttributeError("control '%s' is disabled" % self.name)
            self.__dict__["_value"] = value
        elif name in ("name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value

    def _totally_ordered_pairs(self):
        name = self.name
        value = self.value
        # unsuccessful control: contributes no submission data
        if name is None or value is None or self.disabled:
            return []
        return [(self._index, name, value)]

    def clear(self):
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        self.__dict__["_value"] = None

    def __str__(self):
        name = self.name
        value = self.value
        if name is None: name = "<None>"
        if value is None: value = "<None>"

        infos = []
        if self.disabled: infos.append("disabled")
        if self.readonly: infos.append("readonly")
        info = ", ".join(infos)
        if info: info = " (%s)" % info

        return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class TextControl(ScalarControl):
    """Textual input control.

    Covers:

    INPUT/TEXT
    INPUT/PASSWORD
    INPUT/HIDDEN
    TEXTAREA

    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # hidden fields cannot be edited by a user in a browser
        if self.type == "hidden":
            self.readonly = True
        # normalise a missing value attribute to the empty string
        if self._value is None:
            self._value = ""

    def is_of_kind(self, kind):
        return kind == "text"
#---------------------------------------------------
class FileControl(ScalarControl):
    """File upload with INPUT TYPE=FILE.

    The value attribute of a FileControl is always None.  Use add_file
    instead.

    Additional public method: add_file

    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        self._value = None
        # list of (file_object, content_type, filename) tuples
        self._upload_data = []

    def is_of_kind(self, kind): return kind == "file"

    def clear(self):
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        self._upload_data = []

    def __setattr__(self, name, value):
        # value is never directly assignable -- files go through add_file
        if name in ("value", "name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value

    def add_file(self, file_object, content_type=None, filename=None):
        """Queue a file for upload with this control.

        file_object: file-like object (with a read() method) holding the data
        content_type: MIME type (defaults to application/octet-stream)
        filename: filename to report to the server (optional)

        Raises TypeError on invalid argument types.
        """
        if not hasattr(file_object, "read"):
            raise TypeError("file-like object must have read method")
        if content_type is not None and not isstringlike(content_type):
            raise TypeError("content type must be None or string-like")
        if filename is not None and not isstringlike(filename):
            raise TypeError("filename must be None or string-like")
        if content_type is None:
            content_type = "application/octet-stream"
        self._upload_data.append((file_object, content_type, filename))

    def _totally_ordered_pairs(self):
        # XXX should it be successful even if unnamed?
        if self.name is None or self.disabled:
            return []
        # the actual file data is emitted by _write_mime_data; urlencoded
        # submission carries only an empty value
        return [(self._index, self.name, "")]

    def _write_mime_data(self, mw, _name, _value):
        # called by HTMLForm
        # assert _name == self.name and _value == ''
        if len(self._upload_data) == 1:
            # single file
            file_object, content_type, filename = self._upload_data[0]
            mw2 = mw.nextpart()
            fn_part = filename and ('; filename="%s"' % filename) or ""
            disp = 'form-data; name="%s"%s' % (self.name, fn_part)
            mw2.addheader("Content-disposition", disp, prefix=1)
            fh = mw2.startbody(content_type, prefix=0)
            fh.write(file_object.read())
        elif len(self._upload_data) != 0:
            # multiple files: wrapped in a nested multipart/mixed body
            mw2 = mw.nextpart()
            disp = 'form-data; name="%s"' % self.name
            mw2.addheader("Content-disposition", disp, prefix=1)
            fh = mw2.startmultipartbody("mixed", prefix=0)
            for file_object, content_type, filename in self._upload_data:
                mw3 = mw2.nextpart()
                fn_part = filename and ('; filename="%s"' % filename) or ""
                disp = "file%s" % fn_part
                mw3.addheader("Content-disposition", disp, prefix=1)
                fh2 = mw3.startbody(content_type, prefix=0)
                fh2.write(file_object.read())
            mw2.lastpart()

    def __str__(self):
        name = self.name
        if name is None: name = "<None>"

        if not self._upload_data:
            value = "<No files added>"
        else:
            value = []
            for file, ctype, filename in self._upload_data:
                if filename is None:
                    value.append("<Unnamed file>")
                else:
                    value.append(filename)
            value = ", ".join(value)

        info = []
        if self.disabled: info.append("disabled")
        if self.readonly: info.append("readonly")
        info = ", ".join(info)
        if info: info = " (%s)" % info

        return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class IsindexControl(ScalarControl):
    """ISINDEX control.

    ISINDEX is the odd-one-out of HTML form controls.  In fact, it isn't
    really part of regular HTML forms at all, and predates it.  You're only
    allowed one ISINDEX per HTML document.  ISINDEX and regular form
    submission are mutually exclusive -- either submit a form, or the
    ISINDEX.

    Having said this, since ISINDEX controls may appear in forms (which is
    probably bad HTML), ParseFile / ParseResponse will include them in the
    HTMLForm instances it returns.  You can set the ISINDEX's value, as with
    any other control (but note that ISINDEX controls have no name, so you'll
    need to use the type argument of set_value!).  When you submit the form,
    the ISINDEX will not be successful (ie., no data will get returned to the
    server as a result of its presence), unless you click on the ISINDEX
    control, in which case the ISINDEX gets submitted instead of the form:

    form.set_value("my isindex value", type="isindex")
    urllib2.urlopen(form.click(type="isindex"))

    ISINDEX elements outside of FORMs are ignored.  If you want to submit one
    by hand, do it like so:

    url = urlparse.urljoin(page_uri, "?"+urllib.quote_plus("my isindex value"))
    result = urllib2.urlopen(url)

    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # normalise a missing value attribute to the empty string
        if self._value is None:
            self._value = ""

    def is_of_kind(self, kind): return kind in ["text", "clickable"]

    def _totally_ordered_pairs(self):
        # ISINDEX data is only ever submitted via _click, never as a regular
        # form pair
        return []

    def _click(self, form, coord, return_type, request_class=urllib2.Request):
        # Relative URL for ISINDEX submission: instead of "foo=bar+baz",
        # want "bar+baz".
        # This doesn't seem to be specified in HTML 4.01 spec. (ISINDEX is
        # deprecated in 4.01, but it should still say how to submit it).
        # Submission of ISINDEX is explained in the HTML 3.2 spec, though.
        parts = self._urlparse(form.action)
        rest, (query, frag) = parts[:-2], parts[-2:]
        # the quoted value replaces the action URI's query component
        parts = rest + (urllib.quote_plus(self.value), None)
        url = self._urlunparse(parts)
        req_data = url, None, []

        if return_type == "pairs":
            return []
        elif return_type == "request_data":
            return req_data
        else:
            return request_class(url)

    def __str__(self):
        value = self.value
        if value is None: value = "<None>"

        infos = []
        if self.disabled: infos.append("disabled")
        if self.readonly: infos.append("readonly")
        info = ", ".join(infos)
        if info: info = " (%s)" % info

        return "<%s(%s)%s>" % (self.__class__.__name__, value, info)
#---------------------------------------------------
class IgnoreControl(ScalarControl):
    """Control that we're not interested in.

    Covers:

    INPUT/RESET
    BUTTON/RESET
    INPUT/BUTTON
    BUTTON/BUTTON

    These controls are always unsuccessful, in the terminology of HTML 4
    (ie. they never require any information to be returned to the server).

    BUTTON/BUTTON is used to generate events for script embedded in HTML.

    The value attribute of IgnoreControl is always None.

    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        self._value = None

    def is_of_kind(self, kind):
        return False

    def __setattr__(self, name, value):
        # value is permanently None; name and type are read-only as usual
        if name == "value":
            raise AttributeError(
                "control '%s' is ignored, hence read-only" % self.name)
        if name in ("name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        self.__dict__[name] = value
#---------------------------------------------------
# ListControls
# helpers and subsidiary classes
class Item:
    """A single item in a ListControl: an OPTION element, or one CHECKBOX or
    RADIO input element.

    Public attributes:

    name: the item's HTML "value" attribute
    attrs: dictionary of the item's HTML attributes
    disabled: disabled state (assignable)
    selected: selection state (assignable; maintained by the owning control)
    id: value of id HTML attribute

    """
    def __init__(self, control, attrs, index=None):
        """
        control: the ListControl this item belongs to (the new item appends
         itself to control.items)
        attrs: dictionary of the item's HTML attributes
        index: document-order index of the item
        """
        label = _get_label(attrs)
        # write through __dict__: __setattr__ only accepts "selected" and
        # "disabled"
        self.__dict__.update({
            "name": attrs["value"],
            "_labels": label and [label] or [],
            "attrs": attrs,
            "_control": control,
            # dict.has_key() is deprecated (and absent in Python 3); the
            # `in` operator is equivalent on Python 2 dictionaries
            "disabled": "disabled" in attrs,
            "_selected": False,
            "id": attrs.get("id"),
            "_index": index,
            })
        control.items.append(self)

    def get_labels(self):
        """Return all labels (Label instances) for this item.

        For items that represent radio buttons or checkboxes, if the item was
        surrounded by a <label> tag, that will be the first label; all other
        labels, connected by 'for' and 'id', are in the order that appear in
        the HTML.

        For items that represent select options, if the option had a label
        attribute, that will be the first label.  If the option has contents
        (text within the option tags) and it is not the same as the label
        attribute (if any), that will be a label.  There is nothing in the
        spec to my knowledge that makes an option with an id unable to be the
        target of a label's for attribute, so those are included, if any, for
        the sake of consistency and completeness.

        """
        res = []
        res.extend(self._labels)
        if self.id:
            res.extend(self._control._form._id_to_labels.get(self.id, ()))
        return res

    def __getattr__(self, name):
        # "selected" is computed from the private _selected slot
        if name=="selected":
            return self._selected
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if name == "selected":
            # route through the owning control so single-selection
            # invariants and disabled/readonly checks are enforced
            self._control._set_selected_state(self, value)
        elif name == "disabled":
            self.__dict__["disabled"] = bool(value)
        else:
            raise AttributeError(name)

    def __str__(self):
        # "*name" marks a selected item, "(name)" a disabled one
        res = self.name
        if self.selected:
            res = "*" + res
        if self.disabled:
            res = "(%s)" % res
        return res

    def __repr__(self):
        attrs = [("name", self.name), ("id", self.id)]+self.attrs.items()
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join(["%s=%r" % (k, v) for k, v in attrs])
            )
def disambiguate(items, nr, **kwds):
    # Human-readable description of the query, for error messages.
    msg = " ".join(["%s=%r" % (key, value) for key, value in kwds.items()])
    if not items:
        raise ItemNotFoundError(msg)
    if nr is None:
        # no index given: only unambiguous single matches are allowed
        if len(items) > 1:
            raise AmbiguityError(msg)
        nr = 0
    if len(items) <= nr:
        raise ItemNotFoundError(msg)
    return items[nr]
class ListControl(Control):
"""Control representing a sequence of items.
The value attribute of a ListControl represents the successful list items
in the control. The successful list items are those that are selected and
not disabled.
ListControl implements both list controls that take a length-1 value
(single-selection) and those that take length >1 values
(multiple-selection).
ListControls accept sequence values only. Some controls only accept
sequences of length 0 or 1 (RADIO, and single-selection SELECT).
In those cases, ItemCountError is raised if len(sequence) > 1. CHECKBOXes
and multiple-selection SELECTs (those having the "multiple" HTML attribute)
accept sequences of any length.
Note the following mistake:
control.value = some_value
assert control.value == some_value # not necessarily true
The reason for this is that the value attribute always gives the list items
in the order they were listed in the HTML.
ListControl items can also be referred to by their labels instead of names.
Use the label argument to .get(), and the .set_value_by_label(),
.get_value_by_label() methods.
Note that, rather confusingly, though SELECT controls are represented in
HTML by SELECT elements (which contain OPTION elements, representing
individual list items), CHECKBOXes and RADIOs are not represented by *any*
element. Instead, those controls are represented by a collection of INPUT
elements. For example, this is a SELECT control, named "control1":
<select name="control1">
<option>foo</option>
<option value="1">bar</option>
</select>
and this is a CHECKBOX control, named "control2":
<input type="checkbox" name="control2" value="foo" id="cbe1">
<input type="checkbox" name="control2" value="bar" id="cbe2">
The id attribute of a CHECKBOX or RADIO ListControl is always that of its
first element (for example, "cbe1" above).
Additional read-only public attribute: multiple.
"""
# ListControls are built up by the parser from their component items by
# creating one ListControl per item, consolidating them into a single
# master ListControl held by the HTMLForm:
# -User calls form.new_control(...)
# -Form creates Control, and calls control.add_to_form(self).
# -Control looks for a Control with the same name and type in the form,
# and if it finds one, merges itself with that control by calling
# control.merge_control(self). The first Control added to the form, of
# a particular name and type, is the only one that survives in the
# form.
# -Form calls control.fixup for all its controls. ListControls in the
# form know they can now safely pick their default values.
# To create a ListControl without an HTMLForm, use:
# control.merge_control(new_control)
# (actually, it's much easier just to use ParseFile)
_label = None
    def __init__(self, type, name, attrs={}, select_default=False,
                 called_as_base_class=False, index=None):
        """
        select_default: for RADIO and multiple-selection SELECT controls, pick
         the first item as the default if no 'selected' HTML attribute is
         present

        called_as_base_class: guard flag -- ListControl is only ever
         constructed through its subclasses
        """
        if not called_as_base_class:
            raise NotImplementedError()

        # name and type are read-only: stored directly in __dict__ so that
        # __setattr__ can refuse later assignment.
        # NOTE: the mutable default for attrs is safe here -- it is only read.
        self.__dict__["type"] = type.lower()
        self.__dict__["name"] = name
        self._value = attrs.get("value")
        self.disabled = False
        self.readonly = False
        self.id = attrs.get("id")

        # As Controls are merged in with .merge_control(), self.attrs will
        # refer to each Control in turn -- always the most recently merged
        # control.  Each merged-in Control instance corresponds to a single
        # list item: see ListControl.__doc__.
        self.items = []
        self._form = None
        self._select_default = select_default
        self._clicked = False
    def clear(self):
        # deselect everything; assignment runs the usual __setattr__ checks
        self.value = []
def is_of_kind(self, kind):
if kind == "list":
return True
elif kind == "multilist":
return bool(self.multiple)
elif kind == "singlelist":
return not self.multiple
else:
return False
    def get_items(self, name=None, label=None, id=None,
                  exclude_disabled=False):
        """Return matching items by name or label.

        For argument docs, see the docstring for .get()

        """
        if name is not None and not isstringlike(name):
            raise TypeError("item name must be string-like")
        if label is not None and not isstringlike(label):
            raise TypeError("item label must be string-like")
        if id is not None and not isstringlike(id):
            raise TypeError("item id must be string-like")
        items = [] # order is important
        compat = self._form.backwards_compat
        for o in self.items:
            if exclude_disabled and o.disabled:
                continue
            if name is not None and o.name != name:
                continue
            if label is not None:
                # backwards-compat mode: exact label match; otherwise
                # substring match.  The for-else skips the item if no label
                # matched.
                for l in o.get_labels():
                    if ((compat and l.text == label) or
                        (not compat and l.text.find(label) > -1)):
                        break
                else:
                    continue
            if id is not None and o.id != id:
                continue
            items.append(o)
        return items
    def get(self, name=None, label=None, id=None, nr=None,
            exclude_disabled=False):
        """Return item by name or label, disambiguating if necessary with nr.

        All arguments must be passed by name, with the exception of 'name',
        which may be used as a positional argument.

        If name is specified, then the item must have the indicated name.

        If label is specified, then the item must have a label whose
        whitespace-compressed, stripped, text substring-matches the indicated
        label string (eg. label="please choose" will match
        "  Do please choose an item ").

        If id is specified, then the item must have the indicated id.

        nr is an optional 0-based index of the items matching the query.

        If nr is the default None value and more than one item is found,
        raises AmbiguityError (unless the HTMLForm instance's backwards_compat
        attribute is true).

        If no item is found, or if items are found but nr is specified and not
        found, raises ItemNotFoundError.

        Optionally excludes disabled items.

        """
        # backwards-compat mode: an unspecified nr silently means "first"
        if nr is None and self._form.backwards_compat:
            nr = 0 # :-/
        items = self.get_items(name, label, id, exclude_disabled)
        return disambiguate(items, nr, name=name, label=label, id=id)
def _get(self, name, by_label=False, nr=None, exclude_disabled=False):
# strictly for use by deprecated methods
if by_label:
name, label = None, name
else:
name, label = name, None
return self.get(name, label, nr, exclude_disabled)
    def toggle(self, name, by_label=False, nr=None):
        """Deprecated: given a name or label and optional disambiguating index
        nr, toggle the matching item's selection.

        Selecting items follows the behavior described in the docstring of the
        'get' method.

        If the item is disabled, or this control is disabled or readonly,
        raise AttributeError.

        """
        deprecation(
            "item = control.get(...); item.selected = not item.selected")
        o = self._get(name, by_label, nr)
        self._set_selected_state(o, not o.selected)
    def set(self, selected, name, by_label=False, nr=None):
        """Deprecated: given a name or label and optional disambiguating index
        nr, set the matching item's selection to the bool value of selected.

        Selecting items follows the behavior described in the docstring of the
        'get' method.

        If the item is disabled, or this control is disabled or readonly,
        raise AttributeError.

        """
        deprecation(
            "control.get(...).selected = <boolean>")
        self._set_selected_state(self._get(name, by_label, nr), selected)
def _set_selected_state(self, item, action):
# action:
# bool False: off
# bool True: on
if self.disabled:
raise AttributeError("control '%s' is disabled" % self.name)
if self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
action == bool(action)
compat = self._form.backwards_compat
if not compat and item.disabled:
raise AttributeError("item is disabled")
else:
if compat and item.disabled and action:
raise AttributeError("item is disabled")
if self.multiple:
item.__dict__["_selected"] = action
else:
if not action:
item.__dict__["_selected"] = False
else:
for o in self.items:
o.__dict__["_selected"] = False
item.__dict__["_selected"] = True
def toggle_single(self, by_label=None):
"""Deprecated: toggle the selection of the single item in this control.
Raises ItemCountError if the control does not contain only one item.
by_label argument is ignored, and included only for backwards
compatibility.
"""
deprecation(
"control.items[0].selected = not control.items[0].selected")
if len(self.items) != 1:
raise ItemCountError(
"'%s' is not a single-item control" % self.name)
item = self.items[0]
self._set_selected_state(item, not item.selected)
    def set_single(self, selected, by_label=None):
        """Deprecated: set the selection of the single item in this control.

        Raises ItemCountError if the control does not contain only one item.

        by_label argument is ignored, and included only for backwards
        compatibility.

        """
        deprecation(
            "control.items[0].selected = <boolean>")
        if len(self.items) != 1:
            raise ItemCountError(
                "'%s' is not a single-item control" % self.name)
        self._set_selected_state(self.items[0], selected)
    def get_item_disabled(self, name, by_label=False, nr=None):
        """Deprecated: get disabled state of named list item in a ListControl.

        Use control.get(...).disabled instead.
        """
        deprecation(
            "control.get(...).disabled")
        return self._get(name, by_label, nr).disabled
    def set_item_disabled(self, disabled, name, by_label=False, nr=None):
        """Deprecated: set disabled state of named list item in a ListControl.

        disabled: boolean disabled state

        Use control.get(...).disabled = <boolean> instead.
        """
        deprecation(
            "control.get(...).disabled = <boolean>")
        self._get(name, by_label, nr).disabled = disabled
def set_all_items_disabled(self, disabled):
"""Set disabled state of all list items in a ListControl.
disabled: boolean disabled state
"""
for o in self.items:
o.disabled = disabled
    def get_item_attrs(self, name, by_label=False, nr=None):
        """Deprecated: return dictionary of HTML attributes for a single
        ListControl item.

        The HTML element types that describe list items are: OPTION for SELECT
        controls, INPUT for the rest.  These elements have HTML attributes that
        you may occasionally want to know about -- for example, the "alt" HTML
        attribute gives a text string describing the item (graphical browsers
        usually display this as a tooltip).

        The returned dictionary maps HTML attribute names to values.  The names
        and values are taken from the original HTML.

        Use control.get(...).attrs instead.
        """
        deprecation(
            "control.get(...).attrs")
        return self._get(name, by_label, nr).attrs
    def add_to_form(self, form):
        assert self._form is None or form == self._form, (
            "can't add control to more than one form")
        self._form = form
        if self.name is None:
            # always count nameless elements as separate controls
            Control.add_to_form(self, form)
        else:
            # merge into an existing control of the same name and type if
            # one is already on the form; otherwise register as new
            try:
                control = form.find_control(self.name, self.type)
            except (ControlNotFoundError, AmbiguityError):
                Control.add_to_form(self, form)
            else:
                control.merge_control(self)
    def merge_control(self, control):
        """Absorb another (usually single-item) control's items into this one."""
        assert bool(control.multiple) == bool(self.multiple)
        # usually, isinstance(control, self.__class__)
        self.items.extend(control.items)
def fixup(self):
    """
    ListControls are built up from component list items (which are also
    ListControls) during parsing.  This method should be called after all
    items have been added.  See ListControl.__doc__ for the reason this is
    required.
    """
    # Need to set default selection where no item was indicated as being
    # selected by the HTML:

    # CHECKBOX:
    #  Nothing should be selected.
    # SELECT/single, SELECT/multiple and RADIO:
    #  RFC 1866 (HTML 2.0): says first item should be selected.
    #  W3C HTML 4.01 Specification: says that client behaviour is
    #   undefined in this case.  For RADIO, exactly one must be selected,
    #   though which one is undefined.
    #  Both Netscape and Microsoft Internet Explorer (IE) choose first
    #   item for SELECT/single.  However, both IE5 and Mozilla (both 1.0
    #   and Firebird 0.6) leave all items unselected for RADIO and
    #   SELECT/multiple.

    # Since both Netscape and IE all choose the first item for
    # SELECT/single, we do the same.  OTOH, both Netscape and IE
    # leave SELECT/multiple with nothing selected, in violation of RFC 1866
    # (but not in violation of the W3C HTML 4 standard); the same is true
    # of RADIO (which *is* in violation of the HTML 4 standard).  We follow
    # RFC 1866 if the _select_default attribute is set, and Netscape and IE
    # otherwise.  RFC 1866 and HTML 4 are always violated insofar as you
    # can deselect all items in a RadioControl.

    # The default-selection logic itself lives in subclass fixup methods
    # (RadioControl.fixup, SelectControl.fixup); this base implementation
    # only re-parents the merged items.
    for o in self.items:
        # set items' controls to self, now that we've merged
        o.__dict__["_control"] = self
def __getattr__(self, name):
    # "value" is a computed attribute: the names of the successful items
    # (selected, and either not disabled or in backwards-compat mode),
    # in the order they appeared in the HTML.
    if name == "value":
        compat = self._form.backwards_compat
        if self.name is None:
            # a nameless control can never be successful
            return []
        return [o.name for o in self.items if o.selected and
                (not o.disabled or compat)]
    else:
        raise AttributeError("%s instance has no attribute '%s'" %
                             (self.__class__.__name__, name))
def __setattr__(self, name, value):
    # Guard the computed "value" attribute (delegated to _set_value) and
    # keep name/type/multiple immutable; everything else is stored
    # directly in the instance dict.
    if name == "value":
        if self.disabled:
            raise AttributeError("control '%s' is disabled" % self.name)
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        self._set_value(value)
    elif name in ("name", "type", "multiple"):
        raise AttributeError("%s attribute is readonly" % name)
    else:
        self.__dict__[name] = value
def _set_value(self, value):
    # value must be a (possibly empty) sequence of item names -- a bare
    # string is rejected explicitly since strings are themselves
    # sequences and would silently mis-select per-character
    if value is None or isstringlike(value):
        raise TypeError("ListControl, must set a sequence")
    if not value:
        # empty sequence: deselect everything we are allowed to touch
        compat = self._form.backwards_compat
        for o in self.items:
            if not o.disabled or compat:
                o.selected = False
    elif self.multiple:
        self._multiple_set_value(value)
    elif len(value) > 1:
        raise ItemCountError(
            "single selection list, must set sequence of "
            "length 0 or 1")
    else:
        self._single_set_value(value)
def _get_items(self, name, target=1):
    """Return (selected, unselected) non-disabled items named *name*.

    Raises ItemNotFoundError when fewer than *target* items exist at
    all, AttributeError when enough exist but too many are disabled.
    """
    all_items = self.get_items(name)
    usable = [item for item in all_items if not item.disabled]
    if len(usable) < target:
        if len(all_items) < target:
            raise ItemNotFoundError(
                "insufficient items with name %r" % name)
        raise AttributeError(
            "insufficient non-disabled items with name %s" % name)
    on = [item for item in usable if item.selected]
    off = [item for item in usable if not item.selected]
    return on, off
def _single_set_value(self, value):
    # value is a sequence containing exactly one item name
    assert len(value) == 1
    on, off = self._get_items(value[0])
    assert len(on) <= 1
    if not on:
        # nothing with this name is selected yet: select the first
        # available item.  (If one was already selected there is nothing
        # to do -- single-selection controls deselect others implicitly.)
        off[0].selected = True
def _multiple_set_value(self, value):
    # value is a sequence of item names, possibly with repeats (repeats
    # select multiple same-named items)
    compat = self._form.backwards_compat
    turn_on = []  # transactional-ish
    # start by assuming everything currently selected gets deselected,
    # then rescue the items named in value
    turn_off = [item for item in self.items if
                item.selected and (not item.disabled or compat)]
    # count occurrences of each requested name
    names = {}
    for nn in value:
        if nn in names.keys():
            names[nn] += 1
        else:
            names[nn] = 1
    for name, count in names.items():
        # _get_items raises before any selection state has been mutated,
        # which is what makes this roughly transactional
        on, off = self._get_items(name, count)
        for i in range(count):
            if on:
                # already selected: keep it (remove from the off-list)
                item = on[0]
                del on[0]
                del turn_off[turn_off.index(item)]
            else:
                item = off[0]
                del off[0]
                turn_on.append(item)
    for item in turn_off:
        item.selected = False
    for item in turn_on:
        item.selected = True
def set_value_by_label(self, value):
    """Set the value of control by item labels.

    value is expected to be an iterable of strings that are substrings of
    the item labels that should be selected.  Before substring matching is
    performed, the original label text is whitespace-compressed
    (consecutive whitespace characters are converted to a single space
    character) and leading and trailing whitespace is stripped.  Ambiguous
    labels are accepted without complaint if the form's backwards_compat is
    True; otherwise, it will not complain as long as all ambiguous labels
    share the same item name (e.g. OPTION value).
    """
    if isstringlike(value):
        # same guard as _set_value: a bare string is a likely bug
        raise TypeError(value)
    if not self.multiple and len(value) > 1:
        raise ItemCountError(
            "single selection list, must set sequence of "
            "length 0 or 1")
    items = []
    for nn in value:
        found = self.get_items(label=nn)
        if len(found) > 1:
            if not self._form.backwards_compat:
                # ambiguous labels are fine as long as item names (e.g.
                # OPTION values) are same
                opt_name = found[0].name
                if [o for o in found[1:] if o.name != opt_name]:
                    raise AmbiguityError(nn)
            else:
                # OK, we'll guess :-( Assume first available item.
                found = found[:1]
        for o in found:
            # For the multiple-item case, we could try to be smarter,
            # saving them up and trying to resolve, but that's too much.
            if self._form.backwards_compat or o not in items:
                items.append(o)
                break
        else:  # all of them are used
            raise ItemNotFoundError(nn)
    # now we have all the items that should be on
    # let's just turn everything off and then back on.
    self.value = []
    for o in items:
        o.selected = True
def get_value_by_label(self):
    """Return the control's value as a list of normalized item labels.

    Successful items with no non-empty label contribute None.
    """
    compat = self._form.backwards_compat
    labels = []
    for item in self.items:
        if not item.selected:
            continue
        if item.disabled and not compat:
            continue
        text = None
        for label in item.get_labels():
            if label.text:
                text = label.text
                break
        labels.append(text)
    return labels
def possible_items(self, by_label=False):
    """Deprecated: return the names or labels of all possible items.

    Includes disabled items, which may be misleading for some use cases.
    """
    deprecation(
        "[item.name for item in self.items]")
    if not by_label:
        return [item.name for item in self.items]
    res = []
    for item in self.items:
        text = None
        for label in item.get_labels():
            if label.text:
                text = label.text
                break
        res.append(text)
    return res
def _totally_ordered_pairs(self):
    """Return (index, control-name, item-name) pairs for successful items."""
    # disabled or nameless controls contribute nothing to submission
    if self.disabled or self.name is None:
        return []
    pairs = []
    for item in self.items:
        if item.selected and not item.disabled:
            pairs.append((item._index, self.name, item.name))
    return pairs
def __str__(self):
    """Render as <ClassName(name=[item, item, ...]) (flags)>."""
    name = "<None>" if self.name is None else self.name
    items = ", ".join(str(item) for item in self.items)
    flags = [flag for flag, is_set in (("disabled", self.disabled),
                                       ("readonly", self.readonly))
             if is_set]
    suffix = " (%s)" % ", ".join(flags) if flags else ""
    return "<%s(%s=[%s])%s>" % (self.__class__.__name__,
                                name, items, suffix)
class RadioControl(ListControl):
    """
    Covers:

    INPUT/RADIO

    A single-selection list control; elements sharing a name are merged
    into one RadioControl (see ListControl.add_to_form).
    """
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # browsers submit "on" when a radio button has no value attribute
        attrs.setdefault("value", "on")
        ListControl.__init__(self, type, name, attrs, select_default,
                             called_as_base_class=True, index=index)
        self.__dict__["multiple"] = False
        o = Item(self, attrs, index)
        # write through __dict__ to record the initial HTML state without
        # triggering selection-change side effects -- presumably Item
        # guards normal assignment; confirm against Item.__setattr__
        o.__dict__["_selected"] = attrs.has_key("checked")
    def fixup(self):
        ListControl.fixup(self)
        found = [o for o in self.items if o.selected and not o.disabled]
        if not found:
            if self._select_default:
                # RFC 1866 behaviour: default-select the first usable item
                for o in self.items:
                    if not o.disabled:
                        o.selected = True
                        break
        else:
            # Ensure only one item selected.  Choose the last one,
            # following IE and Firefox.
            for o in found[:-1]:
                o.selected = False
    def get_labels(self):
        # labels belong to the individual radio items, not the merged control
        return []
class CheckboxControl(ListControl):
    """
    Covers:

    INPUT/CHECKBOX

    A multiple-selection list control; checkbox elements sharing a name
    are merged into a single CheckboxControl.
    """
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # browsers submit "on" for checkboxes with no value attribute
        attrs.setdefault("value", "on")
        ListControl.__init__(self, type, name, attrs, select_default,
                             called_as_base_class=True, index=index)
        self.__dict__["multiple"] = True
        item = Item(self, attrs, index)
        # record initial HTML state without triggering selection guards
        item.__dict__["_selected"] = attrs.has_key("checked")
    def get_labels(self):
        # labels apply to the individual items, not the merged control
        return []
class SelectControl(ListControl):
    """
    Covers:

    SELECT (and OPTION)

    OPTION 'values', in HTML parlance, are Item 'names' in ClientForm parlance.

    SELECT control values and labels are subject to some messy defaulting
    rules.  For example, if the HTML representation of the control is:

    <SELECT name=year>
      <OPTION value=0 label="2002">current year</OPTION>
      <OPTION value=1>2001</OPTION>
      <OPTION>2000</OPTION>
    </SELECT>

    The items, in order, have labels "2002", "2001" and "2000", whereas their
    names (the OPTION values) are "0", "1" and "2000" respectively.  Note that
    the value of the last OPTION in this example defaults to its contents, as
    specified by RFC 1866, as do the labels of the second and third OPTIONs.

    The OPTION labels are sometimes more meaningful than the OPTION values,
    which can make for more maintainable code.

    Additional read-only public attribute: attrs

    The attrs attribute is a dictionary of the original HTML attributes of the
    SELECT element.  Other ListControls do not have this attribute, because in
    other cases the control as a whole does not correspond to any single HTML
    element.  control.get(...).attrs may be used as usual to get at the HTML
    attributes of the HTML elements corresponding to individual list items (for
    SELECT controls, these are OPTION elements).

    Another special case is that the Item.attrs dictionaries have a special key
    "contents" which does not correspond to any real HTML attribute, but rather
    contains the contents of the OPTION element:

    <OPTION>this bit</OPTION>
    """
    # HTML attributes here are treated slightly differently from other list
    # controls:
    # -The SELECT HTML attributes dictionary is stuffed into the OPTION
    #  HTML attributes dictionary under the "__select" key.
    # -The content of each OPTION element is stored under the special
    #  "contents" key of the dictionary.
    # After all this, the dictionary is passed to the SelectControl constructor
    # as the attrs argument, as usual.  However:
    # -The first SelectControl constructed when building up a SELECT control
    #  has a constructor attrs argument containing only the __select key -- so
    #  this SelectControl represents an empty SELECT control.
    # -Subsequent SelectControls have both OPTION HTML-attribute in attrs and
    #  the __select dictionary containing the SELECT HTML-attributes.
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # fish out the SELECT HTML attributes from the OPTION HTML attributes
        # dictionary
        self.attrs = attrs["__select"].copy()
        self.__dict__["_label"] = _get_label(self.attrs)
        self.__dict__["id"] = self.attrs.get("id")
        self.__dict__["multiple"] = self.attrs.has_key("multiple")
        # the majority of the contents, label, and value dance already happened
        contents = attrs.get("contents")
        attrs = attrs.copy()
        del attrs["__select"]

        ListControl.__init__(self, type, name, self.attrs, select_default,
                             called_as_base_class=True, index=index)
        self.disabled = self.attrs.has_key("disabled")
        self.readonly = self.attrs.has_key("readonly")
        if attrs.has_key("value"):
            # otherwise it is a marker 'select started' token
            o = Item(self, attrs, index)
            o.__dict__["_selected"] = attrs.has_key("selected")
            # add 'label' label and contents label, if different.  If both are
            # provided, the 'label' label is used for display in HTML
            # 4.0-compliant browsers (and any lower spec? not sure) while the
            # contents are used for display in older or less-compliant
            # browsers.  We make label objects for both, if the values are
            # different.
            label = attrs.get("label")
            if label:
                o._labels.append(Label({"__text": label}))
                if contents and contents != label:
                    o._labels.append(Label({"__text": contents}))
            elif contents:
                o._labels.append(Label({"__text": contents}))
    def fixup(self):
        ListControl.fixup(self)
        # Firefox doesn't exclude disabled items from those considered here
        # (i.e. from 'found', for both branches of the if below).  Note that
        # IE6 doesn't support the disabled attribute on OPTIONs at all.
        found = [o for o in self.items if o.selected]
        if not found:
            if not self.multiple or self._select_default:
                for o in self.items:
                    if not o.disabled:
                        # temporarily clear the control-level disabled flag
                        # so the default selection can be applied, then
                        # restore it
                        was_disabled = self.disabled
                        self.disabled = False
                        try:
                            o.selected = True
                        finally:
                            o.disabled = was_disabled
                        break
        elif not self.multiple:
            # Ensure only one item selected.  Choose the last one,
            # following IE and Firefox.
            for o in found[:-1]:
                o.selected = False
#---------------------------------------------------
class SubmitControl(ScalarControl):
    """
    Covers:

    INPUT/SUBMIT
    BUTTON/SUBMIT
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # IE5 defaults SUBMIT value to "Submit Query"; Firebird 0.6 leaves it
        # blank, Konqueror 3.1 defaults to "Submit".  HTML spec. doesn't seem
        # to define this.  Note the value must be assigned before the control
        # is made readonly.
        if self.value is None:
            self.value = ""
        self.readonly = True
    def get_labels(self):
        """The button's value doubles as a label, ahead of any HTML labels."""
        labels = []
        if self.value:
            labels.append(Label({"__text": self.value}))
        labels.extend(ScalarControl.get_labels(self))
        return labels
    def is_of_kind(self, kind):
        return kind == "clickable"
    def _click(self, form, coord, return_type, request_class=urllib2.Request):
        # record the click coordinate only for the duration of the
        # request-building call, so only this button is "successful"
        self._clicked = coord
        result = form._switch_click(return_type, request_class)
        self._clicked = False
        return result
    def _totally_ordered_pairs(self):
        # an un-clicked submit button contributes no submission data
        if not self._clicked:
            return []
        return ScalarControl._totally_ordered_pairs(self)
#---------------------------------------------------
class ImageControl(SubmitControl):
    """
    Covers:

    INPUT/IMAGE

    Coordinates are specified using one of the HTMLForm.click* methods.
    """
    def __init__(self, type, name, attrs, index=None):
        SubmitControl.__init__(self, type, name, attrs, index)
        self.readonly = False
    def _totally_ordered_pairs(self):
        clicked = self._clicked
        # disabled, un-clicked, or nameless image buttons submit nothing
        if self.disabled or not clicked:
            return []
        if self.name is None:
            return []
        # an image button submits its click coordinates as name.x / name.y
        pairs = [(self._index, "%s.x" % self.name, str(clicked[0])),
                 (self._index + 1, "%s.y" % self.name, str(clicked[1]))]
        if self._value:
            pairs.append((self._index + 2, self.name, self._value))
        return pairs
    get_labels = ScalarControl.get_labels
# aliases, just to make str(control) and str(form) clearer
class PasswordControl(TextControl): pass
class HiddenControl(TextControl): pass
class TextareaControl(TextControl): pass
class SubmitButtonControl(SubmitControl): pass

# predicate used when filtering a form's controls for list-type controls
def is_listcontrol(control): return control.is_of_kind("list")
class HTMLForm:
"""Represents a single HTML <form> ... </form> element.
A form consists of a sequence of controls that usually have names, and
which can take on various values. The values of the various types of
controls represent variously: text, zero-or-one-of-many or many-of-many
choices, and files to be uploaded. Some controls can be clicked on to
submit the form, and clickable controls' values sometimes include the
coordinates of the click.
Forms can be filled in with data to be returned to the server, and then
submitted, using the click method to generate a request object suitable for
passing to urllib2.urlopen (or the click_request_data or click_pairs
methods if you're not using urllib2).
import ClientForm
forms = ClientForm.ParseFile(html, base_uri)
form = forms[0]
form["query"] = "Python"
form.find_control("nr_results").get("lots").selected = True
response = urllib2.urlopen(form.click())
Usually, HTMLForm instances are not created directly. Instead, the
ParseFile or ParseResponse factory functions are used. If you do construct
HTMLForm objects yourself, however, note that an HTMLForm instance is only
properly initialised after the fixup method has been called (ParseFile and
ParseResponse do this for you). See ListControl.__doc__ for the reason
this is required.
Indexing a form (form["control_name"]) returns the named Control's value
attribute. Assignment to a form index (form["control_name"] = something)
is equivalent to assignment to the named Control's value attribute. If you
need to be more specific than just supplying the control's name, use the
set_value and get_value methods.
ListControl values are lists of item names (specifically, the names of the
items that are selected and not disabled, and hence are "successful" -- ie.
cause data to be returned to the server). The list item's name is the
value of the corresponding HTML element's"value" attribute.
Example:
<INPUT type="CHECKBOX" name="cheeses" value="leicester"></INPUT>
<INPUT type="CHECKBOX" name="cheeses" value="cheddar"></INPUT>
defines a CHECKBOX control with name "cheeses" which has two items, named
"leicester" and "cheddar".
Another example:
<SELECT name="more_cheeses">
<OPTION>1</OPTION>
<OPTION value="2" label="CHEDDAR">cheddar</OPTION>
</SELECT>
defines a SELECT control with name "more_cheeses" which has two items,
named "1" and "2" (because the OPTION element's value HTML attribute
defaults to the element contents -- see SelectControl.__doc__ for more on
these defaulting rules).
To select, deselect or otherwise manipulate individual list items, use the
HTMLForm.find_control() and ListControl.get() methods. To set the whole
value, do as for any other control: use indexing or the set_/get_value
methods.
Example:
# select *only* the item named "cheddar"
form["cheeses"] = ["cheddar"]
# select "cheddar", leave other items unaffected
form.find_control("cheeses").get("cheddar").selected = True
Some controls (RADIO and SELECT without the multiple attribute) can only
have zero or one items selected at a time. Some controls (CHECKBOX and
SELECT with the multiple attribute) can have multiple items selected at a
time. To set the whole value of a ListControl, assign a sequence to a form
index:
form["cheeses"] = ["cheddar", "leicester"]
If the ListControl is not multiple-selection, the assigned list must be of
length one.
To check if a control has an item, if an item is selected, or if an item is
successful (selected and not disabled), respectively:
"cheddar" in [item.name for item in form.find_control("cheeses").items]
"cheddar" in [item.name for item in form.find_control("cheeses").items and
item.selected]
"cheddar" in form["cheeses"] # (or "cheddar" in form.get_value("cheeses"))
Note that some list items may be disabled (see below).
Note the following mistake:
form[control_name] = control_value
assert form[control_name] == control_value # not necessarily true
The reason for this is that form[control_name] always gives the list items
in the order they were listed in the HTML.
List items (hence list values, too) can be referred to in terms of list
item labels rather than list item names using the appropriate label
arguments. Note that each item may have several labels.
The question of default values of OPTION contents, labels and values is
somewhat complicated: see SelectControl.__doc__ and
ListControl.get_item_attrs.__doc__ if you think you need to know.
Controls can be disabled or readonly. In either case, the control's value
cannot be changed until you clear those flags (see example below).
Disabled is the state typically represented by browsers by 'greying out' a
control. Disabled controls are not 'successful' -- they don't cause data
to get returned to the server. Readonly controls usually appear in
browsers as read-only text boxes. Readonly controls are successful. List
items can also be disabled. Attempts to select or deselect disabled items
fail with AttributeError.
If a lot of controls are readonly, it can be useful to do this:
form.set_all_readonly(False)
To clear a control's value attribute, so that it is not successful (until a
value is subsequently set):
form.clear("cheeses")
More examples:
control = form.find_control("cheeses")
control.disabled = False
control.readonly = False
control.get("gruyere").disabled = True
control.items[0].selected = True
See the various Control classes for further documentation. Many methods
take name, type, kind, id, label and nr arguments to specify the control to
be operated on: see HTMLForm.find_control.__doc__.
ControlNotFoundError (subclass of ValueError) is raised if the specified
control can't be found. This includes occasions where a non-ListControl
is found, but the method (set, for example) requires a ListControl.
ItemNotFoundError (subclass of ValueError) is raised if a list item can't
be found. ItemCountError (subclass of ValueError) is raised if an attempt
is made to select more than one item and the control doesn't allow that, or
set/get_single are called and the control contains more than one item.
AttributeError is raised if a control or item is readonly or disabled and
an attempt is made to alter its value.
Security note: Remember that any passwords you store in HTMLForm instances
will be saved to disk in the clear if you pickle them (directly or
indirectly). The simplest solution to this is to avoid pickling HTMLForm
objects. You could also pickle before filling in any password, or just set
the password to "" before pickling.
Public attributes:
action: full (absolute URI) form action
method: "GET" or "POST"
enctype: form transfer encoding MIME type
name: name of form (None if no name was specified)
attrs: dictionary mapping original HTML form attributes to their values
controls: list of Control instances; do not alter this list
(instead, call form.new_control to make a Control and add it to the
form, or control.add_to_form if you already have a Control instance)
Methods for form filling:
-------------------------
Most of the these methods have very similar arguments. See
HTMLForm.find_control.__doc__ for details of the name, type, kind, label
and nr arguments.
def find_control(self,
name=None, type=None, kind=None, id=None, predicate=None,
nr=None, label=None)
get_value(name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
set_value(value,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
clear_all()
clear(name=None, type=None, kind=None, id=None, nr=None, label=None)
set_all_readonly(readonly)
Method applying only to FileControls:
add_file(file_object,
content_type="application/octet-stream", filename=None,
name=None, id=None, nr=None, label=None)
Methods applying only to clickable controls:
click(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
click_request_data(name=None, type=None, id=None, nr=0, coord=(1,1),
label=None)
click_pairs(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
"""
# Maps the (lowercased) control type string to the Control subclass used
# to represent it; consulted by HTMLForm.new_control.
type2class = {
    "text": TextControl,
    "password": PasswordControl,
    "hidden": HiddenControl,
    "textarea": TextareaControl,

    "isindex": IsindexControl,

    "file": FileControl,

    "button": IgnoreControl,
    "buttonbutton": IgnoreControl,
    "reset": IgnoreControl,
    "resetbutton": IgnoreControl,

    "submit": SubmitControl,
    "submitbutton": SubmitButtonControl,
    "image": ImageControl,

    "radio": RadioControl,
    "checkbox": CheckboxControl,
    "select": SelectControl,
    }
#---------------------------------------------------
# Initialisation. Use ParseResponse / ParseFile instead.
def __init__(self, action, method="GET",
             enctype="application/x-www-form-urlencoded",
             name=None, attrs=None,
             request_class=urllib2.Request,
             forms=None, labels=None, id_to_labels=None,
             backwards_compat=True):
    """
    In the usual case, use ParseResponse (or ParseFile) to create new
    HTMLForm objects.

    action: full (absolute URI) form action
    method: "GET" or "POST"
    enctype: form transfer encoding MIME type
    name: name of form (None if no name was specified)
    attrs: dictionary mapping original HTML form attributes to their values
    request_class: class used to build request objects in click();
        defaults to urllib2.Request
    backwards_compat: see HTMLForm.__doc__ / __setattr__
    """
    self.action = action
    self.method = method
    self.enctype = enctype
    self.name = name
    if attrs is not None:
        self.attrs = attrs.copy()
    else:
        self.attrs = {}
    self.controls = []
    self._request_class = request_class

    # these attributes are used by zope.testbrowser
    self._forms = forms  # this is a semi-public API!
    self._labels = labels  # this is a semi-public API!
    self._id_to_labels = id_to_labels  # this is a semi-public API!

    # note: assignment goes through __setattr__, which stores the flag
    # under _backwards_compat and propagates it to item labels
    self.backwards_compat = backwards_compat  # note __setattr__

    # URL-handling functions bound per-instance -- presumably so they can
    # be substituted in tests/subclasses; confirm against callers
    self._urlunparse = urlparse.urlunparse
    self._urlparse = urlparse.urlparse
def __getattr__(self, name):
    # Only called when normal attribute lookup fails.  backwards_compat
    # is stored under the mangled name _backwards_compat (see
    # __setattr__); anything else falls back to class attributes, which
    # raises AttributeError for genuinely unknown names.
    if name == "backwards_compat":
        return self._backwards_compat
    return getattr(HTMLForm, name)
def __setattr__(self, name, value):
    # yuck
    if name == "backwards_compat":
        # store under a mangled name (read back by __getattr__) and push
        # the normalized boolean flag down onto every label of every
        # list-control item
        name = "_backwards_compat"
        value = bool(value)
        for cc in self.controls:
            try:
                items = cc.items
            except AttributeError:
                # not a list control -- nothing to propagate to
                continue
            else:
                for ii in items:
                    for ll in ii.get_labels():
                        ll._backwards_compat = value
    self.__dict__[name] = value
def new_control(self, type, name, attrs,
                ignore_unknown=False, select_default=False, index=None):
    """Adds a new control to the form.

    This is usually called by ParseFile and ParseResponse.  Don't call it
    yourself unless you're building your own Control instances.

    Note that controls representing lists of items are built up from
    controls holding only a single list item.  See ListControl.__doc__ for
    further information.

    type: type of control (see Control.__doc__ for a list)
    attrs: HTML attributes of control
    ignore_unknown: if true, use a dummy Control instance for controls of
     unknown type; otherwise, use a TextControl
    select_default: for RADIO and multiple-selection SELECT controls, pick
     the first item as the default if no 'selected' HTML attribute is
     present (this defaulting happens when the HTMLForm.fixup method is
     called)
    index: index of corresponding element in HTML (see
     MoreFormTests.test_interspersed_controls for motivation)
    """
    type = type.lower()
    klass = self.type2class.get(type)
    if klass is None:
        if ignore_unknown:
            klass = IgnoreControl
        else:
            klass = TextControl

    a = attrs.copy()
    if issubclass(klass, ListControl):
        control = klass(type, name, a, select_default, index)
    else:
        control = klass(type, name, a, index)
    control.add_to_form(self)
    # share the form's (substitutable) URL-handling functions
    control._urlparse = self._urlparse
    control._urlunparse = self._urlunparse
def fixup(self):
    """Normalise form after all controls have been added.

    This is usually called by ParseFile and ParseResponse.  Don't call it
    yourself unless you're building your own Control instances.

    This method should only be called once, after all controls have been
    added to the form.
    """
    for control in self.controls:
        control.fixup()
    # re-assignment goes through __setattr__, which propagates the compat
    # flag onto the labels of the now-complete set of items
    self.backwards_compat = self._backwards_compat
#---------------------------------------------------
def __str__(self):
    """Render the form as a header line plus one line per control."""
    name_prefix = self.name and self.name + " " or ""
    lines = ["%s%s %s %s" % (name_prefix, self.method, self.action,
                             self.enctype)]
    lines.extend(" %s" % str(control) for control in self.controls)
    return "<%s>" % "\n".join(lines)
#---------------------------------------------------
# Form-filling methods.
def __getitem__(self, name):
    # form["name"] is shorthand for form.find_control("name").value
    return self.find_control(name).value
def __contains__(self, name):
    """Return whether the form has a control with the given name.

    Bug fix: find_control raises ControlNotFoundError for unknown names,
    so the previous ``bool(self.find_control(name))`` made ``name in
    form`` raise instead of returning False.  A membership test should
    never raise for an absent key.
    """
    try:
        self.find_control(name)
    except ControlNotFoundError:
        return False
    return True
def __setitem__(self, name, value):
    # form["name"] = v is shorthand for find_control("name").value = v.
    # Readonly/disabled AttributeErrors are converted to ValueError so
    # mapping-style assignment raises a mapping-style error.
    control = self.find_control(name)
    try:
        control.value = value
    except AttributeError, e:
        raise ValueError(str(e))
def get_value(self,
              name=None, type=None, kind=None, id=None, nr=None,
              by_label=False,  # by_label is deprecated
              label=None):
    """Return value of control.

    If only name and value arguments are supplied, equivalent to

    form[name]
    """
    if by_label:
        deprecation("form.get_value_by_label(...)")
    control = self.find_control(name, type, kind, id, label=label, nr=nr)
    if not by_label:
        return control.value
    try:
        by_label_getter = control.get_value_by_label
    except AttributeError:
        raise NotImplementedError(
            "control '%s' does not yet support by_label" % control.name)
    return by_label_getter()
def set_value(self, value,
              name=None, type=None, kind=None, id=None, nr=None,
              by_label=False,  # by_label is deprecated
              label=None):
    """Set value of control.

    If only name and value arguments are supplied, equivalent to

    form[name] = value
    """
    if by_label:
        # Bug fix: the deprecation message previously pointed users at
        # form.get_value_by_label(...); the setter's replacement is
        # set_value_by_label.
        deprecation("form.set_value_by_label(...)")
    c = self.find_control(name, type, kind, id, label=label, nr=nr)
    if by_label:
        try:
            meth = c.set_value_by_label
        except AttributeError:
            raise NotImplementedError(
                "control '%s' does not yet support by_label" % c.name)
        else:
            meth(value)
    else:
        c.value = value
def get_value_by_label(
        self, name=None, type=None, kind=None, id=None, label=None, nr=None):
    """Return a control's value in terms of normalized item labels.

    All arguments should be passed by name.
    """
    control = self.find_control(name, type, kind, id, label=label, nr=nr)
    return control.get_value_by_label()
def set_value_by_label(
        self, value,
        name=None, type=None, kind=None, id=None, label=None, nr=None):
    """Set a control's value in terms of normalized item labels.

    All arguments should be passed by name.
    """
    control = self.find_control(name, type, kind, id, label=label, nr=nr)
    control.set_value_by_label(value)
def set_all_readonly(self, readonly):
    """Set the readonly flag on every control in the form."""
    flag = bool(readonly)
    for control in self.controls:
        control.readonly = flag
def clear_all(self):
    """Clear the value attributes of all controls in the form.

    See HTMLForm.clear.__doc__.
    """
    for ctl in self.controls:
        ctl.clear()
def clear(self,
          name=None, type=None, kind=None, id=None, nr=None, label=None):
    """Clear the value attribute of a control.

    The affected control is then unsuccessful until a value is
    subsequently set.  AttributeError is raised on readonly controls.
    """
    control = self.find_control(name, type, kind, id, label=label, nr=nr)
    control.clear()
#---------------------------------------------------
# Form-filling methods applying only to ListControls.
def possible_items(self,  # deprecated
                   name=None, type=None, kind=None, id=None,
                   nr=None, by_label=False, label=None):
    """Deprecated: list all values the specified list control can take."""
    control = self._find_list_control(name, type, kind, id, label, nr)
    return control.possible_items(by_label)
def set(self, selected, item_name,  # deprecated
        name=None, type=None, kind=None, id=None, nr=None,
        by_label=False, label=None):
    """Deprecated: select / deselect named list item.

    selected: boolean selected state
    """
    control = self._find_list_control(name, type, kind, id, label, nr)
    control.set(selected, item_name, by_label)
def toggle(self, item_name,  # deprecated
           name=None, type=None, kind=None, id=None, nr=None,
           by_label=False, label=None):
    """Deprecated: toggle selected state of named list item."""
    control = self._find_list_control(name, type, kind, id, label, nr)
    control.toggle(item_name, by_label)
def set_single(self, selected,  # deprecated
               name=None, type=None, kind=None, id=None,
               nr=None, by_label=None, label=None):
    """Select / deselect list item in a control having only one item.

    If the control has multiple list items, ItemCountError is raised.

    This is just a convenience method, so you don't need to know the item's
    name -- the item name in these single-item controls is usually
    something meaningless like "1" or "on".

    For example, if a checkbox has a single item named "on", the following
    two calls are equivalent:

    control.toggle("on")
    control.toggle_single()
    """  # by_label ignored and deprecated
    self._find_list_control(
        name, type, kind, id, label, nr).set_single(selected)
def toggle_single(self, name=None, type=None, kind=None, id=None,
                  nr=None, by_label=None, label=None):  # deprecated
    """Toggle selected state of list item in control having only one item.

    The rest is as for HTMLForm.set_single.__doc__; by_label is ignored
    and deprecated.
    """
    control = self._find_list_control(name, type, kind, id, label, nr)
    control.toggle_single()
#---------------------------------------------------
# Form-filling method applying only to FileControls.
def add_file(self, file_object, content_type=None, filename=None,
             name=None, id=None, nr=None, label=None):
    """Add a file to be uploaded.

    file_object: file-like object (with read method) from which to read
     data to upload
    content_type: MIME content type of data to upload
    filename: filename to pass to server

    If filename is None, no filename is sent to the server.

    If content_type is None, the content type is guessed based on the
    filename and the data read from the file object.

    XXX
    At the moment, guessed content type is always application/octet-stream.
    Use sndhdr, imghdr modules.  Should also try to guess HTML, XML, and
    plain text.

    Note the following useful HTML attributes of file upload controls (see
    HTML 4.01 spec, section 17):

    accept: comma-separated list of content types that the server will
     handle correctly; you can use this to filter out non-conforming files
    size: XXX IIRC, this is indicative of whether form wants multiple or
     single files
    maxlength: XXX hint of max content length in bytes?
    """
    # delegate to the FileControl matched by the remaining arguments
    self.find_control(name, "file", id=id, label=label, nr=nr).add_file(
        file_object, content_type, filename)
#---------------------------------------------------
# Form submission methods, applying only to clickable controls.
def click(self, name=None, type=None, id=None, nr=0, coord=(1,1),
          request_class=urllib2.Request,
          label=None):
    """Return the request that would result from clicking on a control.

    The result is a urllib2.Request instance, suitable for passing to
    urllib2.urlopen (or ClientCookie.urlopen).

    Only submit buttons (INPUT/SUBMIT, BUTTON/SUBMIT) and IMAGEs can be
    clicked.  Clicks the first clickable control matching the name, type,
    id and nr arguments (as for find_control).  When none of name, type,
    id or nr is given and the form has no clickable control, a request
    for the form in its current, un-clicked state is returned.

    IndexError is raised when a criterion is supplied but nothing
    matches; ValueError when the form's enctype attribute is not
    recognised.  coord is the click coordinate and only matters for
    images.
    """
    return self._click(name, type, id, label, nr, coord, "request",
                       self._request_class)
def click_request_data(self,
                       name=None, type=None, id=None,
                       nr=0, coord=(1,1),
                       request_class=urllib2.Request,
                       label=None):
    """As for the click method, but return a tuple (url, data, headers).

    Useful for sending the request yourself with httplib or urllib
    rather than urllib2, e.g.::

        import urllib
        url, data, hdrs = form.click_request_data()
        r = urllib.urlopen(url, data)

    Otherwise, just use click.
    """
    return self._click(name, type, id, label, nr, coord, "request_data",
                       self._request_class)
def click_pairs(self, name=None, type=None, id=None,
                nr=0, coord=(1,1),
                label=None):
    """As for click_request_data, but return a list of (key, value) pairs.

    The list can be passed to ClientForm.urlencode, which is mainly
    useful with httplib or urllib rather than urllib2/ClientCookie, or
    when you want to tweak keys/values by hand first.

    Only meaningful for forms of MIME type x-www-form-urlencoded; in
    particular it does not carry the information required for file
    upload (use click_request_data for that).  Note that Python 2.0's
    urllib.urlencode accepts only a mapping, which loses ordering; use
    ClientForm.urlencode instead.
    """
    return self._click(name, type, id, label, nr, coord, "pairs",
                       self._request_class)
#---------------------------------------------------
def find_control(self,
                 name=None, type=None, kind=None, id=None,
                 predicate=None, nr=None,
                 label=None):
    """Locate and return some specific control within the form.

    At least one of name, type, kind, id, label, predicate and nr must
    be supplied; ControlNotFoundError is raised when nothing matches.

    name / id / label: the control must have the given attribute (radio
        controls and checkboxes never have labels: their items do).
    type: one of the <input> types ("text", "password", "hidden",
        "submit", "image", "button", "radio", "checkbox", "file") or
        "reset", "buttonbutton", "submitbutton", "resetbutton",
        "textarea", "select", "isindex".
    kind: interface group, one of "text", "list", "multilist",
        "singlelist", "clickable" or "file".
    predicate: function taking the control as its single argument and
        returning a boolean indicating a match.
    nr: 0-based sequence number among the controls matching all other
        criteria (not the position in the form).  When nr is omitted,
        AmbiguityError is raised if several controls match (unless the
        .backwards_compat attribute is true).
    """
    if all(criterion is None for criterion in
           (name, type, kind, id, label, predicate, nr)):
        raise ValueError(
            "at least one argument must be supplied to specify control")
    return self._find_control(name, type, kind, id, label, predicate, nr)
#---------------------------------------------------
# Private methods.
def _find_list_control(self,
                       name=None, type=None, kind=None, id=None,
                       label=None, nr=None):
    # As find_control, but restricted to list controls.
    if all(criterion is None for criterion in
           (name, type, kind, id, label, nr)):
        raise ValueError(
            "at least one argument must be supplied to specify control")
    return self._find_control(name, type, kind, id, label,
                              is_listcontrol, nr)
def _find_control(self, name, type, kind, id, label, predicate, nr):
    """Shared lookup behind find_control and _find_list_control.

    Validates argument types, then scans self.controls for the control
    matching every supplied criterion.  Raises ControlNotFoundError when
    nothing matches, AmbiguityError when several controls match and no
    nr was supplied (unless .backwards_compat is true, in which case nr
    defaults to 0).
    """
    # Validate argument types up front so callers get a clear error.
    if ((name is not None) and (name is not Missing) and
        not isstringlike(name)):
        raise TypeError("control name must be string-like")
    if (type is not None) and not isstringlike(type):
        raise TypeError("control type must be string-like")
    if (kind is not None) and not isstringlike(kind):
        raise TypeError("control kind must be string-like")
    if (id is not None) and not isstringlike(id):
        raise TypeError("control id must be string-like")
    if (label is not None) and not isstringlike(label):
        raise TypeError("control label must be string-like")
    if (predicate is not None) and not callable(predicate):
        raise TypeError("control predicate must be callable")
    if (nr is not None) and nr < 0:
        raise ValueError("control number must be a positive integer")
    orig_nr = nr  # remembered for the error message below
    found = None
    ambiguous = False
    # Backwards-compatible mode: an unspecified nr means "first match"
    # rather than "must be unambiguous".
    if nr is None and self.backwards_compat:
        nr = 0
    for control in self.controls:
        # name is Missing matches only controls whose name is None.
        if ((name is not None and name != control.name) and
            (name is not Missing or control.name is not None)):
            continue
        if type is not None and type != control.type:
            continue
        if kind is not None and not control.is_of_kind(kind):
            continue
        if id is not None and id != control.id:
            continue
        if predicate and not predicate(control):
            continue
        if label:
            # A control matches when any of its labels contains the text.
            for l in control.get_labels():
                if l.text.find(label) > -1:
                    break
            else:
                continue
        if nr is not None:
            if nr == 0:
                return control  # early exit: unambiguous due to nr
            nr -= 1
            continue
        if found:
            ambiguous = True
            break
        found = control
    if found and not ambiguous:
        return found
    # Build a human-readable description of the criteria for the error.
    description = []
    if name is not None: description.append("name %s" % repr(name))
    if type is not None: description.append("type '%s'" % type)
    if kind is not None: description.append("kind '%s'" % kind)
    if id is not None: description.append("id '%s'" % id)
    if label is not None: description.append("label '%s'" % label)
    if predicate is not None:
        description.append("predicate %s" % predicate)
    if orig_nr: description.append("nr %d" % orig_nr)
    description = ", ".join(description)
    if ambiguous:
        raise AmbiguityError("more than one control matching "+description)
    elif not found:
        raise ControlNotFoundError("no control matching "+description)
    assert False
def _click(self, name, type, id, label, nr, coord, return_type,
           request_class=urllib2.Request):
    """Shared implementation behind click, click_request_data and
    click_pairs: find the clickable control and delegate to it."""
    try:
        control = self._find_control(
            name, type, "clickable", id, label, None, nr)
    except ControlNotFoundError:
        if ((name is not None) or (type is not None) or (id is not None) or
            (nr != 0)):
            raise
        # no clickable controls, but no control was explicitly requested,
        # so return state without clicking any control
        return self._switch_click(return_type, request_class)
    else:
        return control._click(self, coord, return_type, request_class)
def _pairs(self):
    """Return sequence of (key, value) pairs suitable for urlencoding."""
    return [(key, value) for (index, key, value, control_index)
            in self._pairs_and_controls()]
def _pairs_and_controls(self):
"""Return sequence of (index, key, value, control_index)
of totally ordered pairs suitable for urlencoding.
control_index is the index of the control in self.controls
"""
pairs = []
for control_index in range(len(self.controls)):
control = self.controls[control_index]
for ii, key, val in control._totally_ordered_pairs():
pairs.append((ii, key, val, control_index))
# stable sort by ONLY first item in tuple
pairs.sort()
return pairs
def _request_data(self):
    """Return a tuple (url, data, headers) for submitting this form."""
    method = self.method.upper()
    #scheme, netloc, path, parameters, query, frag = urlparse.urlparse(self.action)
    parts = self._urlparse(self.action)
    rest, (query, frag) = parts[:-2], parts[-2:]
    if method == "GET":
        if self.enctype != "application/x-www-form-urlencoded":
            raise ValueError(
                "unknown GET form encoding type '%s'" % self.enctype)
        # GET: pairs are encoded into the query string; no body is sent.
        parts = rest + (urlencode(self._pairs()), None)
        uri = self._urlunparse(parts)
        return uri, None, []
    elif method == "POST":
        parts = rest + (query, None)
        uri = self._urlunparse(parts)
        if self.enctype == "application/x-www-form-urlencoded":
            return (uri, urlencode(self._pairs()),
                    [("Content-type", self.enctype)])
        elif self.enctype == "multipart/form-data":
            # Build a MIME multipart body; each control writes its own part.
            data = StringIO()
            http_hdrs = []
            mw = MimeWriter(data, http_hdrs)
            f = mw.startmultipartbody("form-data", add_to_http_hdrs=True,
                                      prefix=0)
            for ii, k, v, control_index in self._pairs_and_controls():
                self.controls[control_index]._write_mime_data(mw, k, v)
            mw.lastpart()
            return uri, data.getvalue(), http_hdrs
        else:
            raise ValueError(
                "unknown POST form encoding type '%s'" % self.enctype)
    else:
        raise ValueError("Unknown method '%s'" % method)
def _switch_click(self, return_type, request_class=urllib2.Request):
    """Central dispatch used by HTMLForm and clickable controls to hide
    switching on return_type ("pairs", "request_data" or "request")."""
    if return_type == "pairs":
        return self._pairs()
    if return_type == "request_data":
        return self._request_data()
    url, data, headers = self._request_data()
    req = request_class(url, data)
    for key, val in headers:
        add_hdr = req.add_header
        if key.lower() == "content-type":
            try:
                # Prefer add_unredirected_header so the Content-Type is
                # not resent on redirects.
                add_hdr = req.add_unredirected_header
            except AttributeError:
                # pre-2.4 and not using ClientCookie
                pass
        add_hdr(key, val)
    return req
|
jasrusable/fun
|
venv/lib/python2.7/site-packages/twill/other_packages/_mechanize_dist/ClientForm.py
|
Python
|
gpl-2.0
| 124,260
|
from collections import OrderedDict
from datetime import datetime, timedelta
from io import BytesIO
import json
import uuid
from hpack.struct import HeaderTuple
from http.cookies import BaseCookie, Morsel
from hyperframe.frame import HeadersFrame, DataFrame, ContinuationFrame
from .constants import response_codes, h2_headers
from .logger import get_logger
from .utils import isomorphic_decode, isomorphic_encode
missing = object()
class Response(object):
    """Object representing the response to a HTTP request
    :param handler: RequestHandler being used for this response
    :param request: Request that this is the response for

    .. attribute:: request
       Request associated with this Response.
    .. attribute:: encoding
       The encoding to use when converting unicode to strings for output.
    .. attribute:: add_required_headers
       Boolean indicating whether mandatory headers should be added to the
       response.
    .. attribute:: send_body_for_head_request
       Boolean, default False, indicating whether the body content should be
       sent when the request method is HEAD.
    .. attribute:: writer
       The ResponseWriter for this response
    .. attribute:: status
       Status tuple (code, message). Can be set to an integer in which case the
       message part is filled in automatically, or a tuple (code, message) in
       which case code is an int and message is a text or binary string.
    .. attribute:: headers
       List of HTTP headers to send with the response. Each item in the list is a
       tuple of (name, value).
    .. attribute:: content
       The body of the response. This can either be a string or a iterable of response
       parts. If it is an iterable, any item may be a string or a function of zero
       parameters which, when called, returns a string."""

    def __init__(self, handler, request, response_writer_cls=None):
        self.request = request
        self.encoding = "utf8"
        self.add_required_headers = True
        self.send_body_for_head_request = False
        self.close_connection = False
        self.logger = get_logger()
        # Subclasses (e.g. H2Response) supply their own writer class.
        self.writer = response_writer_cls(handler, self) if response_writer_cls else ResponseWriter(handler, self)
        self._status = (200, None)
        self.headers = ResponseHeaders()
        self.content = []

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        # Accept either a bare int code or a (code, message) pair.
        if hasattr(value, "__len__"):
            if len(value) != 2:
                raise ValueError
            else:
                code = int(value[0])
                message = value[1]
                # Only call str() if message is not a string type, so that we
                # don't get `str(b"foo") == "b'foo'"` in Python 3.
                if not isinstance(message, (bytes, str)):
                    message = str(message)
                self._status = (code, message)
        else:
            self._status = (int(value), None)

    def set_cookie(self, name, value, path="/", domain=None, max_age=None,
                   expires=None, secure=False, httponly=False, comment=None):
        """Set a cookie to be sent with a Set-Cookie header in the
        response
        :param name: name of the cookie (a binary string)
        :param value: value of the cookie (a binary string, or None to
                      expire the cookie immediately)
        :param max_age: datetime.timedelta int representing the time (in seconds)
                        until the cookie expires
        :param path: String path to which the cookie applies
        :param domain: String domain to which the cookie applies
        :param secure: Boolean indicating whether the cookie is marked as secure
        :param httponly: Boolean indicating whether the cookie is marked as
                         HTTP Only
        :param comment: String comment
        :param expires: datetime.datetime or datetime.timedelta indicating a
                        time or interval from now when the cookie expires
        """
        # TODO(Python 3): Convert other parameters (e.g. path) to bytes, too.
        if value is None:
            # A None value expires the cookie immediately.
            value = b''
            max_age = 0
            expires = timedelta(days=-1)
        name = isomorphic_decode(name)
        value = isomorphic_decode(value)
        # Maps month number -> abbreviated month name.  NOTE(review): despite
        # the variable name "days", these are months.
        days = {i+1: name for i, name in enumerate(["jan", "feb", "mar",
                                                    "apr", "may", "jun",
                                                    "jul", "aug", "sep",
                                                    "oct", "nov", "dec"])}
        if isinstance(expires, timedelta):
            expires = datetime.utcnow() + expires
        if expires is not None:
            # Substitute the month name ourselves rather than relying on
            # strftime's %b, whose output varies with locale.
            expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
            expires_str = expires_str % days[expires.month]
            expires = expires_str
        if max_age is not None:
            if hasattr(max_age, "total_seconds"):
                max_age = int(max_age.total_seconds())
            max_age = "%.0d" % max_age
        m = Morsel()

        def maybe_set(key, value):
            # Skip attributes that were not supplied (None) or are False.
            if value is not None and value is not False:
                m[key] = value

        m.set(name, value, value)
        maybe_set("path", path)
        maybe_set("domain", domain)
        maybe_set("comment", comment)
        maybe_set("expires", expires)
        maybe_set("max-age", max_age)
        maybe_set("secure", secure)
        maybe_set("httponly", httponly)
        self.headers.append("Set-Cookie", m.OutputString())

    def unset_cookie(self, name):
        """Remove a cookie from those that are being sent with the response"""
        name = isomorphic_decode(name)
        cookies = self.headers.get("Set-Cookie")
        parser = BaseCookie()
        for cookie in cookies:
            parser.load(isomorphic_decode(cookie))
        if name in parser.keys():
            del self.headers["Set-Cookie"]
            for m in parser.values():
                if m.key != name:
                    # Fix: ResponseHeaders.append takes (key, value) as two
                    # positional arguments; the previous code passed a single
                    # tuple, which raised TypeError whenever a cookie was
                    # unset while other cookies remained.
                    self.headers.append("Set-Cookie", m.OutputString())

    def delete_cookie(self, name, path="/", domain=None):
        """Delete a cookie on the client by setting it to the empty string
        and to expire in the past"""
        self.set_cookie(name, None, path=path, domain=domain, max_age=0,
                        expires=timedelta(days=-1))

    def iter_content(self, read_file=False):
        """Iterator returning chunks of response body content.
        If any part of the content is a function, this will be called
        and the resulting value (if any) returned.
        :param read_file: boolean controlling the behaviour when content is a
                          file handle. When set to False the handle will be
                          returned directly allowing the file to be passed to
                          the output in small chunks. When set to True, the
                          entire content of the file will be returned as a
                          string facilitating non-streaming operations like
                          template substitution.
        """
        if isinstance(self.content, bytes):
            yield self.content
        elif isinstance(self.content, str):
            yield self.content.encode(self.encoding)
        elif hasattr(self.content, "read"):
            # File-like content: stream the handle or read it whole.
            if read_file:
                yield self.content.read()
            else:
                yield self.content
        else:
            for item in self.content:
                # Items may be zero-argument callables producing a chunk.
                if hasattr(item, "__call__"):
                    value = item()
                else:
                    value = item
                if value:
                    yield value

    def write_status_headers(self):
        """Write out the status line and headers for the response"""
        self.writer.write_status(*self.status)
        for item in self.headers:
            self.writer.write_header(*item)
        self.writer.end_headers()

    def write_content(self):
        """Write out the response content"""
        if self.request.method != "HEAD" or self.send_body_for_head_request:
            for item in self.iter_content():
                self.writer.write_content(item)

    def write(self):
        """Write the whole response"""
        self.write_status_headers()
        self.write_content()

    def set_error(self, code, message=u""):
        """Set the response status headers and return a JSON error object:
        {"error": {"code": code, "message": message}}
        code is an int (HTTP status code), and message is a text string.
        """
        err = {"code": code,
               "message": message}
        data = json.dumps({"error": err})
        self.status = code
        self.headers = [("Content-Type", "application/json"),
                        ("Content-Length", len(data))]
        self.content = data
        if code == 500:
            # Log only the first line of the message at error level; the
            # full message goes to the info log.
            if isinstance(message, str) and message:
                first_line = message.splitlines()[0]
            else:
                first_line = "<no message given>"
            self.logger.error("Exception loading %s: %s" % (self.request.url,
                                                            first_line))
            self.logger.info(message)
class MultipartContent(object):
    """Body of a multipart response.

    Parts are accumulated with append_part; calling the instance renders
    all parts, separated by the boundary, as a single bytes payload.
    """

    def __init__(self, boundary=None, default_content_type=None):
        self.items = []
        # Generate a unique boundary when the caller does not supply one.
        self.boundary = str(uuid.uuid4()) if boundary is None else boundary
        self.default_content_type = default_content_type

    def __call__(self):
        delimiter = b"--" + self.boundary.encode("ascii")
        chunks = [b"", delimiter]
        for part in self.items:
            chunks.append(part.to_bytes())
            chunks.append(delimiter)
        # The closing boundary is terminated with "--".
        chunks[-1] += b"--"
        return b"\r\n".join(chunks)

    def append_part(self, data, content_type=None, headers=None):
        """Add a part, defaulting its content type to default_content_type."""
        if content_type is None:
            content_type = self.default_content_type
        self.items.append(MultipartPart(data, content_type, headers))

    def __iter__(self):
        # This is hackish; when writing the response we need an iterable
        # or a string.  For a multipart/byterange response we want an
        # iterable containing a single callable: this object itself.
        yield self
class MultipartPart(object):
    """A single part (headers plus body bytes) of a MultipartContent body."""

    def __init__(self, data, content_type=None, headers=None):
        assert isinstance(data, bytes), data
        self.headers = ResponseHeaders()
        if content_type is not None:
            self.headers.set("Content-Type", content_type)
        if headers is not None:
            for name, value in headers:
                # An explicitly supplied Content-Type replaces the one set
                # above; everything else accumulates.
                if name.lower() == b"content-type":
                    setter = self.headers.set
                else:
                    setter = self.headers.append
                setter(name, value)
        self.data = data

    def to_bytes(self):
        """Serialize the part's headers and body as CRLF-separated bytes."""
        lines = []
        for key, value in self.headers:
            assert isinstance(key, bytes)
            assert isinstance(value, bytes)
            lines.append(b"%s: %s" % (key, value))
        lines.append(b"")
        lines.append(self.data)
        return b"\r\n".join(lines)
def _maybe_encode(s):
"""Encode a string or an int into binary data using isomorphic_encode()."""
if isinstance(s, int):
return b"%i" % (s,)
return isomorphic_encode(s)
class ResponseHeaders(object):
    """Ordered, case-insensitive mapping of header names to value lists.

    Each entry stores the originally supplied header name together with
    the list of values set for it; lookups key on the lower-cased bytes
    form of the name.
    """

    def __init__(self):
        self.data = OrderedDict()

    def set(self, key, value):
        """Set *key* to *value*, discarding any previously set values."""
        key = _maybe_encode(key)
        value = _maybe_encode(value)
        self.data[key.lower()] = (key, [value])

    def append(self, key, value):
        """Add *value* for *key*, keeping any existing values."""
        key = _maybe_encode(key)
        value = _maybe_encode(value)
        lowered = key.lower()
        if lowered in self.data:
            self.data[lowered][1].append(value)
        else:
            self.set(key, value)

    def get(self, key, default=missing):
        """Return the list of values for *key*; [] (or *default*) if absent."""
        key = _maybe_encode(key)
        try:
            return self[key]
        except KeyError:
            return [] if default is missing else default

    def __getitem__(self, key):
        """Return the list of values for *key*; KeyError when absent."""
        return self.data[_maybe_encode(key).lower()][1]

    def __delitem__(self, key):
        del self.data[_maybe_encode(key).lower()]

    def __contains__(self, key):
        return _maybe_encode(key).lower() in self.data

    def __setitem__(self, key, value):
        self.set(key, value)

    def __iter__(self):
        # Yield one (name, value) pair per value, preserving insertion order.
        for name, values in self.data.values():
            for value in values:
                yield name, value

    def items(self):
        return list(self)

    def update(self, items_iter):
        for name, value in items_iter:
            self.append(name, value)

    def __repr__(self):
        return repr(self.data)
class H2Response(Response):
    """HTTP/2 variant of Response; frames are written via H2ResponseWriter."""

    def __init__(self, handler, request):
        super(H2Response, self).__init__(
            handler, request, response_writer_cls=H2ResponseWriter)

    def write_status_headers(self):
        # HTTP/2 carries the status with the headers in HEADERS frame(s).
        self.writer.write_headers(self.headers, *self.status)

    def write_content(self):
        """Write out the response content.

        Uses one-item lookahead over the content iterator (a hacky way of
        detecting the last item) so the final chunk can be flagged as the
        last DATA frame of the stream.
        """
        if self.request.method == "HEAD" and not self.send_body_for_head_request:
            return
        pending = None
        chunks = self.iter_content()
        try:
            pending = next(chunks)
            while True:
                upcoming = next(chunks)
                self.writer.write_data(pending, last=False)
                pending = upcoming
        except StopIteration:
            if pending:
                self.writer.write_data(pending, last=True)
class H2ResponseWriter(object):
    """Writes HTTP/2 frames for a response through the handler's connection.

    Tracked methods (write_headers, write_data, ...) go through the h2
    connection's state machine; the write_raw_* variants build frames by
    hand and bypass it.
    """

    def __init__(self, handler, response):
        self.socket = handler.request
        self.h2conn = handler.conn
        self._response = response
        self._handler = handler
        self.stream_ended = False
        self.content_written = False
        self.request = response.request
        self.logger = response.logger

    def write_headers(self, headers, status_code, status_message=None, stream_id=None, last=False):
        """Send a state-machine-tracked HEADERS frame.

        :param headers: List of (header, value) tuples
        :param status_code: The HTTP status code of the response
        :param stream_id: Id of stream to send frame on; defaults to the
                          request's stream
        :param last: Flag to signal if this is the last frame in stream
        """
        pseudo_headers = []
        regular_headers = []  # non-':'-prefixed headers must follow pseudo-headers
        for header, value in headers:
            # h2 wants native ASCII strings for header field names.
            if isinstance(header, bytes):
                header = header.decode('ascii')
            # Values may arrive as bytes, str or int.
            if isinstance(value, bytes):
                value = self.decode(value)
            if header in h2_headers:
                pseudo_headers.append((':' + header, str(value)))
            else:
                regular_headers.append((header, str(value)))
        pseudo_headers.append((':status', str(status_code)))
        pseudo_headers.extend(regular_headers)
        with self.h2conn as connection:
            connection.send_headers(
                stream_id=self.request.h2_stream_id if stream_id is None else stream_id,
                headers=pseudo_headers,
                end_stream=last or self.request.method == "HEAD"
            )
            self.write(connection)

    def write_data(self, item, last=False, stream_id=None):
        """Send item as one or more state-machine-tracked DATA frames.

        Data exceeding the permitted payload size is split across frames
        using flow control.

        :param item: The content of the DATA frame(s)
        :param last: Flag to signal if this is the last frame in stream
        :param stream_id: Id of stream to send frame on; defaults to the
                          request's stream
        """
        if isinstance(item, (str, bytes)):
            buf = BytesIO(self.encode(item))
        else:
            buf = item
        # Measure total length by seeking to the end, then rewind.
        buf.seek(0, 2)
        remaining = buf.tell()
        buf.seek(0)
        # Emit full-size frames until the remainder fits in a single frame.
        limit = self.get_max_payload_size()
        while remaining > limit:
            self.write_data_frame(buf.read(limit), False, stream_id)
            remaining -= limit
            limit = self.get_max_payload_size()
        self.write_data_frame(buf.read(), last, stream_id)

    def write_data_frame(self, data, last, stream_id=None):
        with self.h2conn as connection:
            connection.send_data(
                stream_id=self.request.h2_stream_id if stream_id is None else stream_id,
                data=data,
                end_stream=last,
            )
            self.write(connection)
        self.stream_ended = last

    def write_push(self, promise_headers, push_stream_id=None, status=None, response_headers=None, response_data=None):
        """Write a push promise, and optionally the pushed response itself.

        When response_headers/response_data are omitted nothing is pushed
        beyond the promise; push the response yourself using the returned
        stream id.

        :param promise_headers: Header tuples matching what the client
                                would use to request the pushed response
        :param push_stream_id: Stream to push to; next available id when None
        :param status: Status code of the response, REQUIRED if
                       response_headers given
        :param response_headers: The headers of the response
        :param response_data: The response data
        :return: The ID of the push stream
        """
        with self.h2conn as connection:
            if push_stream_id is None:
                push_stream_id = connection.get_next_available_stream_id()
            connection.push_stream(self.request.h2_stream_id, push_stream_id, promise_headers)
            self.write(connection)
        has_data = response_data is not None
        if response_headers is not None:
            assert status is not None
            self.write_headers(response_headers, status, stream_id=push_stream_id, last=not has_data)
        if has_data:
            self.write_data(response_data, last=True, stream_id=push_stream_id)
        return push_stream_id

    def end_stream(self, stream_id=None):
        """Ends the stream with the given ID, or the request's stream if none given."""
        with self.h2conn as connection:
            if stream_id is None:
                stream_id = self.request.h2_stream_id
            connection.end_stream(stream_id)
            self.write(connection)
        self.stream_ended = True

    def write_raw_header_frame(self, headers, stream_id=None, end_stream=False, end_headers=False, frame_cls=HeadersFrame):
        """Send a HEADERS frame, ignoring the stream state machine.

        The connection object is used only to HPACK-encode the headers;
        no state checks are performed.

        :param headers: List of (header, value) tuples
        :param stream_id: Id of stream to send frame on; defaults to the
                          request's stream
        :param end_stream: Set to True to add END_STREAM flag to frame
        :param end_headers: Set to True to add END_HEADERS flag to frame
        """
        if not stream_id:
            stream_id = self.request.h2_stream_id
        header_tuples = [HeaderTuple(name, value) for name, value in headers]
        with self.h2conn as connection:
            frame = frame_cls(stream_id, data=connection.encoder.encode(header_tuples))
            if end_stream:
                self.stream_ended = True
                frame.flags.add('END_STREAM')
            if end_headers:
                frame.flags.add('END_HEADERS')
            self.write_raw(frame.serialize())

    def write_raw_data_frame(self, data, stream_id=None, end_stream=False):
        """Send a DATA frame, ignoring the state machine and flow control.

        :param data: The data to be sent in the frame
        :param stream_id: Id of stream to send frame on; defaults to the
                          request's stream
        :param end_stream: Set to True to add END_STREAM flag to frame
        """
        if not stream_id:
            stream_id = self.request.h2_stream_id
        frame = DataFrame(stream_id, data=data)
        if end_stream:
            self.stream_ended = True
            frame.flags.add('END_STREAM')
        self.write_raw(frame.serialize())

    def write_raw_continuation_frame(self, headers, stream_id=None, end_headers=False):
        """Send a CONTINUATION frame, ignoring the stream state machine.

        Exposed separately because h2 handles the HEADERS/CONTINUATION
        split internally in write_headers.  Headers are HPACK-encoded.
        """
        self.write_raw_header_frame(headers, stream_id=stream_id, end_headers=end_headers, frame_cls=ContinuationFrame)

    def get_max_payload_size(self, stream_id=None):
        """Returns the maximum size of a payload for the given stream."""
        if stream_id is None:
            stream_id = self.request.h2_stream_id
        with self.h2conn as connection:
            # Subtract the 9-byte frame header overhead.
            window = connection.local_flow_control_window(stream_id)
            return min(connection.remote_settings.max_frame_size, window) - 9

    def write(self, connection):
        """Flush any frames pending on the connection out to the socket."""
        self.content_written = True
        self.socket.sendall(connection.data_to_send())

    def write_raw(self, raw_data):
        """Used for sending raw bytes/data through the socket"""
        self.content_written = True
        self.socket.sendall(raw_data)

    def decode(self, data):
        """Convert bytes to unicode according to response.encoding."""
        if isinstance(data, bytes):
            return data.decode(self._response.encoding)
        if isinstance(data, str):
            return data
        raise ValueError(type(data))

    def encode(self, data):
        """Convert unicode to bytes according to response.encoding."""
        if isinstance(data, bytes):
            return data
        if isinstance(data, str):
            return data.encode(self._response.encoding)
        raise ValueError
class ResponseWriter(object):
    """Object providing an API to write out a HTTP response.

    :param handler: The RequestHandler being used.
    :param response: The Response associated with this writer.
    """
    def __init__(self, handler, response):
        self._wfile = handler.wfile
        self._response = response
        self._handler = handler
        # Tracking for what has already been sent on the wire.
        self._status_written = False
        self._headers_seen = set()
        self._headers_complete = False
        self.content_written = False
        self.request = response.request
        # Chunk size used when streaming a file-like body.
        self.file_chunk_size = 32 * 1024
        self.default_status = 200

    def _seen_header(self, name):
        # Header names are stored lower-cased and byte-encoded so lookups are
        # case-insensitive and indifferent to str/bytes input.
        return self.encode(name.lower()) in self._headers_seen

    def write_status(self, code, message=None):
        """Write out the status line of a response.

        :param code: The integer status code of the response.
        :param message: The message of the response. Defaults to the message commonly used
                        with the status code.
        """
        if message is None:
            if code in response_codes:
                message = response_codes[code][0]
            else:
                message = ''
        self.write(b"%s %d %s\r\n" %
                   (isomorphic_encode(self._response.request.protocol_version), code, isomorphic_encode(message)))
        self._status_written = True

    def write_header(self, name, value):
        """Write out a single header for the response.

        If a status has not been written, a default status will be written (currently 200)

        :param name: Name of the header field
        :param value: Value of the header field
        :return: A boolean indicating whether the write succeeds
        """
        if not self._status_written:
            self.write_status(self.default_status)
        self._headers_seen.add(self.encode(name.lower()))
        if not self.write(name):
            return False
        if not self.write(b": "):
            return False
        if isinstance(value, int):
            if not self.write(str(value)):
                return False
        elif not self.write(value):
            return False
        return self.write(b"\r\n")

    def write_default_headers(self):
        # Add HTTP-mandated headers the caller did not supply.
        for name, f in [("Server", self._handler.version_string),
                        ("Date", self._handler.date_time_string)]:
            if not self._seen_header(name):
                if not self.write_header(name, f()):
                    return False

        if (isinstance(self._response.content, (bytes, str)) and
                not self._seen_header("content-length")):
            #Would be nice to avoid double-encoding here
            if not self.write_header("Content-Length", len(self.encode(self._response.content))):
                return False
        return True

    def end_headers(self):
        """Finish writing headers and write the separator.

        Unless add_required_headers on the response is False,
        this will also add HTTP-mandated headers that have not yet been supplied
        to the response headers.

        :return: A boolean indicating whether the write succeeds
        """
        if self._response.add_required_headers:
            if not self.write_default_headers():
                return False

        if not self.write("\r\n"):
            return False
        if not self._seen_header("content-length"):
            # Without a Content-Length the client can only find the end of the
            # body when the connection closes.
            self._response.close_connection = True
        self._headers_complete = True
        return True

    def write_content(self, data):
        """Write the body of the response.

        HTTP-mandated headers will be automatically added with status default to 200 if they have
        not been explicitly set.

        :return: A boolean indicating whether the write succeeds
        """
        if not self._status_written:
            self.write_status(self.default_status)
        if not self._headers_complete:
            self._response.content = data
            self.end_headers()
        return self.write_raw_content(data)

    def write_raw_content(self, data):
        """Writes the data 'as is'"""
        if data is None:
            raise ValueError('data cannot be None')
        if isinstance(data, (str, bytes)):
            # Deliberately allows both text and binary types. See `self.encode`.
            return self.write(data)
        else:
            return self.write_content_file(data)

    def write(self, data):
        """Write directly to the response, converting unicode to bytes
        according to response.encoding.

        :return: A boolean indicating whether the write succeeds
        """
        self.content_written = True
        try:
            self._wfile.write(self.encode(data))
            return True
        except OSError:
            # This can happen if the socket got closed by the remote end
            return False

    def write_content_file(self, data):
        """Write a file-like object directly to the response in chunks.

        :return: A boolean indicating whether the write succeeds
        """
        self.content_written = True
        success = True
        while True:
            buf = data.read(self.file_chunk_size)
            if not buf:
                # EOF: the whole file has been streamed. Fixed: previously
                # this branch set success = False, so every fully-written
                # file-backed body reported failure, contradicting the
                # success contract of the other write methods.
                break
            try:
                self._wfile.write(buf)
            except OSError:
                # The remote end closed the connection mid-body.
                success = False
                break
        data.close()
        return success

    def encode(self, data):
        """Convert unicode to bytes according to response.encoding."""
        if isinstance(data, bytes):
            return data
        elif isinstance(data, str):
            return data.encode(self._response.encoding)
        else:
            raise ValueError("data %r should be text or binary, but is %s" % (data, type(data)))
|
nwjs/chromium.src
|
third_party/wpt_tools/wpt/tools/wptserve/wptserve/response.py
|
Python
|
bsd-3-clause
| 30,417
|
import json
from coalib.bearlib import deprecate_settings
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.GemRequirement import GemRequirement
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coala_utils.param_conversion import negate
@linter(executable='reek', use_stdin=True)
class RubySmellBear:
    """
    Detect code smells in Ruby source code.

    For more information about the detected smells, see
    <https://github.com/troessner/reek/blob/master/docs/Code-Smells.md>.
    """
    LANGUAGES = {'Ruby'}
    REQUIREMENTS = {GemRequirement('reek')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Smell'}

    @staticmethod
    def create_arguments(filename, file, config_file):
        # File contents arrive on stdin (use_stdin=True above); ask reek for
        # JSON output and point it at the generated YAML configuration.
        return '--format', 'json', '-c', config_file

    def process_output(self, output, filename, file):
        """Translate reek's JSON smell list into coala Result objects."""
        # Empty output means reek found no smells.
        output = json.loads(output) if output else ()

        for issue in output:
            sourceranges = []
            for line in issue['lines']:
                sourceranges.append(SourceRange.from_values(
                    file=filename, start_line=line))

            # Some smells carry a 'name' for the offending symbol; others
            # only identify the enclosing context.
            if 'name' in issue:
                message = "'{}' (in '{}') {}.".format(
                    issue['name'], issue['context'], issue['message'])
            else:
                message = "'{}' {}".format(issue['context'], issue['message'])

            yield Result(
                origin='{} ({})'.format(self.__class__.__name__,
                                        issue['smell_type']),
                message=message,
                affected_code=sourceranges,
                additional_info='More information is available at {}'
                                '.'.format(issue['wiki_link']))

    # Old setting names (right) are mapped to the new allow_* names (left);
    # `negate` flips the boolean since "allow X" is the inverse of "check X".
    @deprecate_settings(allow_duplicate_method=(
                            'duplicate_method_call', negate),
                        allow_data_clump=('data_clump', negate),
                        allow_control_parameters=('control_parameter', negate),
                        allow_class_variables=('class_variable', negate),
                        allow_boolean_parameter_in_functions=(
                            'boolean_parameter', negate),
                        allow_setter_in_classes=('attribute', negate),
                        allow_unused_private_methods=(
                            'unused_private_method', negate),
                        allow_unused_variables=('unused_params', negate))
    def generate_config(self,
                        allow_setter_in_classes: bool=False,
                        allow_boolean_parameter_in_functions: bool=False,
                        allow_class_variables: bool=False,
                        allow_control_parameters: bool=False,
                        allow_data_clump: bool=False,
                        allow_duplicate_method: bool=False,
                        feature_envy: bool=True,
                        missing_module_description: bool=True,
                        long_param_list: bool=True,
                        long_yield_list: bool=True,
                        module_initialize: bool=True,
                        nested_iterators: bool=True,
                        nil_check: bool=True,
                        prima_donna_method: bool=True,
                        repeated_conditional: bool=True,
                        too_many_instance_variables: bool=True,
                        too_many_methods: bool=True,
                        too_long_method: bool=True,
                        bad_method_name: bool=True,
                        bad_module_name: bool=True,
                        bad_param_name: bool=True,
                        bad_var_name: bool=True,
                        allow_unused_variables: bool=False,
                        allow_unused_private_methods: bool=True,
                        utility_function: bool=True):
        """
        :param allow_setter_in_classes:
            Allows setter in classes.
        :param allow_boolean_parameter_in_functions:
            Allows boolean parameter in functions (control coupling).
        :param allow_class_variables:
            Allows class variables.
        :param allow_control_parameters:
            Allows parameters that control function behaviour (control
            coupling).
        :param allow_data_clump:
            Does not warn when the same two or three items frequently appear
            together in function/class parameter list.
        :param allow_duplicate_method:
            Allows having two fragments of code that look nearly identical, or
            two fragments of code that have nearly identical effects at some
            conceptual level.
        :param feature_envy:
            Occurs when a code fragment references another object more often
            than it references itself, or when several clients do the same
            series of manipulations on a particular type of object.
        :param missing_module_description:
            Warns if a module description is missing.
        :param long_param_list:
            Warns about too many parameters of functions.
        :param long_yield_list:
            Warns when a method yields a lot of arguments to the block it gets
            passed.
        :param module_initialize:
            Warns about ``#initialize`` methods in modules.
        :param nested_iterators:
            Warns when a block contains another block.
        :param nil_check:
            Warns about nil checks.
        :param prima_donna_method:
            Warns about methods whose names end with an exclamation mark.
        :param repeated_conditional:
            Warns about repeated conditionals.
        :param too_many_instance_variables:
            Warns for too many instance variables.
        :param too_many_methods:
            Warns if a class has too many methods.
        :param too_long_method:
            Warns about huge methods.
        :param bad_method_name:
            Warns about method names which are not communicating the purpose
            of the method well.
        :param bad_module_name:
            Warns about module names which are not communicating the purpose
            of the module well.
        :param bad_param_name:
            Warns about parameter names which are not communicating the purpose
            of the parameter well.
        :param bad_var_name:
            Warns about variable names which are not communicating the purpose
            of the variable well.
        :param allow_unused_variables:
            Allows unused parameters though they are dead code.
        :param allow_unused_private_methods:
            Allows unused private methods (disables the ``UnusedPrivateMethod``
            dead-code check); set to ``False`` to warn about them.
        :param utility_function:
            Allows any instance method that has no dependency on the state of
            the instance.
        """
        # reek expects a YAML document mapping each smell detector to an
        # 'enabled' flag; all allow_* parameters are inverted here.
        config = {
            'Attribute': not allow_setter_in_classes,
            'BooleanParameter': not allow_boolean_parameter_in_functions,
            'ClassVariable': not allow_class_variables,
            'ControlParameter': not allow_control_parameters,
            'DataClump': not allow_data_clump,
            'DuplicateMethodCall': not allow_duplicate_method,
            'FeatureEnvy': feature_envy,
            'IrresponsibleModule': missing_module_description,
            'LongParameterList': long_param_list,
            'LongYieldList': long_yield_list,
            'ModuleInitialize': module_initialize,
            'NestedIterators': nested_iterators,
            'NilCheck': nil_check,
            'PrimaDonnaMethod': prima_donna_method,
            'RepeatedConditional': repeated_conditional,
            'TooManyInstanceVariables': too_many_instance_variables,
            'TooManyMethods': too_many_methods,
            'TooManyStatements': too_long_method,
            'UncommunicativeMethodName': bad_method_name,
            'UncommunicativeModuleName': bad_module_name,
            'UncommunicativeParameterName': bad_param_name,
            'UncommunicativeVariableName': bad_var_name,
            'UnusedParameters': not allow_unused_variables,
            'UnusedPrivateMethod': not allow_unused_private_methods,
            'UtilityFunction': utility_function}
        return ('---\n' +
                '\n'.join('{}:\n enabled: {}'.format(key, str(value).lower())
                          for key, value in config.items()))
|
IPMITMO/statan
|
coala-bears/bears/ruby/RubySmellBear.py
|
Python
|
mit
| 8,585
|
#!/usr/bin/python
import sys
import os
import random
import string
import sqlite3
# Schema for the deliberately large test table.
CREATE = """
CREATE TABLE IF NOT EXISTS hugetable (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    weirdtext TEXT,
    crazynumber REAL
);
"""


def main():
    """Create the database named on the command line and fill `hugetable`
    with one million rows of random text/number data, printing progress
    every 1000 rows.

    Exits with an error message if no database filename is given.
    """
    if len(sys.argv) != 2:
        sys.exit("please specify the db filename")

    with sqlite3.connect(sys.argv[1]) as c:
        c.executescript(CREATE)
        rowcount = 1000000
        for i in range(rowcount):
            # The inner loop variable must NOT be named `i`: under Python 2 a
            # list comprehension leaks its variable and would clobber the row
            # counter, silencing the progress output below.
            text = "".join(random.choice(string.ascii_letters)
                           for _ in range(200))
            num = random.random() * random.randint(0, 2930)
            c.execute("INSERT INTO hugetable(weirdtext, crazynumber) "
                      "VALUES ( :t, :n);", {"t": text, "n": num})
            if i % 1000 == 0:
                print("inserted", i, "of", rowcount)
        c.commit()
    # The connection context manager commits/rolls back but does not close;
    # close explicitly to release the file handle.
    c.close()


if __name__ == "__main__":
    main()
|
alephnaughty/sqlitebrowser
|
tests/createtestdb.py
|
Python
|
gpl-3.0
| 858
|
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
__all__ = [ 'options', 'arguments', 'main' ]

# Metadata strings consumed by the OptionParser built in parse_options().
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
brief_copyright=\
    "gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
    """Build the gem5 command-line parser, apply any user options.py
    override file, and parse sys.argv.

    Returns a tuple of (options, arguments).
    """
    import config
    from options import OptionParser

    options = OptionParser(usage=usage, version=version,
                           description=brief_copyright)
    option = options.add_option
    group = options.set_group

    # Help options
    option('-B', "--build-info", action="store_true", default=False,
           help="Show build information")
    option('-C', "--copyright", action="store_true", default=False,
           help="Show full copyright information")
    option('-R', "--readme", action="store_true", default=False,
           help="Show the readme")

    # Options for configuring the base simulator
    option('-d', "--outdir", metavar="DIR", default="m5out",
           help="Set the output directory to DIR [Default: %default]")
    option('-r', "--redirect-stdout", action="store_true", default=False,
           help="Redirect stdout (& stderr, without -e) to file")
    option('-e', "--redirect-stderr", action="store_true", default=False,
           help="Redirect stderr to file")
    option("--stdout-file", metavar="FILE", default="simout",
           help="Filename for -r redirection [Default: %default]")
    option("--stderr-file", metavar="FILE", default="simerr",
           help="Filename for -e redirection [Default: %default]")
    option('-i', "--interactive", action="store_true", default=False,
           help="Invoke the interactive interpreter after running the script")
    option("--pdb", action="store_true", default=False,
           help="Invoke the python debugger before running the script")
    option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
           help="Prepend PATH to the system path when invoking the script")
    option('-q', "--quiet", action="count", default=0,
           help="Reduce verbosity")
    option('-v', "--verbose", action="count", default=0,
           help="Increase verbosity")

    # Statistics options
    group("Statistics Options")
    option("--stats-file", metavar="FILE", default="stats.txt",
           help="Sets the output file for statistics [Default: %default]")

    # Configuration Options
    group("Configuration Options")
    option("--dump-config", metavar="FILE", default="config.ini",
           help="Dump configuration output file [Default: %default]")
    option("--json-config", metavar="FILE", default="config.json",
           help="Create JSON output of the configuration [Default: %default]")
    option("--dot-config", metavar="FILE", default="config.dot",
           help="Create DOT & pdf outputs of the configuration [Default: %default]")

    # Debugging options
    group("Debugging Options")
    option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
           help="Tick to create a breakpoint")
    option("--debug-help", action='store_true',
           help="Print help on debug flags")
    option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
           help="Sets the flags for debug output (-FLAG disables a flag)")
    option("--debug-start", metavar="TIME", type='int',
           help="Start debug output at TIME (must be in ticks)")
    option("--debug-file", metavar="FILE", default="cout",
           help="Sets the output file for debug [Default: %default]")
    option("--debug-ignore", metavar="EXPR", action='append', split=':',
           help="Ignore EXPR sim objects")
    option("--remote-gdb-port", type='int', default=7000,
           help="Remote gdb base port (set to 0 to disable listening)")

    # Help options
    group("Help Options")
    option("--list-sim-objects", action='store_true', default=False,
           help="List all built-in SimObjects, their params and default values")

    # load the options.py config file to allow people to set their own
    # default options
    options_file = config.get('options.py')
    if options_file:
        scope = { 'options' : options }
        # Python 2: execute the user's override file with `options` in scope.
        execfile(options_file, scope)

    arguments = options.parse_args()

    return options,arguments
def interact(scope):
    """Drop into an interactive prompt using `scope` as its namespace.

    Tries the IPython <=0.10 embedding API first, then the 0.11+ API, and
    finally falls back to the stdlib code.InteractiveConsole.
    """
    banner = "gem5 Interactive Console"

    ipshell = None
    prompt_in1 = "gem5 \\#> "
    prompt_out = "gem5 \\#: "

    # Is IPython version 0.10 or earlier available?
    try:
        from IPython.Shell import IPShellEmbed
        ipshell = IPShellEmbed(argv=["-prompt_in1", prompt_in1,
                                     "-prompt_out", prompt_out],
                               banner=banner, user_ns=scope)
    except ImportError:
        pass

    # Is IPython version 0.11 or later available?
    if not ipshell:
        try:
            import IPython
            from IPython.config.loader import Config
            from IPython.frontend.terminal.embed import InteractiveShellEmbed

            cfg = Config()
            cfg.PromptManager.in_template = prompt_in1
            cfg.PromptManager.out_template = prompt_out
            ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope,
                                            banner1=banner)
        except ImportError:
            pass

    if ipshell:
        ipshell()
    else:
        # Use the Python shell in the standard library if IPython
        # isn't available.
        code.InteractiveConsole(scope).interact(banner)
def main(*args):
    """Top-level gem5 entry point (Python 2).

    Called with no arguments it parses sys.argv itself; called with two it
    expects a pre-parsed (options, arguments) pair. It then handles the
    informational options, configures output redirection, stats, and debug
    tracing, and finally executes the user's simulation script.
    """
    import m5
    import core
    import debug
    import defines
    import event
    import info
    import stats
    import trace

    from util import fatal

    if len(args) == 0:
        options, arguments = parse_options()
    elif len(args) == 2:
        options, arguments = args
    else:
        raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)

    m5.options = options

    def check_tracing():
        # Guard for options that only make sense in a TRACING_ON build.
        if defines.TRACING_ON:
            return

        fatal("Tracing is not enabled. Compile with TRACING_ON")

    # Set the main event queue for the main thread.
    event.mainq = event.getEventQueue(0)
    event.setEventQueue(event.mainq)

    if not os.path.isdir(options.outdir):
        os.makedirs(options.outdir)

    # These filenames are used only if the redirect_std* options are set
    stdout_file = os.path.join(options.outdir, options.stdout_file)
    stderr_file = os.path.join(options.outdir, options.stderr_file)

    # Print redirection notices here before doing any redirection
    if options.redirect_stdout and not options.redirect_stderr:
        print "Redirecting stdout and stderr to", stdout_file
    else:
        if options.redirect_stdout:
            print "Redirecting stdout to", stdout_file
        if options.redirect_stderr:
            print "Redirecting stderr to", stderr_file

    # Now redirect stdout/stderr as desired
    if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stdout.fileno())
        # Without -e, stderr shares the stdout file.
        if not options.redirect_stderr:
            os.dup2(redir_fd, sys.stderr.fileno())

    if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stderr.fileno())

    # Informational options: any that fire set `done` and exit afterwards.
    done = False

    if options.build_info:
        done = True
        print 'Build information:'
        print
        print 'compiled %s' % defines.compileDate;
        print 'build options:'
        keys = defines.buildEnv.keys()
        keys.sort()
        for key in keys:
            val = defines.buildEnv[key]
            print ' %s = %s' % (key, val)
        print

    if options.copyright:
        done = True
        print info.COPYING
        print

    if options.readme:
        done = True
        print 'Readme:'
        print
        print info.README
        print

    if options.debug_help:
        done = True
        check_tracing()
        debug.help()

    if options.list_sim_objects:
        import SimObject
        done = True
        print "SimObjects:"
        objects = SimObject.allClasses.keys()
        objects.sort()
        for name in objects:
            obj = SimObject.allClasses[name]
            print " %s" % obj
            params = obj._params.keys()
            params.sort()
            for pname in params:
                param = obj._params[pname]
                default = getattr(param, 'default', '')
                print " %s" % pname
                if default:
                    print " default: %s" % default
                print " desc: %s" % param.desc
                print
            print

    if done:
        sys.exit(0)

    # setting verbose and quiet at the same time doesn't make sense
    if options.verbose > 0 and options.quiet > 0:
        options.usage(2)

    verbose = options.verbose - options.quiet
    if verbose >= 0:
        # Startup banner with build/run provenance.
        print "gem5 Simulator System. http://gem5.org"
        print brief_copyright
        print

        print "gem5 compiled %s" % defines.compileDate;
        print "gem5 started %s" % \
            datetime.datetime.now().strftime("%b %e %Y %X")
        print "gem5 executing on %s" % socket.gethostname()
        print "command line:",
        for argv in sys.argv:
            print argv,
        print

    # check to make sure we can find the listed script
    if not arguments or not os.path.isfile(arguments[0]):
        if arguments and not os.path.isfile(arguments[0]):
            print "Script %s not found" % arguments[0]

        options.usage(2)

    # tell C++ about output directory
    core.setOutputDir(options.outdir)

    # update the system path with elements from the -p option
    sys.path[0:0] = options.path

    # set stats options
    stats.initText(options.stats_file)

    # set debugging options
    debug.setRemoteGDBPort(options.remote_gdb_port)
    for when in options.debug_break:
        debug.schedBreak(int(when))

    if options.debug_flags:
        check_tracing()

        on_flags = []
        off_flags = []
        for flag in options.debug_flags:
            # A leading '-' disables the flag instead of enabling it.
            off = False
            if flag.startswith('-'):
                flag = flag[1:]
                off = True

            if flag not in debug.flags:
                print >>sys.stderr, "invalid debug flag '%s'" % flag
                sys.exit(1)

            if off:
                debug.flags[flag].disable()
            else:
                debug.flags[flag].enable()

    if options.debug_start:
        check_tracing()
        # Defer enabling trace output until the requested tick.
        e = event.create(trace.enable, event.Event.Debug_Enable_Pri)
        event.mainq.schedule(e, options.debug_start)
    else:
        trace.enable()

    trace.output(options.debug_file)

    for ignore in options.debug_ignore:
        check_tracing()
        trace.ignore(ignore)

    # Make the script see only its own arguments and its own directory first.
    sys.argv = arguments
    sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path

    filename = sys.argv[0]
    filedata = file(filename, 'r').read()
    filecode = compile(filedata, filename, 'exec')
    scope = { '__file__' : filename,
              '__name__' : '__m5_main__' }

    # we want readline if we're doing anything interactive
    if options.interactive or options.pdb:
        exec "import readline" in scope

    # if pdb was requested, execfile the thing under pdb, otherwise,
    # just do the execfile normally
    if options.pdb:
        import pdb
        import traceback

        pdb = pdb.Pdb()
        try:
            pdb.run(filecode, scope)
        except SystemExit:
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            # Walk to the innermost frame before handing control to pdb.
            t = sys.exc_info()[2]
            while t.tb_next is not None:
                t = t.tb_next
            pdb.interaction(t.tb_frame,t)
    else:
        exec filecode in scope

    # once the script is done
    if options.interactive:
        interact(scope)
if __name__ == '__main__':
    # Debug aid: parse the command line and dump the resulting options and
    # arguments without starting a simulation.
    from pprint import pprint

    options, arguments = parse_options()

    print 'opts:'
    pprint(options, indent=4)
    print
    print 'args:'
    pprint(arguments, indent=4)
|
hoangt/tpzsimul.gem5
|
src/python/m5/main.py
|
Python
|
bsd-3-clause
| 13,849
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android-specific, installs pre-built profilers."""
import logging
import os
from telemetry import decorators
from telemetry.util import support_binaries
# Directory on the device where profiler binaries are installed. This is a
# device-side (POSIX) path and must never be manipulated with host path
# semantics.
_DEVICE_PROFILER_DIR = '/data/local/tmp/profilers/'


def GetDevicePath(profiler_binary):
  """Returns the on-device install path for the given profiler binary."""
  import posixpath
  # Use posixpath explicitly: os.path.join on a Windows host only produced a
  # correct device path because _DEVICE_PROFILER_DIR happens to end in '/'.
  return posixpath.join(_DEVICE_PROFILER_DIR, os.path.basename(profiler_binary))
@decorators.Cache
def InstallOnDevice(device, profiler_binary):
  """Pushes the prebuilt profiler binary for the device's ABI to the device.

  Args:
    device: The device object the binary is installed on.
    profiler_binary: Name of the profiler binary to install.

  Returns:
    True if the binary was installed and made executable, False if no
    prebuilt binary exists for the device's ABI.
  """
  arch_name = device.GetABI()
  host_path = support_binaries.FindPath(profiler_binary, arch_name, 'android')
  if not host_path:
    # Fixed: the original interpolated host_path, which is exactly the value
    # known to be falsy in this branch; name the missing binary instead.
    logging.error('Profiler binary "%s" not found. Could not be installed',
                  profiler_binary)
    return False

  device_binary_path = GetDevicePath(profiler_binary)
  device.PushChangedFiles([(host_path, device_binary_path)])
  device.RunShellCommand('chmod 777 ' + device_binary_path)
  return True
|
markYoungH/chromium.src
|
tools/telemetry/telemetry/core/platform/profiler/android_prebuilt_profiler_helper.py
|
Python
|
bsd-3-clause
| 998
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapper for use with hadoop-streaming bigquery word-count example.
Reads each line of input and writes out lines each containing
a single word and the number 1.
The input lines consist of two tab-separated fields:
1. the record number
2. JSON data
We pick one field of the JSON and use its value as the word to output.
"""
import re
import sys
def main(args):
  """Read tab-separated (key, JSON) records from stdin and print one
  "word TAB 1" line per record whose JSON contains the requested field.

  Args:
    args: argv-style list; args[1] names the JSON field whose value is
        emitted as the word.
  """
  # Set up the pattern that we use to extract our field. The field name is
  # escaped so names containing regex metacharacters match literally.
  field_name = args[1]
  field_pattern = '\\{.*"(' + re.escape(field_name) + ')":"([^"]*)".*\\}'
  field_extractor = re.compile(field_pattern)
  for line in sys.stdin:
    line = line.strip()
    key_and_json = line.split('\t', 1)
    if len(key_and_json) < 2:
      # Skip blank/malformed lines instead of crashing on a missing tab.
      continue
    json_text = key_and_json[1]
    matches = field_extractor.match(json_text)
    if matches:
      word = matches.group(2)
      if word:
        # Parenthesized so the output is identical under Python 2 and 3.
        print('%s\t%s' % (word, 1))


if __name__ == '__main__':
  main(sys.argv)
|
gruter/bdutil
|
samples/word_count_mapper.py
|
Python
|
apache-2.0
| 1,454
|
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_admin_service
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestAdminServiceModule(TestNvosModule):
    """Unit tests verifying the CLI command strings built by pn_admin_service."""

    module = pn_admin_service

    def setUp(self):
        # Patch run_cli so no command is actually sent to a switch; the patch
        # is routed to run_cli_patch via load_fixtures below.
        self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_admin_service.run_cli')
        self.run_nvos_commands = self.mock_run_nvos_commands.start()

    def tearDown(self):
        self.mock_run_nvos_commands.stop()

    def run_cli_patch(self, module, cli, state_map):
        # Stand-in for run_cli: echo the constructed CLI string back through
        # the module result so tests can assert on it.
        if state_map['update'] == 'admin-service-modify':
            results = dict(
                changed=True,
                cli_cmd=cli
            )
            module.exit_json(**results)

    def load_fixtures(self, commands=None, state=None, transport='cli'):
        self.run_nvos_commands.side_effect = self.run_cli_patch

    def test_admin_service_modify_t1(self):
        # Disabling a boolean service should render as its 'no-' form.
        set_module_args({'pn_cliswitch': 'sw01', 'pn__if': 'mgmt',
                         'pn_web': 'False', 'state': 'update'})
        result = self.execute_module(changed=True, state='update')
        expected_cmd = ' switch sw01 admin-service-modify if mgmt no-web '
        self.assertEqual(result['cli_cmd'], expected_cmd)

    def test_admin_service_modify_t2(self):
        # Multiple enabled services are appended in order.
        set_module_args({'pn_cliswitch': 'sw01', 'pn__if': 'mgmt',
                         'pn_snmp': 'True', 'pn_net_api': 'True', 'pn_ssh': 'True', 'state': 'update'})
        result = self.execute_module(changed=True, state='update')
        expected_cmd = ' switch sw01 admin-service-modify if mgmt snmp ssh net-api '
        self.assertEqual(result['cli_cmd'], expected_cmd)

    def test_admin_service_modify_t3(self):
        # Value-carrying options (web-port) render as 'name value'.
        set_module_args({'pn_cliswitch': 'sw01', 'pn__if': 'data',
                         'pn_web_port': '8080', 'pn_net_api': 'True', 'pn_web_log': 'True', 'state': 'update'})
        result = self.execute_module(changed=True, state='update')
        expected_cmd = ' switch sw01 admin-service-modify if data web-port 8080 net-api web-log '
        self.assertEqual(result['cli_cmd'], expected_cmd)
|
kvar/ansible
|
test/units/modules/network/netvisor/test_pn_admin_service.py
|
Python
|
gpl-3.0
| 2,394
|
"""
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
import logging
import itertools
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from model_utils.models import TimeStampedModel
from student.models import user_by_anonymous_id
from submissions.models import score_set, score_reset
from openedx.core.djangoapps.call_stack_manager import CallStackManager, CallStackMixin
from xmodule_django.models import CourseKeyField, LocationKeyField, BlockTypeKeyField # pylint: disable=import-error
# The module logs under the "edx.courseware" logger. (A redundant preceding
# `log = logging.getLogger(__name__)` assignment was dead code — it was
# immediately overwritten — and has been removed.)
log = logging.getLogger("edx.courseware")
def chunks(items, chunk_size):
    """
    Yields the values from items in chunks of size chunk_size
    """
    # Materialize `items` so it can be sliced even if a generator was passed.
    items = list(items)
    # Python 2 (`xrange`); returns a lazy generator of list slices.
    return (items[i:i + chunk_size] for i in xrange(0, len(items), chunk_size))
class ChunkingManager(models.Manager):
    """
    :class:`~Manager` that adds an additional method :meth:`chunked_filter` to provide
    the ability to make select queries with specific chunk sizes.
    """
    def chunked_filter(self, chunk_field, items, **kwargs):
        """
        Queries model_class with `chunk_field` set to chunks of size `chunk_size`,
        and all other parameters from `**kwargs`.

        This works around a limitation in sqlite3 on the number of parameters
        that can be put into a single query.

        Arguments:
            chunk_field (str): The name of the field to chunk the query on.
            items: The values for of chunk_field to select. This is chunked into ``chunk_size``
                chunks, and passed as the value for the ``chunk_field`` keyword argument to
                :meth:`~Manager.filter`. This implies that ``chunk_field`` should be an
                ``__in`` key.
            chunk_size (int): The size of chunks to pass. Defaults to 500.
        """
        chunk_size = kwargs.pop('chunk_size', 500)
        # One filter() per chunk, lazily chained into a single iterable.
        # NOTE: `+ kwargs.items()` relies on Python 2 returning a list from
        # dict.items().
        res = itertools.chain.from_iterable(
            self.filter(**dict([(chunk_field, chunk)] + kwargs.items()))
            for chunk in chunks(items, chunk_size)
        )
        return res
class ChunkingCallStackManager(CallStackManager, ChunkingManager):
    """
    A derived class of ChunkingManager, and CallStackManager.

    Combines chunked filtering with call-stack tracking; adds no behavior of
    its own.
    """
    pass
class StudentModule(CallStackMixin, models.Model):
    """
    Keeps student state for a particular module in a particular course.
    """
    objects = ChunkingCallStackManager()

    MODEL_TAGS = ['course_id', 'module_type']

    # For a homework problem, contains a JSON
    # object consisting of state
    MODULE_TYPES = (('problem', 'problem'),
                    ('video', 'video'),
                    ('html', 'html'),
                    ('course', 'course'),
                    ('chapter', 'Section'),
                    ('sequential', 'Subsection'),
                    ('library_content', 'Library Content'))

    ## These three are the key for the object
    module_type = models.CharField(max_length=32, choices=MODULE_TYPES, default='problem', db_index=True)

    # Key used to share state. This is the XBlock usage_id
    module_state_key = LocationKeyField(max_length=255, db_index=True, db_column='module_id')
    student = models.ForeignKey(User, db_index=True)

    course_id = CourseKeyField(max_length=255, db_index=True)

    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('student', 'module_state_key', 'course_id'),)

    # Internal state of the object
    state = models.TextField(null=True, blank=True)

    # Grade, and are we done?
    grade = models.FloatField(null=True, blank=True, db_index=True)
    max_grade = models.FloatField(null=True, blank=True)
    # Completion status codes stored in `done`.
    DONE_TYPES = (
        ('na', 'NOT_APPLICABLE'),
        ('f', 'FINISHED'),
        ('i', 'INCOMPLETE'),
    )
    done = models.CharField(max_length=8, choices=DONE_TYPES, default='na', db_index=True)

    created = models.DateTimeField(auto_now_add=True, db_index=True)
    modified = models.DateTimeField(auto_now=True, db_index=True)

    @classmethod
    def all_submitted_problems_read_only(cls, course_id):
        """
        Return all model instances that correspond to problems that have been
        submitted for a given course. So module_type='problem' and a non-null
        grade. Use a read replica if one exists for this environment.
        """
        queryset = cls.objects.filter(
            course_id=course_id,
            module_type='problem',
            grade__isnull=False
        )
        if "read_replica" in settings.DATABASES:
            return queryset.using("read_replica")
        else:
            return queryset

    def __repr__(self):
        return 'StudentModule<%r>' % ({
            'course_id': self.course_id,
            'module_type': self.module_type,
            # We use the student_id instead of username to avoid a database hop.
            # This can actually matter in cases where we're logging many of
            # these (e.g. on a broken progress page).
            'student_id': self.student_id,  # pylint: disable=no-member
            'module_state_key': self.module_state_key,
            'state': str(self.state)[:20],
        },)

    def __unicode__(self):
        # Python 2 string representation; mirrors __repr__.
        return unicode(repr(self))
class StudentModuleHistory(CallStackMixin, models.Model):
    """Keeps a complete history of state changes for a given XModule for a given
    Student. Right now, we restrict this to problems so that the table doesn't
    explode in size."""
    objects = CallStackManager()
    # Only modules of these types have their state changes recorded
    # (enforced by the save_history post_save receiver below).
    HISTORY_SAVING_TYPES = {'problem'}

    class Meta(object):  # pylint: disable=missing-docstring
        get_latest_by = "created"

    student_module = models.ForeignKey(StudentModule, db_index=True)
    version = models.CharField(max_length=255, null=True, blank=True, db_index=True)

    # This should be populated from the modified field in StudentModule
    created = models.DateTimeField(db_index=True)
    state = models.TextField(null=True, blank=True)
    grade = models.FloatField(null=True, blank=True)
    max_grade = models.FloatField(null=True, blank=True)
@receiver(post_save, sender=StudentModule)
def save_history(sender, instance, **kwargs): # pylint: disable=no-self-argument, unused-argument
"""
Checks the instance's module_type, and creates & saves a
StudentModuleHistory entry if the module_type is one that
we save.
"""
if instance.module_type in StudentModuleHistory.HISTORY_SAVING_TYPES:
history_entry = StudentModuleHistory(student_module=instance,
version=None,
created=instance.modified,
state=instance.state,
grade=instance.grade,
max_grade=instance.max_grade)
history_entry.save()
class XBlockFieldBase(models.Model):
    """
    Base class for all XBlock field storage.
    """
    objects = ChunkingManager()
    class Meta(object):  # pylint: disable=missing-docstring
        abstract = True
    # The name of the field
    field_name = models.CharField(max_length=64, db_index=True)
    # The value of the field. Defaults to None dumped as json
    value = models.TextField(default='null')
    created = models.DateTimeField(auto_now_add=True, db_index=True)
    modified = models.DateTimeField(auto_now=True, db_index=True)
    def __unicode__(self):
        # Shows the class name plus all field values except the timestamps.
        # NOTE(review): the format string opens '<' but never closes '>', so
        # output looks like "Cls<{...}" -- confirm this is intentional.
        return u'{}<{!r}'.format(
            self.__class__.__name__,
            {
                key: getattr(self, key)
                for key in self._meta.get_all_field_names()
                if key not in ('created', 'modified')
            }
        )
class XModuleUserStateSummaryField(XBlockFieldBase):
    """
    Stores data set in the Scope.user_state_summary scope by an xmodule field.

    One row per (usage_id, field_name) pair; the value itself lives on the
    inherited XBlockFieldBase columns.
    """
    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('usage_id', 'field_name'),)
    # The definition id for the module
    usage_id = LocationKeyField(max_length=255, db_index=True)
class XModuleStudentPrefsField(XBlockFieldBase):
    """
    Stores data set in the Scope.preferences scope by an xmodule field.

    Preferences are keyed per student per block *type* (not per block
    instance), hence the module_type column instead of a usage id.
    """
    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('student', 'module_type', 'field_name'),)
    # The type of the module for these preferences
    module_type = BlockTypeKeyField(max_length=64, db_index=True)
    student = models.ForeignKey(User, db_index=True)
class XModuleStudentInfoField(XBlockFieldBase):
    """
    Stores data set in the Scope.user_info scope by an xmodule field.

    (The previous docstring said Scope.preferences — a copy-paste from the
    sibling class; this table is keyed per student only, with no module_type
    or usage_id column, matching user-wide info rather than preferences.)
    """
    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('student', 'field_name'),)
    student = models.ForeignKey(User, db_index=True)
class OfflineComputedGrade(models.Model):
    """
    Table of grades computed offline for a given user and course.

    One row per (user, course_id); the full grade breakdown is stored as a
    JSON blob in ``gradeset``.
    """
    user = models.ForeignKey(User, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    updated = models.DateTimeField(auto_now=True, db_index=True)
    gradeset = models.TextField(null=True, blank=True)  # grades, stored as JSON
    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('user', 'course_id'), )
    def __unicode__(self):
        return "[OfflineComputedGrade] %s: %s (%s) = %s" % (self.user, self.course_id, self.created, self.gradeset)
class OfflineComputedGradeLog(models.Model):
    """
    Log of when offline grades are computed.
    Use this to be able to show instructor when the last computed grades were done.
    """
    class Meta(object):  # pylint: disable=missing-docstring
        ordering = ["-created"]
        get_latest_by = "created"
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    seconds = models.IntegerField(default=0)  # seconds elapsed for computation
    nstudents = models.IntegerField(default=0)  # how many students were graded in this run
    def __unicode__(self):
        return "[OCGLog] %s: %s" % (self.course_id.to_deprecated_string(), self.created)  # pylint: disable=no-member
class StudentFieldOverride(TimeStampedModel):
    """
    Holds the value of a specific field overridden for a student. This is used
    by the code in the `courseware.student_field_overrides` module to provide
    overrides of xblock fields on a per user basis.
    """
    course_id = CourseKeyField(max_length=255, db_index=True)
    location = LocationKeyField(max_length=255, db_index=True)
    student = models.ForeignKey(User, db_index=True)
    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = (('course_id', 'field', 'location', 'student'),)
    # Name of the overridden xblock field and its value as JSON text.
    field = models.CharField(max_length=255)
    value = models.TextField(default='null')
# Signal that indicates that a user's score for a problem has been updated.
# This signal is generated when a scoring event occurs either within the core
# platform or in the Submissions module. Note that this signal will be triggered
# regardless of the new and previous values of the score (i.e. it may be the
# case that this signal is generated when a user re-attempts a problem but
# receives the same score). Receivers get the keyword arguments listed below.
SCORE_CHANGED = Signal(
    providing_args=[
        'points_possible',  # Maximum score available for the exercise
        'points_earned',   # Score obtained by the user
        'user_id',  # Integer User ID
        'course_id',  # Unicode string representing the course
        'usage_id'  # Unicode string indicating the courseware instance
    ]
)
@receiver(score_set)
def submissions_score_set_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Consume the score_set signal defined in the Submissions API, and convert it
    to a SCORE_CHANGED signal defined in this module. Converts the unicode keys
    for user, course and item into the standard representation for the
    SCORE_CHANGED signal.

    This method expects that the kwargs dictionary will contain the following
    entries (See the definition of score_set):
      - 'points_possible': integer,
      - 'points_earned': integer,
      - 'anonymous_user_id': unicode,
      - 'course_id': unicode,
      - 'item_id': unicode
    """
    points_possible = kwargs.get('points_possible', None)
    points_earned = kwargs.get('points_earned', None)
    course_id = kwargs.get('course_id', None)
    usage_id = kwargs.get('item_id', None)
    user = None
    if 'anonymous_user_id' in kwargs:
        user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))

    # Compare against None explicitly rather than using truthiness: a score of
    # 0 points (earned or possible) is a legitimate value and the previous
    # all(...) check wrongly treated it as a missing kwarg and dropped the event.
    if (user is not None and points_possible is not None
            and points_earned is not None and course_id is not None
            and usage_id is not None):
        SCORE_CHANGED.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            user_id=user.id,
            course_id=course_id,
            usage_id=usage_id
        )
    else:
        # At least one required kwarg was missing (or the user lookup failed).
        log.exception(
            u"Failed to process score_set signal from Submissions API. "
            "points_possible: %s, points_earned: %s, user: %s, course_id: %s, "
            "usage_id: %s", points_possible, points_earned, user, course_id, usage_id
        )
@receiver(score_reset)
def submissions_score_reset_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Consume the score_reset signal defined in the Submissions API and re-emit
    it as a SCORE_CHANGED signal reporting a score of 0/0, translating the
    Submissions API's unicode identifiers into this module's conventions.

    Expected kwargs (see the definition of score_reset):
      - 'anonymous_user_id': unicode,
      - 'course_id': unicode,
      - 'item_id': unicode
    """
    course_id = kwargs.get('course_id')
    usage_id = kwargs.get('item_id')
    user = None
    if 'anonymous_user_id' in kwargs:
        user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))

    # Any missing kwarg (or failed user lookup) leaves one of these falsy.
    if user and course_id and usage_id:
        SCORE_CHANGED.send(
            sender=None,
            points_possible=0,
            points_earned=0,
            user_id=user.id,
            course_id=course_id,
            usage_id=usage_id
        )
    else:
        log.exception(
            u"Failed to process score_reset signal from Submissions API. "
            "user: %s, course_id: %s, usage_id: %s", user, course_id, usage_id
        )
|
rismalrv/edx-platform
|
lms/djangoapps/courseware/models.py
|
Python
|
agpl-3.0
| 15,447
|
from ._mod1_1_1_1_0_0 import *
from ._mod1_1_1_1_0_1 import *
from ._mod1_1_1_1_0_2 import *
from ._mod1_1_1_1_0_3 import *
from ._mod1_1_1_1_0_4 import *
|
asedunov/intellij-community
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_1/_pkg1_1_1_1_0/__init__.py
|
Python
|
apache-2.0
| 154
|
from setuptools import setup
setup(
name='click-example-termui',
version='1.0',
py_modules=['termui'],
include_package_data=True,
install_requires=[
'click',
# Colorama is only required for Windows.
'colorama',
],
entry_points='''
[console_scripts]
termui=termui:cli
''',
)
|
Akasurde/click
|
examples/termui/setup.py
|
Python
|
bsd-3-clause
| 347
|
"""
FastCGI server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is a adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""runfcgi:
Run this project as a fastcgi application. To do this, the
flup package from http://www.saddi.com/software/flup/ is
required.
Usage:
django-admin.py runfcgi --settings=yourproject.settings [fcgi settings]
manage.py runfcgi [fcgi settings]
Optional Fcgi settings: (setting=value)
host=HOSTNAME hostname to listen on..
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork)
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for webservers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a fastcgi server on a TCP host/port
$ manage.py runfcgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
}
def fastcgi_help(message=None):
    """Print the FastCGI usage text plus an optional extra message; always return False."""
    print FASTCGI_HELP
    if message:
        print message
    return False
def runfastcgi(argset=[], **kwargs):
    """Parse FastCGI options and serve the Django WSGI handler under flup.

    Options start from FASTCGI_OPTIONS defaults, are overridden by **kwargs
    and then by "key=value" strings in argset. Returns False on usage errors
    (after printing help); otherwise blocks in the flup WSGIServer run loop.
    """
    # NOTE(review): argset=[] is a mutable default; harmless here because the
    # list is only iterated, never mutated.
    options = FASTCGI_OPTIONS.copy()
    options.update(kwargs)
    for x in argset:
        if "=" in x:
            k, v = x.split('=', 1)
        else:
            # A bare token (no '=') is treated as a boolean flag.
            k, v = x, True
        options[k.lower()] = v
    if "help" in options:
        return fastcgi_help()
    try:
        import flup
    except ImportError, e:
        print >> sys.stderr, "ERROR: %s" % e
        print >> sys.stderr, "  Unable to load the flup package. In order to run django"
        print >> sys.stderr, "  as a FastCGI application, you will need to get flup from"
        print >> sys.stderr, "  http://www.saddi.com/software/flup/ If you've already"
        print >> sys.stderr, "  installed flup, then make sure you have it in your PYTHONPATH."
        return False
    # Choose the flup server flavour; the two flavours take slightly
    # different sizing options (processes vs. threads).
    if options['method'] in ('prefork', 'fork'):
        from flup.server.fcgi_fork import WSGIServer
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxChildren': int(options["maxchildren"]),
            'maxRequests': int(options["maxrequests"]),
        }
    elif options['method'] in ('thread', 'threaded'):
        from flup.server.fcgi import WSGIServer
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxThreads': int(options["maxchildren"]),
        }
    else:
        return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
    wsgi_opts['debug'] = False  # Turn off flup tracebacks
    # Prep up and go
    from django.core.handlers.wsgi import WSGIHandler
    # Exactly one of host+port / socket / neither (inherited fd) is allowed.
    if options["host"] and options["port"] and not options["socket"]:
        wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
    elif options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = options["socket"]
    elif not options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = None
    else:
        return fastcgi_help("Invalid combination of host, port, socket.")
    if options["daemonize"] is None:
        # Default to daemonizing if we're running on a socket/named pipe.
        daemonize = (wsgi_opts['bindAddress'] is not None)
    else:
        if options["daemonize"].lower() in ('true', 'yes', 't'):
            daemonize = True
        elif options["daemonize"].lower() in ('false', 'no', 'f'):
            daemonize = False
        else:
            return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
    if daemonize:
        from django.utils.daemonize import become_daemon
        become_daemon(our_home_dir=options["workdir"])
    # Write the pid after daemonizing so the daemon's pid is recorded.
    if options["pidfile"]:
        fp = open(options["pidfile"], "w")
        fp.write("%d\n" % os.getpid())
        fp.close()
    WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-0.96/django/core/servers/fastcgi.py
|
Python
|
bsd-3-clause
| 5,506
|
from django.contrib.auth import models as auth_app
from django.contrib.auth.management import create_permissions
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.test import TestCase
from guardian.utils import clean_orphan_obj_perms
from guardian.shortcuts import assign
class OrphanedObjectPermissionsTest(TestCase):
    """Verify that object permissions whose targets were deleted are cleaned up."""
    def setUp(self):
        # Create objects for which we would assign obj perms
        self.target_user1 = User.objects.create(username='user1')
        self.target_group1 = Group.objects.create(name='group1')
        self.target_obj1 = ContentType.objects.create(name='ct1', model='foo',
            app_label='fake-for-guardian-tests')
        self.target_obj2 = ContentType.objects.create(name='ct2', model='bar',
            app_label='fake-for-guardian-tests')
        # Required if MySQL backend is used :/
        create_permissions(auth_app, [], 1)
        self.user = User.objects.create(username='user')
        self.group = Group.objects.create(name='group')
    def test_clean_perms(self):
        # assign obj perms
        target_perms = {
            self.target_user1: ["change_user"],
            self.target_group1: ["delete_group"],
            self.target_obj1: ["change_contenttype", "delete_contenttype"],
            self.target_obj2: ["change_contenttype"],
        }
        obj_perms_count = sum([len(val) for key, val in target_perms.items()])
        for target, perms in target_perms.items():
            # NOTE: __old_pk is name-mangled to _OrphanedObjectPermissionsTest__old_pk;
            # that's fine because it is both set and read inside this class.
            target.__old_pk = target.pk  # Store pkeys
            for perm in perms:
                assign(perm, self.user, target)
        # Remove targets
        for target, perms in target_perms.items():
            target.delete()
        # Clean orphans
        removed = clean_orphan_obj_perms()
        self.assertEqual(removed, obj_perms_count)
        # Recreate targets (restoring their original pks) and check that the
        # user no longer holds any of the previously assigned permissions.
        for target, perms in target_perms.items():
            target.pk = target.__old_pk
            target.save()
            for perm in perms:
                self.assertFalse(self.user.has_perm(perm, target))
    def test_clean_perms_command(self):
        """
        Same test as the one above but rather function directly, we call
        management command instead.
        """
        # assign obj perms
        target_perms = {
            self.target_user1: ["change_user"],
            self.target_group1: ["delete_group"],
            self.target_obj1: ["change_contenttype", "delete_contenttype"],
            self.target_obj2: ["change_contenttype"],
        }
        for target, perms in target_perms.items():
            target.__old_pk = target.pk  # Store pkeys
            for perm in perms:
                assign(perm, self.user, target)
        # Remove targets
        for target, perms in target_perms.items():
            target.delete()
        # Clean orphans
        call_command("clean_orphan_obj_perms", verbosity=0)
        # Recreate targets and check if user has no permissions
        for target, perms in target_perms.items():
            target.pk = target.__old_pk
            target.save()
            for perm in perms:
                self.assertFalse(self.user.has_perm(perm, target))
|
k4ml/jazzpos
|
vendor/guardian/tests/orphans_test.py
|
Python
|
mit
| 3,329
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import sys
import traceback
from jinja2.exceptions import TemplateNotFound
# TODO: not needed if we use the cryptography library with its default RNG
# engine
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
from ansible.errors import AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['WorkerProcess']
class WorkerProcess(multiprocessing.Process):
    '''
    The worker thread class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.
    '''
    def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
        super(WorkerProcess, self).__init__()
        # takes a task queue manager as the sole param:
        self._rslt_q = rslt_q
        self._task_vars = task_vars
        self._host = host
        self._task = task
        self._play_context = play_context
        self._loader = loader
        self._variable_manager = variable_manager
        self._shared_loader_obj = shared_loader_obj
        # dupe stdin, if we have one
        self._new_stdin = sys.stdin
        try:
            fileno = sys.stdin.fileno()
            if fileno is not None:
                try:
                    self._new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass
        except (AttributeError, ValueError):
            # couldn't get stdin's fileno, so we just carry on
            pass
    def run(self):
        '''
        Called when the process is started. Pushes the result onto the
        results queue. We also remove the host from the blocked hosts list, to
        signify that they are ready for their next task.
        '''
        #import cProfile, pstats, StringIO
        #pr = cProfile.Profile()
        #pr.enable()
        # Re-seed PyCrypto's RNG in the forked child so workers don't share
        # random state with the parent (see Crypto.Random.atfork import above).
        if HAS_ATFORK:
            atfork()
        try:
            # execute the task and build a TaskResult from the result
            display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
            executor_result = TaskExecutor(
                self._host,
                self._task,
                self._task_vars,
                self._play_context,
                self._new_stdin,
                self._loader,
                self._shared_loader_obj,
                self._rslt_q
            ).run()
            display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
            # Clear host vars/groups before building the result -- presumably
            # to shrink what gets serialized onto the queue (TODO confirm).
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                executor_result,
                task_fields=self._task.dump_attrs(),
            )
            # put the result on the result queue
            display.debug("sending task result")
            self._rslt_q.put(task_result)
            display.debug("done sending task result")
        except AnsibleConnectionFailure:
            # Connection failures become an "unreachable" result for the host.
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                dict(unreachable=True),
                task_fields=self._task.dump_attrs(),
            )
            self._rslt_q.put(task_result, block=False)
        except Exception as e:
            # Report any other error as a failed task result, except for
            # process-control exceptions -- unless it's a TemplateNotFound,
            # which is reported even though it subclasses IOError.
            if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
                try:
                    self._host.vars = dict()
                    self._host.groups = []
                    task_result = TaskResult(
                        self._host.name,
                        self._task._uuid,
                        dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
                        task_fields=self._task.dump_attrs(),
                    )
                    self._rslt_q.put(task_result, block=False)
                except:
                    # Last resort: even building/queueing the failure result
                    # failed, so just log what we can and exit the worker.
                    display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                    display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
        display.debug("WORKER PROCESS EXITING")
        #pr.disable()
        #s = StringIO.StringIO()
        #sortby = 'time'
        #ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        #ps.print_stats()
        #with open('worker_%06d.stats' % os.getpid(), 'w') as f:
        #    f.write(s.getvalue())
|
bjolivot/ansible
|
lib/ansible/executor/process/worker.py
|
Python
|
gpl-3.0
| 5,884
|
def foo(x):
"""
:type x: collections.MutableMapping
:rtype: dict
"""
return {v: k for k, v in x.iteritems()}
d = dict(a=1, b=2)
foo(d)
l = [i for i in range(10)]
foo(<warning descr="Expected type 'MutableMapping', got 'List[int]' instead">l</warning>)
|
asedunov/intellij-community
|
python/testData/inspections/PyTypeCheckerInspection/MutableMapping.py
|
Python
|
apache-2.0
| 273
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""head python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import head
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
head.__all__ = [s for s in dir(head) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.canned.head import *
|
ghchinoy/tensorflow
|
tensorflow/python/estimator/canned/head.py
|
Python
|
apache-2.0
| 1,268
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al.
description:
- "The standard package manager for SmartOS, but also usable on NetBSD
or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
version_added: "1.0"
author:
- "Larry Gilbert (@L2G)"
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
well. New to Ansible 1.9: check-mode support."
options:
name:
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
default: present
update_cache:
description:
- Update repository database. Can be run with other steps or on it's own.
type: bool
default: 'no'
version_added: "2.1"
upgrade:
description:
- Upgrade main packages to their newer versions
type: bool
default: 'no'
version_added: "2.1"
full_upgrade:
description:
- Upgrade all packages to their newer versions
type: bool
default: 'no'
version_added: "2.1"
clean:
description:
- Clean packages cache
type: bool
default: 'no'
version_added: "2.1"
force:
description:
- Force package reinstall
type: bool
default: 'no'
version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
- pkgin:
name: foo
state: present
# Update database and install "foo" package
- pkgin:
name: foo
update_cache: yes
# remove package foo
- pkgin:
name: foo
state: absent
# remove packages foo and bar
- pkgin:
name: foo,bar
state: absent
# Update repositories as a separate step
- pkgin:
update_cache: yes
# Upgrade main packages (equivalent to C(pkgin upgrade))
- pkgin:
upgrade: yes
# Upgrade all packages (equivalent to C(pkgin full-upgrade))
- pkgin:
full_upgrade: yes
# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
- pkgin:
full_upgrade: yes
force: yes
# clean packages cache (equivalent to C(pkgin clean))
- pkgin:
clean: yes
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, name):
    """Search for the package by name.
    Possible return values:
    * "present"  - installed, no upgrade needed
    * "outdated" - installed, but can be upgraded
    * False      - not installed or not found
    """
    # test whether '-p' (parsable) flag is supported.
    rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
    if rc == 0:
        pflag = '-p'
        splitchar = ';'
    else:
        # Older pkgin without -p: fall back to whitespace-separated output.
        pflag = ''
        splitchar = ' '
    # Use "pkgin search" to find the package. The regular expression will
    # only match on the complete name.
    rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
    # rc will not be 0 unless the search was a success
    if rc == 0:
        # Search results may contain more than one line (e.g., 'emacs'), so iterate
        # through each line to see if we have a match.
        packages = out.split('\n')
        for package in packages:
            # Break up line at spaces. The first part will be the package with its
            # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
            # of the package:
            # ''  - not installed
            # '<' - installed but out of date
            # '=' - installed and up to date
            # '>' - installed but newer than the repository version
            pkgname_with_version, raw_state = package.split(splitchar)[0:2]
            # Search for package, stripping version
            # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
            pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
            # Do not proceed unless we have a match
            if not pkg_search_obj:
                continue
            # Grab matched string
            pkgname_without_version = pkg_search_obj.group(1)
            # Skip near-misses: the regex anchored search can still surface
            # other packages whose name merely contains the query.
            if name != pkgname_without_version:
                continue
            # The package was found; now return its state
            if raw_state == '<':
                return 'outdated'
            elif raw_state == '=' or raw_state == '>':
                return 'present'
            else:
                return False
            # no fall-through
    # No packages were matched, so return False
    return False
def format_action_message(module, action, count):
    """Return a human-readable summary such as "removed 2 packages".

    ``action`` is a past-tense verb and ``count`` the number of packages it
    applied to (pluralization handled here). In check mode the message is
    prefixed with "would have", since nothing was actually changed.
    """
    # Renamed from ``vars`` to avoid shadowing the builtin of the same name.
    msg_vars = {"actioned": action,
                "count": count}

    if module.check_mode:
        message = "would have %(actioned)s %(count)d package" % msg_vars
    else:
        message = "%(actioned)s %(count)d package" % msg_vars

    if count == 1:
        return message
    else:
        return message + "s"
def format_pkgin_command(module, command, package=None):
    """Build the pkgin command line for ``command``, optionally on ``package``."""
    # Commands with no package argument get an empty string. Some commands
    # (e.g. 'update') ignore extra arguments, but that behaviour cannot be
    # relied on for others, so the placeholder keeps the template uniform.
    pkg_arg = "" if package is None else package
    force_flag = "-F" if module.params["force"] else ""

    cmd_parts = {"pkgin": PKGIN_PATH,
                 "command": command,
                 "package": pkg_arg,
                 "force": force_flag}

    # Check mode runs pkgin dry (-n); otherwise answer yes (-y) automatically.
    if module.check_mode:
        return "%(pkgin)s -n %(command)s %(package)s" % cmd_parts
    return "%(pkgin)s -y %(force)s %(command)s %(package)s" % cmd_parts
def remove_packages(module, packages):
    """Remove each named package and exit the module with a summary."""
    removed_count = 0

    # One package at a time so a failure can name the offending package.
    for pkg in packages:
        # Nothing to do if it isn't installed.
        if not query_package(module, pkg):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "remove", pkg))

        # Outside check mode the package must actually be gone now.
        if not module.check_mode and query_package(module, pkg):
            module.fail_json(msg="failed to remove %s: %s" % (pkg, out))

        removed_count += 1

    if removed_count > 0:
        module.exit_json(changed=True, msg=format_action_message(module, "removed", removed_count))
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages):
    """Install each named package and exit the module with a summary."""
    installed_count = 0

    for pkg in packages:
        # Skip anything already present.
        if query_package(module, pkg):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "install", pkg))

        # Outside check mode the package must actually be installed now.
        if not module.check_mode and not query_package(module, pkg):
            module.fail_json(msg="failed to install %s: %s" % (pkg, out))

        installed_count += 1

    if installed_count > 0:
        module.exit_json(changed=True, msg=format_action_message(module, "installed", installed_count))
    module.exit_json(changed=False, msg="package(s) already present")
def update_package_db(module):
    """Run ``pkgin update`` and return a (changed, message) tuple.

    Returns ``(False, ...)`` when the repository database was already
    current and ``(True, ...)`` when it was refreshed; fails the module
    if pkgin exits non-zero.
    """
    rc, out, err = module.run_command(
        format_pkgin_command(module, "update"))

    if rc == 0:
        if re.search('database for.*is up-to-date\n$', out):
            # Fixed typo in the reported message ("datebase" -> "database").
            return False, "database is up-to-date"
        else:
            return True, "updated repository database"
    else:
        module.fail_json(msg="could not update package db")
def do_upgrade_packages(module, full=False):
    """Run ``pkgin upgrade`` (or ``pkgin full-upgrade`` when ``full`` is True).

    Exits the module early (changed=False) when pkgin reports nothing to do
    and fails it when pkgin returns non-zero; otherwise simply returns so the
    caller (main) can report the result.
    """
    if full:
        cmd = "full-upgrade"
    else:
        cmd = "upgrade"

    rc, out, err = module.run_command(
        format_pkgin_command(module, cmd))

    if rc == 0:
        if re.search('^nothing to do.\n$', out):
            module.exit_json(changed=False, msg="nothing left to upgrade")
        # NOTE(review): when upgrades actually ran (rc == 0, output differs)
        # this function falls through without exit_json; main() then reports
        # changed=True only when no package names were given -- confirm that
        # this is the intended reporting behaviour.
    else:
        module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
    """Upgrade main packages (``pkgin upgrade``)."""
    do_upgrade_packages(module)
def full_upgrade_packages(module):
    """Upgrade all packages (``pkgin full-upgrade``)."""
    do_upgrade_packages(module, True)
def clean_cache(module):
    """Run ``pkgin clean`` and exit the module with the outcome."""
    rc, out, err = module.run_command(
        format_pkgin_command(module, "clean"))

    if rc != 0:
        module.fail_json(msg="could not clean package cache")

    # pkgin gives no indication of whether 'clean' actually removed anything,
    # so assume it did.
    module.exit_json(changed=True, msg="cleaned caches")
def main():
    """Entry point: parse module parameters and dispatch to the pkgin actions.

    Actions run in a fixed order -- update_cache, upgrade, full_upgrade,
    clean, then install/remove -- and each intermediate action exits the
    module early only when no package names were supplied.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=["present", "absent"]),
            name=dict(aliases=["pkg"], type='list'),
            update_cache=dict(default='no', type='bool'),
            upgrade=dict(default='no', type='bool'),
            full_upgrade=dict(default='no', type='bool'),
            clean=dict(default='no', type='bool'),
            force=dict(default='no', type='bool')),
        required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
        supports_check_mode=True)
    global PKGIN_PATH
    PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
    # Force a predictable C locale so the helpers can parse pkgin output.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    p = module.params
    if p["update_cache"]:
        c, msg = update_package_db(module)
        # Exit now only if refreshing the cache was the sole requested action.
        if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
            module.exit_json(changed=c, msg=msg)
    if p["upgrade"]:
        upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded packages')
    if p["full_upgrade"]:
        full_upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded all packages')
    if p["clean"]:
        clean_cache(module)
        if not p['name']:
            module.exit_json(changed=True, msg='cleaned caches')
    pkgs = p["name"]
    if p["state"] == "present":
        install_packages(module, pkgs)
    elif p["state"] == "absent":
        remove_packages(module, pkgs)
if __name__ == '__main__':
main()
|
pilou-/ansible
|
lib/ansible/modules/packaging/os/pkgin.py
|
Python
|
gpl-3.0
| 10,996
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware provided and used by Horizon.
"""
import json
import logging
import time
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME # noqa
from django.contrib.auth.views import redirect_to_login # noqa
from django.contrib import messages as django_messages
from django import http
from django import shortcuts
from django.utils.encoding import iri_to_uri # noqa
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from openstack_auth import utils as auth_utils
from openstack_auth import views as auth_views
import six
from horizon import exceptions
from horizon.utils import functions as utils
LOG = logging.getLogger(__name__)
class HorizonMiddleware(object):
    """The main Horizon middleware class. Required for use of Horizon."""

    # Message shown to the user on the login page after a forced logout
    # (e.g. "Session timed out."). Set by _logout() and reused when an AJAX
    # redirect to the logout URL is converted into a 401 response.
    logout_reason = None

    def _check_has_timed_timeout(self, request):
        """Check for session timeout and return timestamp.

        Returns a ``(has_timed_out, timestamp)`` tuple, where *timestamp*
        is the current Unix time used to refresh ``last_activity``.
        """
        has_timed_out = False
        # Activate timezone handling
        tz = request.session.get('django_timezone')
        if tz:
            timezone.activate(tz)
        try:
            timeout = settings.SESSION_TIMEOUT
        except AttributeError:
            # Default to 30 minutes when SESSION_TIMEOUT is not configured.
            timeout = 1800
        last_activity = request.session.get('last_activity', None)
        timestamp = int(time.time())
        if (
            hasattr(request, "user")
            and hasattr(request.user, "token")
            and not auth_utils.is_token_valid(request.user.token)
        ):
            # The user was logged in, but his keystone token expired.
            has_timed_out = True
        if isinstance(last_activity, int):
            if (timestamp - last_activity) > timeout:
                has_timed_out = True
        if has_timed_out:
            # BUGFIX: pass a default. The token-expiry branch above can set
            # has_timed_out even when 'last_activity' was never recorded in
            # the session, and pop() without a default raises KeyError then.
            request.session.pop('last_activity', None)
        return (has_timed_out, timestamp)

    def _logout(self, request, login_url=None, message=None):
        """Logout a user and display a logout message."""
        response = auth_views.logout(request, login_url)
        if message is not None:
            self.logout_reason = message
            utils.add_logout_reason(request, response, message)
        return response

    def process_request(self, request):
        """Adds data necessary for Horizon to function to the request."""
        request.horizon = {'dashboard': None,
                           'panel': None,
                           'async_messages': []}
        if not hasattr(request, "user") or not request.user.is_authenticated():
            # proceed no further if the current request is already known
            # not to be authenticated
            # it is CRITICAL to perform this check as early as possible
            # to avoid creating too many sessions
            return None

        # Check for session timeout if user is (or was) authenticated.
        has_timed_out, timestamp = self._check_has_timed_timeout(request)
        if has_timed_out:
            return self._logout(request, request.path, _("Session timed out."))

        if request.is_ajax():
            # if the request is Ajax we do not want to proceed, as clients can
            # 1) create pages with constant polling, which can create race
            # conditions when a page navigation occurs
            # 2) might leave a user seemingly left logged in forever
            # 3) thrashes db backed session engines with tons of changes
            return None

        # If we use cookie-based sessions, check that the cookie size does not
        # reach the max size accepted by common web browsers.
        if (
            settings.SESSION_ENGINE ==
            'django.contrib.sessions.backends.signed_cookies'
        ):
            max_cookie_size = getattr(
                settings, 'SESSION_COOKIE_MAX_SIZE', None)
            session_cookie_name = getattr(
                settings, 'SESSION_COOKIE_NAME', None)
            session_key = request.COOKIES.get(session_cookie_name)
            if max_cookie_size is not None and session_key is not None:
                cookie_size = sum((
                    len(key) + len(value)
                    for key, value in six.iteritems(request.COOKIES)
                ))
                if cookie_size >= max_cookie_size:
                    LOG.error(
                        'Total Cookie size for user_id: %(user_id)s is '
                        '%(cookie_size)sB >= %(max_cookie_size)sB. '
                        'You need to configure file-based or database-backed '
                        'sessions instead of cookie-based sessions: '
                        'http://docs.openstack.org/developer/horizon/topics/'
                        'deployment.html#session-storage'
                        % {
                            'user_id': request.session.get(
                                'user_id', 'Unknown'),
                            'cookie_size': cookie_size,
                            'max_cookie_size': max_cookie_size,
                        }
                    )

        # We have a valid session, so we set the timestamp
        request.session['last_activity'] = timestamp

    def process_exception(self, request, exception):
        """Catches internal Horizon exception classes such as NotAuthorized,
        NotFound and Http302 and handles them gracefully.
        """
        if isinstance(exception, (exceptions.NotAuthorized,
                                  exceptions.NotAuthenticated)):
            auth_url = settings.LOGIN_URL
            next_url = iri_to_uri(request.get_full_path())
            if next_url != auth_url:
                field_name = REDIRECT_FIELD_NAME
            else:
                field_name = None
            login_url = request.build_absolute_uri(auth_url)
            response = redirect_to_login(next_url, login_url=login_url,
                                         redirect_field_name=field_name)
            if request.is_ajax():
                # AJAX callers cannot follow a normal redirect; signal the
                # target via a header and a 401 status instead.
                response_401 = http.HttpResponse(status=401)
                response_401['X-Horizon-Location'] = response['location']
                return response_401

            return response

        # If an internal "NotFound" error gets this far, return a real 404.
        if isinstance(exception, exceptions.NotFound):
            raise http.Http404(exception)

        if isinstance(exception, exceptions.Http302):
            # TODO(gabriel): Find a way to display an appropriate message to
            # the user *on* the login form...
            return shortcuts.redirect(exception.location)

    def process_response(self, request, response):
        """Convert HttpResponseRedirect to HttpResponse if request is via ajax
        to allow ajax request to redirect url
        """
        if request.is_ajax() and hasattr(request, 'horizon'):
            queued_msgs = request.horizon['async_messages']
            if type(response) == http.HttpResponseRedirect:
                # Drop our messages back into the session as per usual so they
                # don't disappear during the redirect. Note that we explicitly
                # use django's messages methods here.
                for tag, message, extra_tags in queued_msgs:
                    getattr(django_messages, tag)(request, message, extra_tags)
                if response['location'].startswith(settings.LOGOUT_URL):
                    redirect_response = http.HttpResponse(status=401)
                    # This header is used for handling the logout in JS
                    redirect_response['logout'] = True
                    if self.logout_reason is not None:
                        utils.add_logout_reason(
                            request, redirect_response, self.logout_reason)
                else:
                    redirect_response = http.HttpResponse()
                # Use a set while checking if we want a cookie's attributes
                # copied
                cookie_keys = set(('max_age', 'expires', 'path', 'domain',
                                   'secure', 'httponly', 'logout_reason'))
                # Copy cookies from HttpResponseRedirect towards HttpResponse
                for cookie_name, cookie in six.iteritems(response.cookies):
                    cookie_kwargs = dict((
                        (key, value) for key, value in six.iteritems(cookie)
                        if key in cookie_keys and value
                    ))
                    redirect_response.set_cookie(
                        cookie_name, cookie.value, **cookie_kwargs)
                redirect_response['X-Horizon-Location'] = response['location']
                return redirect_response
            if queued_msgs:
                # TODO(gabriel): When we have an async connection to the
                # client (e.g. websockets) this should be pushed to the
                # socket queue rather than being sent via a header.
                # The header method has notable drawbacks (length limits,
                # etc.) and is not meant as a long-term solution.
                response['X-Horizon-Messages'] = json.dumps(queued_msgs)
        return response
|
wangxiangyu/horizon
|
horizon/middleware.py
|
Python
|
apache-2.0
| 9,937
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Signs and zipaligns split APKs.
This script is require only by GYP (not GN).
"""
import optparse
import sys
import finalize_apk
from util import build_utils
def main():
  """Sign and zipalign every requested density- and language-split APK.

  Command-line flags supply the base input/output paths and keystore data.
  For each requested split, the shared options object is patched with that
  split's input/output paths and handed to finalize_apk.FinalizeApk.
  """
  parser = optparse.OptionParser()
  parser.add_option('--zipalign-path', help='Path to the zipalign tool.')
  parser.add_option('--resource-packaged-apk-path',
                    help='Base path to input .ap_s.')
  parser.add_option('--base-output-path',
                    help='Path to output .apk, minus extension.')
  parser.add_option('--key-path', help='Path to keystore for signing.')
  parser.add_option('--key-passwd', help='Keystore password')
  parser.add_option('--key-name', help='Keystore name')
  parser.add_option('--densities',
                    help='Comma separated list of densities finalize.')
  parser.add_option('--languages',
                    help='GYP list of language splits to finalize.')
  options, _ = parser.parse_args()
  options.load_library_from_zip = 0

  def _finalize_one(split_kind, split_value):
    # Point the shared options at this split's files, then sign/zipalign it.
    options.unsigned_apk_path = '%s_%s' % (
        options.resource_packaged_apk_path, split_value)
    options.final_apk_path = '%s-%s-%s.apk' % (
        options.base_output_path, split_kind, split_value)
    finalize_apk.FinalizeApk(options)

  if options.densities:
    for density in options.densities.split(','):
      _finalize_one('density', density)

  if options.languages:
    for lang in build_utils.ParseGypList(options.languages):
      _finalize_one('lang', lang)
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/android/gyp/finalize_splits.py
|
Python
|
mit
| 1,786
|
# Settings consumed by python-social-auth's Pyramid integration. They are
# merged into the Pyramid registry settings by ``includeme`` below.
SOCIAL_AUTH_SETTINGS = {
    'SOCIAL_AUTH_LOGIN_URL': '/',
    'SOCIAL_AUTH_LOGIN_REDIRECT_URL': '/done',
    'SOCIAL_AUTH_USER_MODEL': 'example.models.User',
    'SOCIAL_AUTH_LOGIN_FUNCTION': 'example.auth.login_user',
    'SOCIAL_AUTH_LOGGEDIN_FUNCTION': 'example.auth.login_required',
    # Backends are tried in the order listed here during authentication.
    'SOCIAL_AUTH_AUTHENTICATION_BACKENDS': (
        'social.backends.twitter.TwitterOAuth',
        'social.backends.open_id.OpenIdAuth',
        'social.backends.google.GoogleOpenId',
        'social.backends.google.GoogleOAuth2',
        'social.backends.google.GoogleOAuth',
        'social.backends.yahoo.YahooOpenId',
        'social.backends.stripe.StripeOAuth2',
        'social.backends.persona.PersonaAuth',
        'social.backends.facebook.FacebookOAuth2',
        'social.backends.facebook.FacebookAppOAuth2',
        'social.backends.yahoo.YahooOAuth',
        'social.backends.angel.AngelOAuth2',
        'social.backends.behance.BehanceOAuth2',
        'social.backends.bitbucket.BitbucketOAuth',
        'social.backends.box.BoxOAuth2',
        'social.backends.linkedin.LinkedinOAuth',
        'social.backends.github.GithubOAuth2',
        'social.backends.foursquare.FoursquareOAuth2',
        'social.backends.instagram.InstagramOAuth2',
        'social.backends.live.LiveOAuth2',
        'social.backends.vk.VKOAuth2',
        'social.backends.dailymotion.DailymotionOAuth2',
        'social.backends.disqus.DisqusOAuth2',
        'social.backends.dropbox.DropboxOAuth',
        'social.backends.eveonline.EVEOnlineOAuth2',
        'social.backends.evernote.EvernoteSandboxOAuth',
        'social.backends.fitbit.FitbitOAuth',
        'social.backends.flickr.FlickrOAuth',
        'social.backends.livejournal.LiveJournalOpenId',
        'social.backends.soundcloud.SoundcloudOAuth2',
        'social.backends.thisismyjam.ThisIsMyJamOAuth1',
        'social.backends.stocktwits.StocktwitsOAuth2',
        'social.backends.tripit.TripItOAuth',
        'social.backends.twilio.TwilioAuth',
        'social.backends.clef.ClefOAuth2',
        'social.backends.xing.XingOAuth',
        'social.backends.yandex.YandexOAuth2',
        'social.backends.podio.PodioOAuth2',
        'social.backends.reddit.RedditOAuth2',
        'social.backends.mineid.MineIDOAuth2',
        'social.backends.wunderlist.WunderlistOAuth2',
    )
}
def includeme(config):
    """Pyramid inclusion hook: install the social auth settings.

    Merges SOCIAL_AUTH_SETTINGS into the application registry's settings
    mapping, overwriting any keys already present there.
    """
    registry_settings = config.registry.settings
    registry_settings.update(SOCIAL_AUTH_SETTINGS)
|
joelstanner/python-social-auth
|
examples/pyramid_example/example/settings.py
|
Python
|
bsd-3-clause
| 2,415
|
from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import re
from pip._vendor.six import string_types
from . import base
from .._utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build a TreeWalker bound to the given ElementTree implementation.

    Returns ``locals()`` so that ``moduleFactoryFactory`` can wrap the
    namespace (including the TreeWalker class) as a cached, module-like
    object — one per ElementTree variant.
    """
    ElementTree = ElementTreeImplementation
    # Comment nodes carry a callable as their tag; create one throwaway
    # comment to capture the tag object this implementation uses.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(base.NonRecursiveTreeWalker):  # pylint:disable=unused-variable
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:
        1. The current element
        2. The index of the element relative to its parent
        3. A stack of ancestor elements
        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify *node* and return the (type, data...) tuple that the
            # base TreeWalker protocol expects.
            if isinstance(node, tuple):  # It might be the root Element
                elt, _, _, flag = node
                if flag in ("text", "tail"):
                    return base.TEXT, getattr(elt, flag)
                else:
                    node = elt
            # An ElementTree (not an Element): walk from its root element.
            if not(hasattr(node, "tag")):
                node = node.getroot()
            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (base.DOCUMENT,)
            elif node.tag == "<!DOCTYPE>":
                return (base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))
            elif node.tag == ElementTreeCommentType:
                return base.COMMENT, node.text
            else:
                assert isinstance(node.tag, string_types), type(node.tag)
                # This is assumed to be an ordinary element
                # Split off a {namespace} prefix if present.
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)
        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                # Bare element (the walk root): synthesize the tuple form.
                element, key, parents, flag = node, None, [], None
            if flag in ("text", "tail"):
                # Text pseudo-nodes are leaves.
                return None
            else:
                if element.text:
                    # An element's text logically precedes its child elements.
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                # After the text comes the first child element, if any.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # The element's tail text comes before its next sibling.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None
        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                if not parents:
                    return element
                else:
                    # The parent of an element's text is the element itself.
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    assert list(parents[-1]).count(parent) == 1
                    return parent, list(parents[-1]).index(parent), parents, None
    # Hand the whole namespace to moduleFactoryFactory (see getETreeModule).
    return locals()
# Memoized factory: getETreeModule(ElementTreeImplementation) returns a
# module-like object whose TreeWalker is bound to that ElementTree variant.
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/etree.py
|
Python
|
mit
| 4,684
|
from django.core.cache import cache
from django.template import Context, Engine, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from ..utils import setup
class CacheTagTests(SimpleTestCase):
    """Tests for the ``{% cache %}`` template tag.

    NOTE(review): ``self.engine`` is presumably created per-test by the
    ``@setup`` decorator from ``..utils`` using the ``libraries`` mapping
    below — confirm against that helper.
    """
    libraries = {
        'cache': 'django.templatetags.cache',
        'custom': 'template_tests.templatetags.custom',
    }
    def tearDown(self):
        # Tests share the default cache backend; clear it between tests so
        # cached fragments do not leak from one test into the next.
        cache.clear()
    @setup({'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}'})
    def test_cache03(self):
        # First render: nothing cached yet, so the template body is returned.
        output = self.engine.render_to_string('cache03')
        self.assertEqual(output, 'cache03')
    @setup({
        'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}',
        'cache04': '{% load cache %}{% cache 2 test %}cache04{% endcache %}',
    })
    def test_cache04(self):
        # Same fragment name 'test': the second template gets the cached
        # content rendered by the first one.
        self.engine.render_to_string('cache03')
        output = self.engine.render_to_string('cache04')
        self.assertEqual(output, 'cache03')
    @setup({'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}'})
    def test_cache05(self):
        output = self.engine.render_to_string('cache05', {'foo': 1})
        self.assertEqual(output, 'cache05')
    @setup({'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}'})
    def test_cache06(self):
        output = self.engine.render_to_string('cache06', {'foo': 2})
        self.assertEqual(output, 'cache06')
    @setup({
        'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}',
        'cache07': '{% load cache %}{% cache 2 test foo %}cache07{% endcache %}',
    })
    def test_cache07(self):
        # Same fragment name and same vary-on value: cache hit.
        context = {'foo': 1}
        self.engine.render_to_string('cache05', context)
        output = self.engine.render_to_string('cache07', context)
        self.assertEqual(output, 'cache05')
    @setup({
        'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}',
        'cache08': '{% load cache %}{% cache time test foo %}cache08{% endcache %}',
    })
    def test_cache08(self):
        """
        Allow first argument to be a variable.
        """
        context = {'foo': 2, 'time': 2}
        self.engine.render_to_string('cache06', context)
        output = self.engine.render_to_string('cache08', context)
        self.assertEqual(output, 'cache06')
    # Raise exception if we don't have at least 2 args, first one integer.
    @setup({'cache11': '{% load cache %}{% cache %}{% endcache %}'})
    def test_cache11(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('cache11')
    @setup({'cache12': '{% load cache %}{% cache 1 %}{% endcache %}'})
    def test_cache12(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('cache12')
    @setup({'cache13': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache13(self):
        # Non-integer timeout with no such context variable: error at render.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache13')
    @setup({'cache14': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache14(self):
        # Timeout variable resolving to a non-numeric string: error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache14', {'foo': 'fail'})
    @setup({'cache15': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache15(self):
        # Timeout variable resolving to a non-numeric object: error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache15', {'foo': []})
    @setup({'cache16': '{% load cache %}{% cache 1 foo bar %}{% endcache %}'})
    def test_cache16(self):
        """
        Regression test for #7460.
        """
        output = self.engine.render_to_string('cache16', {'foo': 'foo', 'bar': 'with spaces'})
        self.assertEqual(output, '')
    @setup({'cache17': '{% load cache %}{% cache 10 long_cache_key poem %}Some Content{% endcache %}'})
    def test_cache17(self):
        """
        Regression test for #11270.
        """
        # The long vary-on value forces the tag to hash the cache key.
        output = self.engine.render_to_string(
            'cache17',
            {
                'poem': (
                    'Oh freddled gruntbuggly/Thy micturations are to me/'
                    'As plurdled gabbleblotchits/On a lurgid bee/'
                    'That mordiously hath bitled out/Its earted jurtles/'
                    'Into a rancid festering/Or else I shall rend thee in the gobberwarts'
                    'with my blurglecruncheon/See if I don\'t.'
                ),
            }
        )
        self.assertEqual(output, 'Some Content')
    @setup({'cache18': '{% load cache custom %}{% cache 2|noop:"x y" cache18 %}cache18{% endcache %}'})
    def test_cache18(self):
        """
        Test whitespace in filter arguments
        """
        output = self.engine.render_to_string('cache18')
        self.assertEqual(output, 'cache18')
    @setup({
        'first': '{% load cache %}{% cache None fragment19 %}content{% endcache %}',
        'second': '{% load cache %}{% cache None fragment19 %}not rendered{% endcache %}'
    })
    def test_none_timeout(self):
        """A timeout of None means "cache forever"."""
        output = self.engine.render_to_string('first')
        self.assertEqual(output, 'content')
        output = self.engine.render_to_string('second')
        self.assertEqual(output, 'content')
class CacheTests(SimpleTestCase):
    """Tests for the ``{% cache %}`` tag that need a hand-built Engine."""
    @classmethod
    def setUpClass(cls):
        # One Engine for the whole class; only the cache library is loaded.
        cls.engine = Engine(libraries={'cache': 'django.templatetags.cache'})
        super().setUpClass()
    def test_cache_regression_20130(self):
        # The parsed node must keep the bare fragment name.
        t = self.engine.from_string('{% load cache %}{% cache 1 regression_20130 %}foo{% endcache %}')
        cachenode = t.nodelist[1]
        self.assertEqual(cachenode.fragment_name, 'regression_20130')
    @override_settings(CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'default',
        },
        'template_fragments': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'fragments',
        },
    })
    def test_cache_fragment_cache(self):
        """
        When a cache called "template_fragments" is present, the cache tag
        will use it in preference to 'default'
        """
        t1 = self.engine.from_string('{% load cache %}{% cache 1 fragment %}foo{% endcache %}')
        t2 = self.engine.from_string('{% load cache %}{% cache 1 fragment using="default" %}bar{% endcache %}')
        ctx = Context()
        o1 = t1.render(ctx)
        o2 = t2.render(ctx)
        # Same fragment name but different backends: no cross-contamination.
        self.assertEqual(o1, 'foo')
        self.assertEqual(o2, 'bar')
    def test_cache_missing_backend(self):
        """
        When a cache that doesn't exist is specified, the cache tag will
        raise a TemplateSyntaxError
        """
        t = self.engine.from_string('{% load cache %}{% cache 1 backend using="unknown" %}bar{% endcache %}')
        ctx = Context()
        with self.assertRaises(TemplateSyntaxError):
            t.render(ctx)
|
wkschwartz/django
|
tests/template_tests/syntax_tests/test_cache.py
|
Python
|
bsd-3-clause
| 7,021
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: this file is evaluated as a plain dict literal by
# the module loader — metadata only, no executable logic.
{
    'name': 'Delivery Costs',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
Allows you to add delivery methods in sale orders and picking.
==============================================================
You can define your own carrier and delivery grids for prices. When creating
invoices from picking, OpenERP is able to add and compute the shipping line.
""",
    'author': 'OpenERP SA',
    'depends': ['sale_stock'],
    # XML/CSV data files loaded on install, in order.
    'data': [
        'security/ir.model.access.csv',
        'delivery_view.xml',
        'partner_view.xml',
        'delivery_data.xml',
        'views/report_shipping.xml',
    ],
    'demo': ['delivery_demo.xml'],
    'test': ['test/delivery_cost.yml'],
    'installable': True,
    'auto_install': False,
    'images': ['images/1_delivery_method.jpeg','images/2_delivery_pricelist.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/addons/delivery/__openerp__.py
|
Python
|
apache-2.0
| 1,902
|
"""
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/metrics/__init__.py
|
Python
|
gpl-2.0
| 3,440
|
# -*- coding: utf-8 -*-
"""dependenpy node module."""
import json
import sys
from .structures import Graph, Matrix, TreeMap
class NodeMixin(object):
    """Behavior shared by DSM, Package and Module objects."""

    @property
    def ismodule(self):
        """Whether this node is a Module (subclasses override this)."""
        return False

    @property
    def ispackage(self):
        """Whether this node is a Package (subclasses override this)."""
        return False

    @property
    def isdsm(self):
        """Whether this node is a DSM (subclasses override this)."""
        return False
class RootNode(object):
"""Shared code between DSM and Package."""
def __init__(self, build_tree=True):
"""
Initialization method.
Args:
build_tree (bool): whether to immediately build the tree or not.
"""
self._target_cache = {}
self._item_cache = {}
self._contains_cache = {}
self._matrix_cache = {}
self._graph_cache = {}
self._treemap_cache = None
self.modules = []
self.packages = []
if build_tree:
self.build_tree()
def __contains__(self, item):
"""
Get result of _contains, cache it and return it.
Args:
item (Package/Module): a package or module.
Returns:
bool: True if self contains item, False otherwise.
"""
if item not in self._contains_cache:
self._contains_cache[item] = self._contains(item)
return self._contains_cache[item]
def __getitem__(self, item):
"""
Return the corresponding Package or Module object.
Args:
item (str): name of the package/module, dot-separated.
Returns:
Package/Module: corresponding object.
"""
depth = item.count(".") + 1
parts = item.split(".", 1)
for m in self.modules:
if parts[0] == m.name:
if depth == 1:
return m
for p in self.packages:
if parts[0] == p.name:
if depth == 1:
return p
item = p.get(parts[1])
if item:
return item
raise KeyError(item)
def __bool__(self):
"""
Node as Boolean.
Returns:
bool: result of node.empty.
"""
return bool(self.modules or self.packages)
@property
def empty(self):
"""
Whether the node has neither modules nor packages.
Returns:
bool: True if empty, False otherwise.
"""
return not bool(self)
@property
def submodules(self):
"""
Property to return all sub-modules of the node, recursively.
Returns:
list of Module: the sub-modules.
"""
submodules = []
submodules.extend(self.modules)
for p in self.packages:
submodules.extend(p.submodules)
return submodules
def build_tree(self):
"""To be overridden."""
raise NotImplementedError
def _contains(self, item):
"""
Whether given item is contained inside the node modules/packages.
Args:
item (Package/Module): a package or module.
Returns:
bool: True if self is item or item in self's packages/modules.
"""
if self is item:
return True
for m in self.modules:
if item in m:
return True
for p in self.packages:
if item in p:
return True
return False
def get(self, item):
"""
Get item through ``__getitem__`` and cache the result.
Args:
item (str): name of package or module.
Returns:
Package/Module: the corresponding object.
"""
if item not in self._item_cache:
try:
item = self.__getitem__(item)
except KeyError:
item = None
self._item_cache[item] = item
return self._item_cache[item]
def get_target(self, target):
"""
Get the result of _get_target, cache it and return it.
Args:
target (str): target to find.
Returns:
Package/Module: package containing target or corresponding module.
"""
if target not in self._target_cache:
self._target_cache[target] = self._get_target(target)
return self._target_cache[target]
def _get_target(self, target):
"""
Get the Package or Module related to given target.
Args:
target (str): target to find.
Returns:
Package/Module: package containing target or corresponding module.
"""
depth = target.count(".") + 1
parts = target.split(".", 1)
for m in self.modules:
if parts[0] == m.name:
if depth < 3:
return m
for p in self.packages:
if parts[0] == p.name:
if depth == 1:
return p
# pylama:ignore=W0212
target = p._get_target(parts[1])
if target:
return target
# FIXME: can lead to internal dep instead of external
# see example with django.contrib.auth.forms
# importing forms from django
# Idea: when parsing files with ast, record what objects
# are defined in the module. Then check here if the given
# part is one of these objects.
if depth < 3:
return p
return None
def build_dependencies(self):
"""
Recursively build the dependencies for sub-modules and sub-packages.
Iterate on node's modules then packages and call their
build_dependencies methods.
"""
for m in self.modules:
m.build_dependencies()
for p in self.packages:
p.build_dependencies()
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
"""
Print the graph for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
depth (int): depth of the graph.
"""
graph = self.as_graph(depth=depth)
graph.print(format=format, output=output, **kwargs)
def print_matrix(self, format=None, output=sys.stdout, depth=0, **kwargs):
"""
Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
depth (int): depth of the matrix.
"""
matrix = self.as_matrix(depth=depth)
matrix.print(format=format, output=output, **kwargs)
def print_treemap(self, format=None, output=sys.stdout, **kwargs):
"""
Print the matrix for self's nodes.
Args:
format (str): output format (csv, json or text).
output (file): file descriptor on which to write.
"""
treemap = self.as_treemap()
treemap.print(format=format, output=output, **kwargs)
def _to_text(self, **kwargs):
indent = kwargs.pop("indent", 2)
base_indent = kwargs.pop("base_indent", None)
if base_indent is None:
base_indent = indent
indent = 0
text = [" " * indent + str(self) + "\n"]
new_indent = indent + base_indent
for m in self.modules:
text.append(m._to_text(indent=new_indent, base_indent=base_indent))
for p in self.packages:
text.append(p._to_text(indent=new_indent, base_indent=base_indent))
return "".join(text)
def _to_csv(self, **kwargs):
header = kwargs.pop("header", True)
modules = sorted(self.submodules, key=lambda x: x.absolute_name())
text = ["module,path,target,lineno,what,external\n" if header else ""]
for m in modules:
text.append(m._to_csv(header=False))
return "".join(text)
def _to_json(self, **kwargs):
return json.dumps(self.as_dict(), **kwargs)
def as_dict(self):
"""
Return the dependencies as a dictionary.
Returns:
dict: dictionary of dependencies.
"""
return {
"name": str(self),
"modules": [m.as_dict() for m in self.modules],
"packages": [p.as_dict() for p in self.packages],
}
def as_graph(self, depth=0):
"""
Create a graph with self as node, cache it, return it.
Args:
depth (int): depth of the graph.
Returns:
Graph: an instance of Graph.
"""
if depth in self._graph_cache:
return self._graph_cache[depth]
self._graph_cache[depth] = graph = Graph(self, depth=depth)
return graph
def as_matrix(self, depth=0):
    """
    Create a matrix with self as node, cache it per depth, return it.

    Args:
        depth (int): depth of the matrix.

    Returns:
        Matrix: an instance of Matrix.
    """
    try:
        return self._matrix_cache[depth]
    except KeyError:
        matrix = self._matrix_cache[depth] = Matrix(self, depth=depth)
        return matrix
def as_treemap(self):
    """
    Return the dependencies as a TreeMap, building it on first use.

    The result is cached: the first call builds the TreeMap, later calls
    return the same object.

    Returns:
        TreeMap: instance of TreeMap.
    """
    # Compare against None explicitly: the original truthiness test would
    # rebuild the TreeMap on every call if a built TreeMap happens to be
    # falsy (e.g. empty).
    # NOTE(review): assumes _treemap_cache is initialised to None — confirm
    # in __init__ (not visible in this chunk).
    if self._treemap_cache is None:
        self._treemap_cache = TreeMap(self)
    return self._treemap_cache
class LeafNode(object):
    """Behaviour shared by Package and Module nodes."""

    def __init__(self):
        """Initialization method."""
        # Lazily computed depth (see the ``depth`` property).
        self._depth_cache = None

    def __str__(self):
        """Return the dotted absolute name of the node."""
        return self.absolute_name()

    @property
    def root(self):
        """
        Property to return the root of this node.

        Returns:
            Package: this node's root package.
        """
        current = self
        while current.package is not None:
            current = current.package
        return current

    @property
    def depth(self):
        """
        Property to tell the depth of the node in the tree.

        Returns:
            int: the node's depth in the tree.
        """
        if self._depth_cache is None:
            level, current = 1, self
            while current.package is not None:
                level += 1
                current = current.package
            self._depth_cache = level
        return self._depth_cache

    def absolute_name(self, depth=0):
        """
        Return the absolute name of the node.

        Concatenate names from root to self within depth.

        Args:
            depth (int): maximum depth to go to.

        Returns:
            str: absolute name of the node (until given depth is reached).
        """
        node, level = self, self.depth
        if depth < 1:
            depth = level
        # Climb towards the root until within the requested depth.
        while level > depth and node.package is not None:
            node, level = node.package, level - 1
        parts = []
        while node is not None:
            parts.append(node.name)
            node = node.package
        parts.reverse()
        return ".".join(parts)
|
Pawamoy/dependenpy
|
src/dependenpy/node.py
|
Python
|
isc
| 11,560
|
from django import template
import sys
register = template.Library()


# NOTE(review): assignment_tag was deprecated in Django 1.9 and removed in
# 2.0 (use simple_tag) — confirm the target Django version before changing.
@register.assignment_tag
def wafer_form_helper(helper_name):
    """
    Find the specified Crispy FormHelper and instantiate it.

    Handy when you are crispyifying other apps' forms.

    Args:
        helper_name: dotted path to the helper class,
            e.g. ``"myapp.forms.MyHelper"``.

    Returns:
        A new instance of the named FormHelper class.
    """
    import importlib

    module_path, class_name = helper_name.rsplit('.', 1)
    # importlib.import_module returns the leaf module directly (and uses the
    # sys.modules cache itself), unlike __import__ which returns the
    # top-level package and forced the manual sys.modules lookup.
    module = importlib.import_module(module_path)
    helper_class = getattr(module, class_name)
    return helper_class()
|
CarlFK/wafer
|
wafer/registration/templatetags/wafer_crispy.py
|
Python
|
isc
| 466
|
#! /usr/bin/env python
# Baxter needs an orientation quaternion instead of a rotational matrix.
# This script will help you calculate that.
import math
import numpy


def rotation_to_quaternion(X, Y, Z):
    """Convert a rotation matrix, given as its three columns, to a quaternion.

    Args:
        X, Y, Z: the three normalised column vectors of the rotation
            matrix, each indexable as ``v[0, i]`` (e.g. a 1x3
            ``numpy.matrix``), i.e. ``R[i][j] == col_j[0, i]``.

    Returns:
        tuple: ``(x, y, z, w)`` components of the unit quaternion.

    Raises:
        ValueError: if ``1 + trace(R) <= 0``; the simple w-first formula
            below divides by ``w`` and takes a square root, so it breaks
            down there (a Shepperd-style branching method is needed).
    """
    trace_plus_one = 1 + X[0, 0] + Y[0, 1] + Z[0, 2]
    if trace_plus_one <= 0:
        raise ValueError("1 + trace(R) <= 0: w-first formula not applicable")
    w = math.sqrt(trace_plus_one) / 2
    # Off-diagonal differences: x = (R21 - R12)/4w, y = (R02 - R20)/4w,
    # z = (R10 - R01)/4w, with R[i][j] == col_j[0, i].
    x = (Y[0, 2] - Z[0, 1]) / (4 * w)
    y = (Z[0, 0] - X[0, 2]) / (4 * w)
    z = (X[0, 1] - Y[0, 0]) / (4 * w)
    return x, y, z, w


if __name__ == "__main__":
    # Example data: the columns of the rotation matrix you should have found.
    X = numpy.matrix([-0.35166069, 0.87973912, -0.31999036])
    Y = numpy.matrix([-0.84929275, -0.15604792, 0.50433211])
    Z = numpy.matrix([0.39374686, 0.44911927, 0.80203197])
    x, y, z, w = rotation_to_quaternion(X, Y, Z)
    # Then you can feed the x,y,z,w values to the inverse kinematics function.
    print("Resulting quaternion should be something like [0.7 0 0 0.7].")
    print("Calculated value:")
    print("x: " + str(x))
    print("y: " + str(y))
    print("z: " + str(z))
    print("w: " + str(w))
|
smdth/mimLab
|
rot2quaternion.py
|
Python
|
isc
| 978
|
import os
import unittest2
import mock
from flask import json, current_app
from flask_alfred_db import AlfredDB
from msgpack import packb
from alfred_db.models import Repository, Push, Base
from alfred_listener import create_app
from alfred_listener.database import db
from alfred_listener.helpers import parse_hook_data
# Locations of the test configuration and the recorded GitHub webhook
# payload fixture used by the test cases below.
TESTS_DIR = os.path.dirname(__file__)
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
CONFIG = os.path.join(TESTS_DIR, 'config.yml')
PAYLOAD = os.path.join(FIXTURES_DIR, 'payload.json')
class BaseTestCase(unittest2.TestCase):
    """Base test case: builds the Flask app, test client, request context and
    database schema around every test (Django-style _pre_setup/_post_teardown
    hooks driven from __call__)."""

    def __call__(self, result=None):
        # Wrap the normal unittest run so each test executes inside the Flask
        # test client context, with teardown guaranteed even if setup fails.
        try:
            self._pre_setup()
            with self.client:
                super(BaseTestCase, self).__call__(result)
        finally:
            self._post_teardown()

    def _pre_setup(self):
        # Fresh app, test client and pushed request context per test.
        self.app = self.create_app()
        self.client = self.app.test_client()
        self._ctx = self.app.test_request_context()
        self._ctx.push()
        # Create all tables on the test engine.
        Base.metadata.create_all(bind=db.engine)

    def _post_teardown(self):
        # Dispose of the scoped session and schema, then pop the context
        # (only if _pre_setup got far enough to push one).
        db.session_class.remove()
        Base.metadata.drop_all(bind=db.engine)
        if getattr(self, '_ctx', None) is not None:
            self._ctx.pop()

    def setUp(self):
        # Load the recorded webhook payload and its parsed form for reuse.
        with open(PAYLOAD) as f:
            self.payload = f.read()
        self.parsed_data = parse_hook_data(json.loads(self.payload))

    def create_app(self):
        return create_app(CONFIG)
class HookParserTestCase(BaseTestCase):
    """Checks that parse_hook_data extracts every field from the recorded
    GitHub push payload."""

    # assertIn replaces assertTrue('key' in d.keys()): same pass/fail
    # behaviour, idiomatic, and it reports the container on failure.

    def test_commit_hash(self):
        self.assertIn('commit_hash', self.parsed_data)
        self.assertEqual(
            self.parsed_data['commit_hash'],
            '2e7be88382545a9dc7a05b9d2e85a7041e311075',
        )

    def test_compare_url(self):
        self.assertIn('compare_url', self.parsed_data)
        self.assertEqual(
            self.parsed_data['compare_url'],
            'https://github.com/xobb1t/test/compare/a90ff8353403...2e7be8838254',
        )

    def test_ref(self):
        self.assertIn('ref', self.parsed_data)
        self.assertEqual(self.parsed_data['ref'], 'refs/heads/master')

    def test_committer(self):
        self.assertIn('committer_name', self.parsed_data)
        self.assertEqual(self.parsed_data['committer_name'], 'Dima Kukushkin')
        self.assertIn('committer_email', self.parsed_data)
        self.assertEqual(self.parsed_data['committer_email'],
                         'dima@kukushkin.me')

    def test_commit_message(self):
        self.assertIn('commit_message', self.parsed_data)
        self.assertEqual(self.parsed_data['commit_message'], 'Update README.md')
class WebhookHandlerTestCase(BaseTestCase):
    """Exercises the webhook endpoint's HTTP contract: allowed methods,
    required header, token auth and malformed payloads."""

    def setUp(self):
        super(WebhookHandlerTestCase, self).setUp()
        # A repository row whose token the hook URL must present.
        self.repo_token = 'test-token'
        self.repository = Repository(
            owner_type='user', owner_name='xobb1t', github_id=3213131,
            token=self.repo_token, url='https://github.com/xobb1t/repo',
            owner_id=312313123, name='repo'
        )
        db.session.add(self.repository)
        db.session.commit()
        # Patch zmq.Context so no real socket traffic happens; keep handles
        # to the mocked context/socket for later assertions.
        self.Context = mock.Mock()
        self.context = self.Context.return_value
        self.socket = self.context.socket.return_value
        self.context_patch = mock.patch('zmq.Context', self.Context)
        self.context_patch.start()

    def tearDown(self):
        super(WebhookHandlerTestCase, self).tearDown()
        db.session.delete(self.repository)
        db.session.commit()
        # Always undo the zmq.Context patch.
        self.context_patch.stop()

    def test_not_allowed(self):
        # Only POST is accepted on the hook endpoint.
        response = self.client.get('/')
        self.assertEqual(response.status_code, 405)
        response = self.client.post('/')
        self.assertNotEqual(response.status_code, 405)

    def test_not_acceptable(self):
        # Missing X-Github-Event header -> 400.
        response = self.client.post('/', data={'payload': self.payload})
        self.assertEqual(response.status_code, 400)

    def test_bad_request(self):
        headers = {'X-Github-Event': 'push'}
        # No payload at all -> 400.
        response = self.client.post('/', headers=headers)
        self.assertEqual(response.status_code, 400)
        # Truncated/invalid JSON payload -> 400.
        response = self.client.post('/?token={0}'.format(self.repo_token),
                                    headers=headers,
                                    data={'payload': '{"asd": 123'})
        self.assertEqual(response.status_code, 400)
        # Unknown token -> 404.
        response = self.client.post('/?token={0}'.format('blablabla'),
                                    headers=headers,
                                    data={'payload': self.payload})
        self.assertEqual(response.status_code, 404)

    def test_good_response_with_payload(self):
        # Valid token + header + payload -> 200.
        data = {'payload': self.payload}
        headers = {'X-Github-Event': 'push'}
        response = self.client.post('/?token={0}'.format(self.repo_token),
                                    headers=headers, data=data)
        self.assertEqual(response.status_code, 200)
class SavedDataTestCase(BaseTestCase):
    """Verifies what a successful webhook persists (the Push row) and what it
    publishes on the zmq socket to the coordinator."""

    def setUp(self):
        super(SavedDataTestCase, self).setUp()
        # Query matching the push created from the recorded payload.
        self.push_query = db.session.query(Push).filter_by(
            commit_hash='2e7be88382545a9dc7a05b9d2e85a7041e311075'
        )
        self.repo_token = 'test-token'
        self.repository = Repository(
            owner_type='user', owner_name='xobb1t', github_id=3213131,
            token=self.repo_token, url='https://github.com/xobb1t/repo',
            owner_id=312313123, name='repo'
        )
        db.session.add(self.repository)
        db.session.commit()
        self.Context = mock.Mock()
        # NOTE(review): this mocks zmq.Context.instance() (the class above
        # mocks zmq.Context()) — confirm which one the handler actually
        # calls; only one of the two can match.
        self.context = self.Context.instance.return_value
        self.socket = self.context.socket.return_value
        self.context_patch = mock.patch('zmq.Context', self.Context)
        self.context_patch.start()

    def tearDown(self):
        super(SavedDataTestCase, self).tearDown()
        db.session.delete(self.repository)
        db.session.commit()
        self.context_patch.stop()

    def send_hook(self):
        # Post the recorded payload with the valid token; returns the response.
        data = {'payload': self.payload}
        headers = {'X-Github-Event': 'push'}
        return self.client.post('/?token={0}'.format(self.repo_token),
                                headers=headers, data=data)

    def test_push_created(self):
        self.send_hook()
        self.assertIsNotNone(self.push_query.first())

    def test_push_unique(self):
        # Delivering the same hook twice must not create a duplicate Push.
        self.send_hook()
        self.assertEqual(self.push_query.count(), 1)
        self.send_hook()
        self.assertEqual(self.push_query.count(), 1)

    def test_push_data(self):
        self.send_hook()
        push = self.push_query.first()
        self.assertEqual(push.repository_id, self.repository.id)
        self.assertEqual(push.commit_message, 'Update README.md')
        self.assertEqual(push.committer_name, 'Dima Kukushkin')
        self.assertEqual(push.committer_email, 'dima@kukushkin.me')
        self.assertEqual(push.ref, 'refs/heads/master')

    def test_message_sent(self):
        # The handler must publish a msgpack-encoded task for the new push.
        self.send_hook()
        push = self.push_query.first()
        task = {
            'push_id': push.id,
            'owner_name': push.repository.owner_name,
            'repo_name': push.repository.name,
            'hash': push.commit_hash,
        }
        self.socket.send.assert_has_calls(mock.call(packb(task)))

    def test_socket_connect(self):
        # The socket must connect to the configured coordinator endpoint.
        self.send_hook()
        self.socket.connect.assert_has_calls(
            mock.call(current_app.config['COORDINATOR'])
        )
|
alfredhq/alfred-listener
|
tests/__init__.py
|
Python
|
isc
| 7,460
|
# ZeroMQ REQ (request) client: connects to one or two REP servers and does a
# fixed number of request/reply round-trips.
# NOTE: Python 2 syntax (print statements).
import zmq
import sys
# First server port: default 5556, overridable via argv[1]; optional second
# server port via argv[2].
port = "5556"
if len(sys.argv) > 1:
    port = sys.argv[1]
    int(port)  # validates the argument is numeric; raises ValueError if not
if len(sys.argv) > 2:
    port1 = sys.argv[2]
    int(port1)
context = zmq.Context()
print "Connecting to server..."
socket = context.socket(zmq.REQ)
socket.connect ("tcp://localhost:%s" % port)
if len(sys.argv) > 2:
    # A REQ socket connected to two endpoints load-balances requests
    # between them round-robin.
    socket.connect ("tcp://localhost:%s" % port1)
# Do 9 requests (range(1, 10) stops before 10), waiting each time for a response
for request in range (1,10):
    print "Sending request ", request,"..."
    socket.send ("Hello")
    # Get the reply (REQ sockets must strictly alternate send/recv).
    message = socket.recv()
    print "Received reply ", request, "[", message, "]"
|
andreydelpozo2/breadcrumbs
|
zmqtest/reqrep_client.py
|
Python
|
mit
| 623
|
# ----------------------------------------------------------------------------------------------------------------------
#
# Generic Examples
#
# ----------------------------------------------------------------------------------------------------------------------
# NOTE: this file is a gallery of copy-paste snippets, not a runnable script:
# names like `board` are re-created per section.
# ----------------------------------------------------------------------------------------------------------------------
# Initialize OS
# ----------------------------------------------------------------------------------------------------------------------
from lib.Kernel import Kernel
from lib.toolkit import load_properties, determine_preferred_wifi
# Start-up Kernel
kernel = Kernel(load_properties("conf/os.properties"))
log = kernel.logger
# ----------------------------------------------------------------------------------------------------------------------
# Logger
# ----------------------------------------------------------------------------------------------------------------------
import gc
from lib.Logger import Logger
log = Logger("DEBUG")
log.info("Hello!")
log.error("Critical Issue!!")
log.debug("Free memory: " + str(gc.mem_free()))
# ----------------------------------------------------------------------------------------------------------------------
# Update DuckDNS
# ----------------------------------------------------------------------------------------------------------------------
from lib.toolkit import update_duck_dns
# Update DuckDNS service
update_duck_dns("mydomain", "mytoken", "myip")
# ----------------------------------------------------------------------------------------------------------------------
#
# NodeMCU Examples
#
# ----------------------------------------------------------------------------------------------------------------------
from hw.board.NodeMCU import NodeMCU
# ----------------------------------------------------------------------------------------------------------------------
# Connect to user preferred WiFi
# ----------------------------------------------------------------------------------------------------------------------
# Instantiate our board
board = NodeMCU()
# Find preferred wifi
preferred_wifi = determine_preferred_wifi(load_properties("conf/network.properties"), board.scan_wifi())
# Get IP
ip = board.connect_to_wifi(preferred_wifi["ssid"], preferred_wifi["password"], 10)
# ----------------------------------------------------------------------------------------------------------------------
# Button event listener
# ----------------------------------------------------------------------------------------------------------------------
# Instantiate our board
board = NodeMCU()
# Listen for events on FLASH button and run "hello_world" function on single and double click
board.get_flash_button_events("hello_world", "hello_world")
# Listen for events on USER button and run "hello_world" function on single and double click
board.get_user_button_events("hello_world", "hello_world")
# ----------------------------------------------------------------------------------------------------------------------
# LED blinking
# ----------------------------------------------------------------------------------------------------------------------
# Instantiate our board
board = NodeMCU()
# Blink BLUE LED 5 times with 0.5 sec delay
board.blink_blue_led(5, 0.5)
# ----------------------------------------------------------------------------------------------------------------------
# Format Filesystem
# ----------------------------------------------------------------------------------------------------------------------
# Instantiate our board
board = NodeMCU()
# Request format - this will wipe all the filesystem
board.format()
# ----------------------------------------------------------------------------------------------------------------------
# Start Memory Manager
# ----------------------------------------------------------------------------------------------------------------------
# Instantiate our board
board = NodeMCU()
# Memory collection and reporting will occur every 10 sec
board.start_memory_manager(10000)
# Or you can run memory manager ad hoc
board.mem_cleanup()
# ----------------------------------------------------------------------------------------------------------------------
# Drive Humidity/Temperature sensor (DHT11 or DHT22)
# ----------------------------------------------------------------------------------------------------------------------
from hw.sensor.DHT import DHT, DHTType, TemperatureUnit
# Instantiate our sensor
# NOTE(review): second argument (10) is presumably the data pin — confirm
# against the DHT constructor.
d = DHT(DHTType.DHT11, 10)
# Get temperature in Celsius
d.get_temperature()
# Get temperature in Fahrenheit
d.get_temperature(TemperatureUnit.FAHRENHEIT)
# Get temperature numeric in Celsius
d.get_temperature(TemperatureUnit.CELSIUS, False)
# Get temperature numeric in Fahrenheit
d.get_temperature(TemperatureUnit.FAHRENHEIT, False)
# ----------------------------------------------------------------------------------------------------------------------
# Drive Temperature/Pressure/Altitude sensor (BMP180 or BMP280)
# ----------------------------------------------------------------------------------------------------------------------
from hw.sensor.BMP import BMP, BMPType, TemperatureUnit, PressureUnit, AltitudeUnit
# Instantiate our sensor
# NOTE(review): arguments (2, 0) are presumably the I2C pins/bus — confirm
# against the BMP constructor.
s = BMP(BMPType.BMP180, 2, 0)
# Get temperature in Celsius
s.get_temperature()
# Get temperature in Fahrenheit
s.get_temperature(TemperatureUnit.FAHRENHEIT)
# Get temperature numeric in Celsius
s.get_temperature(TemperatureUnit.CELSIUS, False)
# Get temperature numeric in Fahrenheit
s.get_temperature(TemperatureUnit.FAHRENHEIT, False)
# Get altitude in meters
s.get_altitude(AltitudeUnit.METERS)
# Get pressure in hectopascals
s.get_pressure(PressureUnit.HECTOPASCAL)
# ----------------------------------------------------------------------------------------------------------------------
# Drive SSD1306 Display with I2C connection
# ----------------------------------------------------------------------------------------------------------------------
from hw.screen.SSD1306 import SSD1306
from hw.screen.Screen import ConnectionType
import machine
bus = machine.I2C(machine.Pin(4), machine.Pin(5))
oled = SSD1306(ConnectionType.I2C, bus)
oled.text("hello world")
# ----------------------------------------------------------------------------------------------------------------------
# Drive Analog Sensor (A soil sensor in this example)
# ----------------------------------------------------------------------------------------------------------------------
from hw.sensor.AnalogSensor import AnalogSensor
soil_sensor = AnalogSensor(0, 1024, 364) # 364 is submerged in water and 1024 is dry
soil_sensor.get_value()
|
idimitrakopoulos/illuminOS
|
lib/examples.py
|
Python
|
mit
| 6,738
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._firewall_rules_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_server_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallRulesOperations:
    """FirewallRulesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.rdbms.mysql_flexibleservers.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: AutoRest-generated code; manual edits will be lost on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    # Initial (non-polling) request of the create-or-update LRO; the
    # begin_create_or_update wrapper below drives the poller.
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        server_name: str,
        firewall_rule_name: str,
        parameters: "_models.FirewallRule",
        **kwargs: Any
    ) -> Optional["_models.FirewallRule"]:
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.FirewallRule"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'FirewallRule')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            server_name=server_name,
            firewall_rule_name=firewall_rule_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 202 (accepted, still provisioning) deliberately yields no body here;
        # the poller fetches the final resource.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FirewallRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('FirewallRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}'}  # type: ignore

    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        firewall_rule_name: str,
        parameters: "_models.FirewallRule",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FirewallRule"]:
        """Creates a new firewall rule or updates an existing firewall rule.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the server firewall rule.
        :type firewall_rule_name: str
        :param parameters: The required parameters for creating or updating a firewall rule.
        :type parameters: ~azure.mgmt.rdbms.mysql_flexibleservers.models.FirewallRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either FirewallRule or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.mysql_flexibleservers.models.FirewallRule]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: make the initial call (cls=identity keeps the
            # raw pipeline response for the poller).
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                firewall_rule_name=firewall_rule_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final GET of the LRO into a FirewallRule.
            response = pipeline_response.http_response
            deserialized = self._deserialize('FirewallRule', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}'}  # type: ignore

    # Initial (non-polling) request of the delete LRO.
    async def _delete_initial(
        self,
        resource_group_name: str,
        server_name: str,
        firewall_rule_name: str,
        **kwargs: Any
    ) -> None:
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            server_name=server_name,
            firewall_rule_name=firewall_rule_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}'}  # type: ignore

    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        server_name: str,
        firewall_rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a firewall rule.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the server firewall rule.
        :type firewall_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                firewall_rule_name=firewall_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        server_name: str,
        firewall_rule_name: str,
        **kwargs: Any
    ) -> "_models.FirewallRule":
        """Gets information about a server firewall rule.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param firewall_rule_name: The name of the server firewall rule.
        :type firewall_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.rdbms.mysql_flexibleservers.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            server_name=server_name,
            firewall_rule_name=firewall_rule_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('FirewallRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}'}  # type: ignore

    @distributed_trace
    def list_by_server(
        self,
        resource_group_name: str,
        server_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.FirewallRuleListResult"]:
        """List all the firewall rules in a given server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FirewallRuleListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.mysql_flexibleservers.models.FirewallRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # follow the service-provided next_link.
            if not next_link:
                request = build_list_by_server_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    template_url=self.list_by_server.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_by_server_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Turn one page response into (next_link, items).
            deserialized = self._deserialize("FirewallRuleListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/firewallRules'}  # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql_flexibleservers/aio/operations/_firewall_rules_operations.py
|
Python
|
mit
| 18,869
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nc/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNcStatusRegistered(object):
    """Parser tests for a registered-domain response from whois.nc."""

    def setUp(self):
        # Parse the captured fixture response into a Record for every test.
        fixture_path = "spec/fixtures/responses/whois.nc/status_registered.txt"
        host = "whois.nc"
        raw = open(fixture_path, "r").read()
        self.record = yawhois.record.Record(None, [yawhois.record.Part(raw, host)])

    def test_status(self):
        rec = self.record
        eq_(rec.status, 'registered')

    def test_available(self):
        rec = self.record
        eq_(rec.available, False)

    def test_domain(self):
        rec = self.record
        eq_(rec.domain, "domaine.nc")

    def test_nameservers(self):
        servers = self.record.nameservers
        eq_(servers.__class__.__name__, 'list')
        eq_(len(servers), 3)
        for ns, expected_name in zip(servers, ["any-ns1.nc", "ns1.nc", "ns2.nc"]):
            eq_(ns.__class__.__name__, 'Nameserver')
            eq_(ns.name, expected_name)

    def test_admin_contacts(self):
        # whois.nc does not expose admin contacts.
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.admin_contacts)

    def test_registered(self):
        rec = self.record
        eq_(rec.registered, True)

    def test_created_on(self):
        created = self.record.created_on
        eq_(created.__class__.__name__, 'datetime')
        eq_(created, time_parse('2006-05-14'))

    def test_registrar(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrar)

    def test_registrant_contacts(self):
        contacts = self.record.registrant_contacts
        eq_(contacts.__class__.__name__, 'list')
        eq_(len(contacts), 1)
        contact = contacts[0]
        eq_(contact.__class__.__name__, 'Contact')
        eq_(contact.type, yawhois.record.Contact.TYPE_REGISTRANT)
        eq_(contact.name, "CCTLD")
        eq_(contact.organization, None)
        eq_(contact.address, "1 RUE MONCHOVET\n7 EME ETAGE\nLE WARUNA 1")
        eq_(contact.city, "NOUMEA CEDEX")
        eq_(contact.zip, "98841")
        eq_(contact.state, None)
        eq_(contact.country, "NEW CALEDONIA")
        eq_(contact.phone, None)
        eq_(contact.fax, None)
        eq_(contact.email, None)

    def test_technical_contacts(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.technical_contacts)

    def test_updated_on(self):
        updated = self.record.updated_on
        eq_(updated.__class__.__name__, 'datetime')
        eq_(updated, time_parse('2013-04-03'))

    def test_domain_id(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)

    def test_expires_on(self):
        expires = self.record.expires_on
        eq_(expires.__class__.__name__, 'datetime')
        eq_(expires, time_parse('2016-05-14'))

    def test_disclaimer(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer)
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_nc_status_registered.py
|
Python
|
mit
| 3,601
|
from flask.ext.mail import Mail, Message
from run import mail
#Sends email notifying user about posted route
def sendValidationEmail(to, url):
    """Email the route owner a link to validate and edit their posted route."""
    link = 'allez-viens.herokuapp.com/trip/' + url
    message = (
        "Please click <a href='" + link + "'>this link</a> to validate and edit your route."
        "</br> If you did not request this, please disregard this email."
    )
    sendEmail(
        [to],
        'noreply@allezviens.com',   # replies are discarded
        'verify@allezviens.com',
        'Allez Viens Validation',
        message,
    )
#Sends email notifying user about pick
def sendPickNotificationEmail(to, replyTo, url):
    """Notify a driver that another user picked (expressed interest in) their route."""
    link = 'allez-viens.herokuapp.com/trip/' + url
    message = (
        "A user at Allez Viens has expressed interest in riding with you regarding "
        "<a href='" + link + "'>this route.</a> <br><br>Replying to this message will "
        "reply directly to the user."
    )
    sendEmail(
        [to],
        replyTo,                      # reply goes straight to the interested user
        'messages@allezviens.com',
        'Allez Viens User Contacted You',
        message,
    )
#Sends email via smtp service using template
def sendEmail(to, replyTo, sender, header, body):
    """Send an HTML email through the app's configured SMTP mail extension.

    :param to: list of recipient addresses
    :param replyTo: address that replies are directed to
    :param sender: envelope/From address
    :param header: subject line
    :param body: HTML message body
    """
    msg = Message(
        header,
        recipients=to,
        reply_to=replyTo,
        sender=sender
    )
    # Bug fix: the plain-text fallback was the literal string "body",
    # so text-only mail clients saw the word "body" instead of the
    # message. Use the message content as the text part as well.
    msg.body = body
    msg.html = body
    mail.send(msg)
#For Development, uncomment to use function from command line
# with app.app_context():
# mail.send(msg)
|
jimgong92/allezViens
|
communication.py
|
Python
|
mit
| 1,297
|
from flask import Flask, jsonify, make_response, request
from src.twitter import tweet
from src.sensor import get_temperature_celsius
app = Flask(__name__)
# Routes
@app.route('/hello', methods=['GET'])
def hello_world():
    """Liveness check: respond with a static JSON greeting."""
    payload = {'hello': 'world'}
    return jsonify(payload)
@app.route('/tweet', methods=['GET'])
def send_tweet():
    """Placeholder endpoint: tweeting is not exposed on this route."""
    payload = {'tweet': 'not here'}
    return jsonify(payload)
@app.route('/temperature', methods=['GET'])
def get_temperature():
    """Report the current sensor reading in Celsius as JSON."""
    return jsonify({'temperature': get_temperature_celsius()})
# @app.route('/api/v1/submit', methods=['POST'])
# def process_image():
# jsonData = request.get_json()
# cutter = ClairesCutter(jsonData['image'], 'base64')
# positions = cutter.getPositions()
# pieces = {'red': [], 'green': [], 'blue': []}
# for color in positions:
# for position in positions[color]:
# pieces[color].append(cutter.crop(position))
#
# return jsonify({'pieces': pieces})
# Pretty error handling
@app.errorhandler(400)
def bad_request(error):
    """Return a JSON 400 body instead of Flask's HTML default.

    Renamed from ``not_found``: all three error handlers shared that
    name, so each definition shadowed the previous one at module level
    (flake8 F811). Flask registers handlers at decoration time, so
    routing still worked, but the duplicate names broke introspection
    and debugging.
    """
    return make_response(jsonify({'error': 'Bad request. Please check your request syntax.'}), 400)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's HTML default."""
    payload = {'error': 'Nothing found at this route.'}
    return make_response(jsonify(payload), 404)
@app.errorhandler(405)
def method_not_allowed(error):
    """Return a JSON 405 body instead of Flask's HTML default.

    Renamed from ``not_found``: the 400/404/405 handlers all shared
    that name, so later definitions shadowed earlier ones at module
    level (flake8 F811). Registration via the decorator was unaffected,
    but the name collision hurt introspection and debugging.
    """
    return make_response(jsonify({'error': 'Request method not allowed.'}), 405)
if __name__ == '__main__':
    # Development server: listen on all interfaces with the debugger on.
    # Do not run with debug=True in production.
    app.run(debug=True, host='0.0.0.0')
|
Andruschenko/lora-remote-server
|
app.py
|
Python
|
mit
| 1,443
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# Whether site managers receive an email notification for new feedback;
# overridable via CMS_FEEDBACK_NOTIFY_MANAGERS in the Django settings.
NOTIFY_MANAGERS = getattr(
    settings, 'CMS_FEEDBACK_NOTIFY_MANAGERS', True)

# Default (lazily translated) subject line for the notification email.
DEFAULT_NOTIFY_SUBJECT = _('New feedback')
# Subject line actually used; overridable via CMS_FEEDBACK_NOTIFY_SUBJECT.
NOTIFY_SUBJECT = getattr(
    settings, 'CMS_FEEDBACK_NOTIFY_SUBJECT', DEFAULT_NOTIFY_SUBJECT)
|
satyrius/cmsplugin-feedback
|
cmsplugin_feedback/settings.py
|
Python
|
mit
| 308
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HPP1_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
    """
    Creates the himesis graph representing the AToM3 model HPP1_CompleteLHS.

    Auto-generated: builds the precondition pattern LHS node by node —
    match classes, apply classes, association nodes and one trace link,
    each carrying its MT_pre__attr1 constraint string — then wires them
    together with directed edges. Statement order and the exact literal
    constraint strings are significant to the pattern matcher.
    """
    # Flag this instance as compiled now
    self.is_compiled = True
    super(HPP1_CompleteLHS, self).__init__(name='HPP1_CompleteLHS', num_nodes=0, edges=[])
    # Set the graph attributes
    self["mm__"] = []
    # Graph-level constraint, evaluated after all LHS nodes are matched.
    self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
    self["name"] = """"""
    self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'PP1')

    # Nodes that represent match classes
    # match class SIBLING0() node
    self.add_node()
    self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[0]["MT_label__"] = """1"""
    self.vs[0]["MT_dirty__"] = False
    self.vs[0]["mm__"] = """MT_pre__SIBLING0"""
    self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Transition() node
    self.add_node()
    self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[1]["MT_label__"] = """2"""
    self.vs[1]["MT_dirty__"] = False
    self.vs[1]["mm__"] = """MT_pre__Transition"""
    self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Trigger() node
    self.add_node()
    self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[2]["MT_label__"] = """3"""
    self.vs[2]["MT_dirty__"] = False
    self.vs[2]["mm__"] = """MT_pre__Trigger"""
    self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Signal() node
    self.add_node()
    self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[3]["MT_label__"] = """4"""
    self.vs[3]["MT_dirty__"] = False
    self.vs[3]["mm__"] = """MT_pre__Signal"""
    self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

    # Nodes that represent apply classes
    # match class ListenBranch() node
    self.add_node()
    self.vs[4]["MT_subtypeMatching__"] = False
    self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[4]["MT_label__"] = """5"""
    self.vs[4]["MT_subtypes__"] = []
    self.vs[4]["MT_dirty__"] = False
    self.vs[4]["mm__"] = """MT_pre__ListenBranch"""
    self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Inst() node
    self.add_node()
    self.vs[5]["MT_subtypeMatching__"] = False
    self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[5]["MT_label__"] = """6"""
    self.vs[5]["MT_subtypes__"] = []
    self.vs[5]["MT_dirty__"] = False
    self.vs[5]["mm__"] = """MT_pre__Inst"""
    self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Name() node
    self.add_node()
    self.vs[6]["MT_subtypeMatching__"] = False
    self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[6]["MT_label__"] = """7"""
    self.vs[6]["MT_subtypes__"] = []
    self.vs[6]["MT_dirty__"] = False
    self.vs[6]["mm__"] = """MT_pre__Name"""
    self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Name() node
    self.add_node()
    self.vs[7]["MT_subtypeMatching__"] = False
    self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[7]["MT_label__"] = """8"""
    self.vs[7]["MT_subtypes__"] = []
    self.vs[7]["MT_dirty__"] = False
    self.vs[7]["mm__"] = """MT_pre__Name"""
    self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Name() node
    self.add_node()
    self.vs[8]["MT_subtypeMatching__"] = False
    self.vs[8]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[8]["MT_label__"] = """9"""
    self.vs[8]["MT_subtypes__"] = []
    self.vs[8]["MT_dirty__"] = False
    self.vs[8]["mm__"] = """MT_pre__Name"""
    self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
    # match class Name() node
    self.add_node()
    self.vs[9]["MT_subtypeMatching__"] = False
    self.vs[9]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
    self.vs[9]["MT_label__"] = """10"""
    self.vs[9]["MT_subtypes__"] = []
    self.vs[9]["MT_dirty__"] = False
    self.vs[9]["mm__"] = """MT_pre__Name"""
    self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

    # Nodes that represent the match associations of the property.
    # match association Transition--type-->SIBLING0 node
    self.add_node()
    self.vs[10]["MT_subtypeMatching__"] = False
    self.vs[10]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "type"
"""
    self.vs[10]["MT_label__"] = """11"""
    self.vs[10]["MT_subtypes__"] = []
    self.vs[10]["MT_dirty__"] = False
    self.vs[10]["mm__"] = """MT_pre__directLink_S"""
    self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc10')
    # match association Transition--triggers-->Trigger node
    self.add_node()
    self.vs[11]["MT_subtypeMatching__"] = False
    self.vs[11]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "triggers"
"""
    self.vs[11]["MT_label__"] = """12"""
    self.vs[11]["MT_subtypes__"] = []
    self.vs[11]["MT_dirty__"] = False
    self.vs[11]["mm__"] = """MT_pre__directLink_S"""
    self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc11')
    # match association Trigger--signal-->Signal node
    self.add_node()
    self.vs[12]["MT_subtypeMatching__"] = False
    self.vs[12]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "signal"
"""
    self.vs[12]["MT_label__"] = """13"""
    self.vs[12]["MT_subtypes__"] = []
    self.vs[12]["MT_dirty__"] = False
    self.vs[12]["mm__"] = """MT_pre__directLink_S"""
    self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc12')

    # Nodes that represent the apply associations of the property.
    # apply association ListenBranch--p-->Inst node
    self.add_node()
    self.vs[13]["MT_subtypeMatching__"] = False
    self.vs[13]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "p"
"""
    self.vs[13]["MT_label__"] = """14"""
    self.vs[13]["MT_subtypes__"] = []
    self.vs[13]["MT_dirty__"] = False
    self.vs[13]["mm__"] = """MT_pre__directLink_T"""
    self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc13')
    # apply association Inst--channelNames-->Name node
    self.add_node()
    self.vs[14]["MT_subtypeMatching__"] = False
    self.vs[14]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "channelNames"
"""
    self.vs[14]["MT_label__"] = """15"""
    self.vs[14]["MT_subtypes__"] = []
    self.vs[14]["MT_dirty__"] = False
    self.vs[14]["mm__"] = """MT_pre__directLink_T"""
    self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc14')
    # apply association Inst--channelNames-->Name node
    self.add_node()
    self.vs[15]["MT_subtypeMatching__"] = False
    self.vs[15]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "channelNames"
"""
    self.vs[15]["MT_label__"] = """16"""
    self.vs[15]["MT_subtypes__"] = []
    self.vs[15]["MT_dirty__"] = False
    self.vs[15]["mm__"] = """MT_pre__directLink_T"""
    self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc15')
    # apply association Inst--channelNames-->Name node
    self.add_node()
    self.vs[16]["MT_subtypeMatching__"] = False
    self.vs[16]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "channelNames"
"""
    self.vs[16]["MT_label__"] = """17"""
    self.vs[16]["MT_subtypes__"] = []
    self.vs[16]["MT_dirty__"] = False
    self.vs[16]["mm__"] = """MT_pre__directLink_T"""
    self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc16')
    # apply association Inst--channelNames-->Name node
    self.add_node()
    self.vs[17]["MT_subtypeMatching__"] = False
    self.vs[17]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "channelNames"
"""
    self.vs[17]["MT_label__"] = """18"""
    self.vs[17]["MT_subtypes__"] = []
    self.vs[17]["MT_dirty__"] = False
    self.vs[17]["mm__"] = """MT_pre__directLink_T"""
    self.vs[17]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc17')

    # Nodes that represent trace relations
    # backward association Transition---->Inst node
    # (trace links carry no attribute constraint)
    self.add_node()
    self.vs[18]["MT_subtypeMatching__"] = False
    self.vs[18]["MT_label__"] = """19"""
    self.vs[18]["MT_subtypes__"] = []
    self.vs[18]["MT_dirty__"] = False
    self.vs[18]["mm__"] = """MT_pre__trace_link"""
    self.vs[18]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink18')

    # Add the edges. Association/trace nodes sit between class nodes,
    # so every conceptual link becomes a (source, assoc) + (assoc, target) pair.
    self.add_edges([
        (5,18),  # apply_class Inst() -> backward_association
        (18,1),  # backward_association -> apply_class Transition()
        (4,13),  # apply_class ListenBranch() -> association p
        (13,5),  # association p -> apply_class Inst()
        (5,14),  # apply_class Inst() -> association channelNames
        (14,6),  # association channelNames -> apply_class Name()
        (5,15),  # apply_class Inst() -> association channelNames
        (15,7),  # association channelNames -> apply_class Name()
        (5,16),  # apply_class Inst() -> association channelNames
        (16,9),  # association channelNames -> apply_class Name()
        (5,17),  # apply_class Inst() -> association channelNames
        (17,8),  # association channelNames -> apply_class Name()
        (1,10),  # match_class Transition() -> association type
        (10,0),  # association type -> match_class SIBLING0()
        (1,11),  # match_class Transition() -> association triggers
        (11,2),  # association triggers -> match_class Trigger()
        (2,12),  # match_class Trigger() -> association signal
        (12,3)   # association signal -> match_class Signal()
    ])

    # Add the attribute equations
    self["equations"] = []
def eval_attr11(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr12(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr13(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr14(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr111(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    Accepts the candidate node only when its attribute value equals the
    association name ``"type"`` (mirrors the node's MT_pre__attr1 string).
    """
    return attr_value == "type"
def eval_attr112(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    Accepts the candidate node only when its attribute value equals the
    association name ``"triggers"`` (mirrors the node's MT_pre__attr1 string).
    """
    return attr_value == "triggers"
def eval_attr113(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    Accepts the candidate node only when its attribute value equals the
    association name ``"signal"`` (mirrors the node's MT_pre__attr1 string).
    """
    return attr_value == "signal"
def eval_attr15(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr16(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr17(self, attr_value, this):
    """Per-node attribute constraint evaluated during matching.

    ``attr_value`` is the candidate node's attribute value; ``this`` is
    the candidate node itself.  Unconstrained: every value is accepted.
    """
    return True
def eval_attr18(self, attr_value, this):
    """Generated attribute constraint: accept any attribute value.

    attr_value is the candidate node's attribute value; other attributes of
    the node are reachable as this['x'].
    """
    return True
def eval_attr19(self, attr_value, this):
    """Generated attribute constraint: accept any attribute value.

    attr_value is the candidate node's attribute value; other attributes of
    the node are reachable as this['x'].
    """
    return True
def eval_attr110(self, attr_value, this):
    """Generated attribute constraint: accept any attribute value.

    attr_value is the candidate node's attribute value; other attributes of
    the node are reachable as this['x'].
    """
    return True
def eval_attr114(self, attr_value, this):
    """Generated attribute constraint: match only nodes whose attribute
    value equals the literal "p".
    """
    return attr_value == "p"
def eval_attr115(self, attr_value, this):
    """Generated attribute constraint: match only nodes whose attribute
    value equals the literal "channelNames".
    """
    return attr_value == "channelNames"
def eval_attr116(self, attr_value, this):
    """Generated attribute constraint: match only nodes whose attribute
    value equals the literal "channelNames".
    """
    return attr_value == "channelNames"
def eval_attr117(self, attr_value, this):
    """Generated attribute constraint: match only nodes whose attribute
    value equals the literal "channelNames".
    """
    return attr_value == "channelNames"
def eval_attr118(self, attr_value, this):
    """Generated attribute constraint: match only nodes whose attribute
    value equals the literal "channelNames".
    """
    return attr_value == "channelNames"
def constraint(self, PreNode, graph):
    """Global LHS constraint, evaluated after all nodes have matched.

    PreNode maps an integer label to its matched node (attribute x of node n
    is PreNode('n')['x']). Returning True allows the rule to fire; returning
    False forbids it. This generated rule imposes no extra condition.
    """
    return True
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/from_thesis/HPP1_CompleteLHS.py
|
Python
|
mit
| 38,327
|
# coding: utf-8
from contextlib import closing
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
import numpy as np
import pandas as pd
from jinja2 import Environment, FileSystemLoader
import database
from model import Problem, Task
def normalize(value):
    """Map a raw point value onto log scale (inverse of denormalize)."""
    scaled = value / 10000.0
    return np.log(scaled)
def denormalize(value):
    """Map a log-scale value back to raw points (inverse of normalize)."""
    return 10000.0 * np.power(np.e, value)
def guess(train_data, test_data):
    """Fit a regressor on train_data and predict point values for test_data.

    Both frames carry a 'source' column plus per-user feature columns that
    come in `<user>_T` / `<user>_F` pairs; train_data additionally has a
    'point' column with the known score. Returns denormalized predictions
    for the rows of test_data.
    """
    # extract users having many features in test_data
    train_data = train_data.drop('source', 1)
    test_data = test_data.drop('source', 1)
    col_sum = test_data.sum(0)
    # rolling(2).sum() with the stride-2 slice [1:-1:2] collapses each _T/_F
    # column pair into one per-user observation count.
    # NOTE(review): assumes every user's _T and _F columns are adjacent in
    # the CSV column order -- confirm against the file writer.
    user_count = col_sum.rolling(2).sum()[1:-1:2].astype(int)
    # Keep users with at least 100 observations that also exist in train.
    effective_users = [l[0:-2] for l, v in user_count.iteritems() if v >= 100 and l in train_data]
    effective_cols = [name for user in effective_users for name in [user + '_T', user + '_F']]
    # print('test_user_count', len(effective_users))
    expect = normalize(train_data['point'])
    train_data = train_data[effective_cols]
    test_data = test_data[effective_cols]
    # `>= 1.0` is always False, so the verification split is currently
    # disabled and every row trains the model.
    mask = np.random.rand(len(train_data)) >= 1.0
    train_selected = train_data[~mask]
    train_expect = expect[~mask]
    verify_selected = train_data[mask]
    verify_expect = expect[mask]
    # Alternative estimators kept for experimentation (Ridge / RandomForest).
    # tuned_parameters = [{'alphas': [[0.1], [0.3], [1], [3], [10], [30], [100], [300]],
    #                      },
    #                     ]
    # regressor = GridSearchCV(RidgeCV(), tuned_parameters, cv=4)
    tuned_parameters = [{'kernel': ['rbf'],
                         'gamma': [1e-2, 3e-2, 1e-3, 3e-3, 1e-4],
                         'C': [1, 3, 10, 30, 100, 300],
                         'epsilon': [0, 0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1],
                         },
                        ]
    regressor = GridSearchCV(SVR(), tuned_parameters, cv=4)
    # tuned_parameters = [{'n_estimators': [27, 81, 243],
    #                      'max_features': ['auto', 'sqrt', 'log2'],
    #                      'min_samples_split': [2, 3, 4],
    #                      # 'max_leaf_nodes': [None, 2, 4, 8, 16, 32],
    #                      },
    #                     ]
    # regressor = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=4)
    regressor.fit(train_selected, train_expect)
    # print(regressor.best_params_)
    # print(regressor.best_score_)
    # train_result = regressor.predict(train_selected)
    # for e, r in zip(denormalize(train_expect), denormalize(train_result)):
    #     print(int(round(e / 100)), int(round(r / 100)))
    # print()
    #
    # verify_result = regressor.predict(verify_selected)
    # for e, r in zip(denormalize(verify_expect), denormalize(verify_result)):
    #     print(int(round(e / 100)), int(round(r / 100)))
    # print()
    return denormalize(regressor.predict(test_data))
def output(test_data, test_result):
    """Render the guessed scores as an HTML report on stdout.

    Joins each predicted problem id with task/problem metadata loaded from
    the database, keeps only ARC/ABC contests, sorts by task, and renders
    template.html with the results.
    """
    test_pid = test_data['source']
    db = database.get_connection()
    with closing(db) as con:
        all_tasks = Task.loadAll(con)
        all_problems = Problem.loadAll(con)
    # Only report tasks from ARC/ABC contests.
    pid_to_task = {task.problem_id: task
                   for task in all_tasks
                   if task.contest_id.startswith('arc') or task.contest_id.startswith('abc')}
    pid_to_title = {prob.id: prob.title for prob in all_problems}
    results = sorted([(pid_to_task[pid], pid_to_title[pid], pid, guessed_score)
                      for pid, guessed_score in zip(test_pid, test_result)])
    # CSV output (alternative, kept for reference)
    # print('contest', 'symbol', 'title', 'estimated_score', sep=',')
    # for task, title, pid, guessed_score in results:
    #     print(task.contest_id, task.symbol, '"' + title + '"', int(round(guessed_score / 100)), sep=',')
    # HTML output
    env = Environment(loader=FileSystemLoader('./', encoding='utf8'))
    template = env.get_template('template.html')
    html = template.render(results=results)
    print(html)
def main():
    """Load the train/test CSVs, run the estimator, and emit the report."""
    training = pd.read_csv('train.csv')
    testing = pd.read_csv('test.csv')
    predictions = guess(training, testing)
    output(testing, predictions)


if __name__ == '__main__':
    main()
|
tomerun/atcoder_statistics
|
src/batch/guess_point.py
|
Python
|
mit
| 4,083
|
# encoding: utf-8
# Fra https://itgk.idi.ntnu.no/oving/index.php?parallell=1
# https://itgk.idi.ntnu.no/oving/2014/skumleskogen2.py
keys = 0  # number of keys the player currently holds
cur = None # current node
# A node is a dictionary with the following fields:
def make_node(number):
    """Create a fresh, unconnected maze node carrying the given number."""
    node = {"number": number, "label": None}
    for arc in ("parent", "left", "right"):
        node[arc] = None
    return node
def set_arc(from_node, arc, to_node):
    """Attach to_node below from_node along arc ('left' or 'right'),
    keeping the child's parent pointer consistent."""
    to_node["parent"] = from_node
    from_node[arc] = to_node
def move_to(to_node):
    """Try to move the player to to_node; return whether the move happened.

    Enforces the maze rules in order: a locked current node blocks both of
    its children, a missing child is a dead end, and stepping onto a rabbit
    ('K') node raises. On success the global cur is advanced.
    """
    global cur
    blocked = cur["label"] in ('L', 'SL')
    if blocked and to_node in (cur["left"], cur["right"]):
        print("Du kan ikke gå videre fra denne låsnoden uten å låse opp først!")
        return False
    if to_node is None:
        print("Dette er en blindvei.")
        return False
    if to_node["label"] == 'K':
        raise Exception("Du ble spist av en kanin!")
    cur = to_node
    return True
def label(n): # DEBUG
    """Debug helper: return the label of node number n."""
    return nodes[n]["label"]
# ---------------------------------------------------------------------------
# Hard-wired maze: 100 nodes linked into a binary tree rooted at node 0.
# Labels: None = ordinary, 'N' = key (nøkkel), 'L' = lock (lås),
# 'SL' = super lock, 'S' = stench (stank), 'K' = rabbit (fatal),
# 'INN' = entrance, 'UT' = exit (node 86, behind the super locks 59 -> 74).
# ---------------------------------------------------------------------------
nodes = [ make_node(i) for i in range(100) ]
nodes[0]["label"] = "INN"
set_arc(nodes[0], "left", nodes[1])
set_arc(nodes[0], "right", nodes[2])
nodes[2]["label"] = "N"
set_arc(nodes[1], "left", nodes[3])
set_arc(nodes[1], "right", nodes[4])
set_arc(nodes[2], "left", nodes[5])
nodes[5]["label"] = "S"
set_arc(nodes[2], "right", nodes[6])
set_arc(nodes[3], "left", nodes[7])
set_arc(nodes[3], "right", nodes[8])
set_arc(nodes[4], "left", nodes[9])
set_arc(nodes[4], "right", nodes[10])
set_arc(nodes[5], "left", nodes[11])
nodes[11]["label"] = "K"
set_arc(nodes[5], "right", nodes[12])
set_arc(nodes[6], "left", nodes[13])
set_arc(nodes[6], "right", nodes[14])
set_arc(nodes[7], "left", nodes[15])
nodes[15]["label"] = "L"
set_arc(nodes[7], "right", nodes[16])
nodes[16]["label"] = "N"
set_arc(nodes[8], "left", nodes[17])
set_arc(nodes[8], "right", nodes[18])
set_arc(nodes[9], "left", nodes[19])
set_arc(nodes[9], "right", nodes[20])
set_arc(nodes[10], "left", nodes[21])
set_arc(nodes[11], "left", nodes[22])
nodes[22]["label"] = "S"
set_arc(nodes[11], "right", nodes[23])
nodes[23]["label"] = "S"
set_arc(nodes[13], "left", nodes[24])
set_arc(nodes[14], "left", nodes[25])
nodes[25]["label"] = "L"
set_arc(nodes[14], "right", nodes[26])
set_arc(nodes[15], "left", nodes[27])
set_arc(nodes[16], "left", nodes[28])
set_arc(nodes[17], "left", nodes[29])
set_arc(nodes[17], "right", nodes[30])
set_arc(nodes[18], "left", nodes[31])
set_arc(nodes[18], "right", nodes[32])
set_arc(nodes[19], "left", nodes[33])
set_arc(nodes[20], "left", nodes[34])
nodes[34]["label"] = "S"
set_arc(nodes[20], "right", nodes[35])
nodes[35]["label"] = "N"
set_arc(nodes[23], "left", nodes[36])
set_arc(nodes[24], "left", nodes[37])
set_arc(nodes[24], "right", nodes[38])
set_arc(nodes[25], "left", nodes[39])
set_arc(nodes[25], "right", nodes[40])
nodes[40]["label"] = "S"
set_arc(nodes[26], "left", nodes[41])
set_arc(nodes[29], "left", nodes[42])
set_arc(nodes[29], "right", nodes[43])
set_arc(nodes[30], "left", nodes[44])
set_arc(nodes[30], "right", nodes[45])
set_arc(nodes[31], "left", nodes[46])
set_arc(nodes[31], "right", nodes[47])
set_arc(nodes[32], "left", nodes[48])
nodes[48]["label"] = "S"
set_arc(nodes[32], "right", nodes[49])
nodes[49]["label"] = "S"
set_arc(nodes[34], "left", nodes[50])
nodes[50]["label"] = "K"
set_arc(nodes[35], "left", nodes[51])
set_arc(nodes[35], "right", nodes[52])
set_arc(nodes[36], "left", nodes[53])
set_arc(nodes[39], "left", nodes[54])
set_arc(nodes[39], "right", nodes[55])
nodes[55]["label"] = "L"
set_arc(nodes[40], "left", nodes[56])
nodes[56]["label"] = "K"
set_arc(nodes[40], "right", nodes[57])
nodes[57]["label"] = "N"
set_arc(nodes[41], "left", nodes[58])
set_arc(nodes[42], "left", nodes[59])
nodes[59]["label"] = "SL"
set_arc(nodes[43], "left", nodes[60])
set_arc(nodes[44], "left", nodes[61])
set_arc(nodes[45], "left", nodes[62])
set_arc(nodes[45], "right", nodes[63])
set_arc(nodes[48], "left", nodes[64])
nodes[64]["label"] = "K"
set_arc(nodes[49], "left", nodes[65])
nodes[65]["label"] = "K"
set_arc(nodes[49], "right", nodes[66])
set_arc(nodes[53], "left", nodes[67])
set_arc(nodes[54], "left", nodes[68])
set_arc(nodes[54], "right", nodes[69])
set_arc(nodes[55], "left", nodes[70])
nodes[70]["label"] = "N"
set_arc(nodes[56], "left", nodes[71])
set_arc(nodes[57], "left", nodes[72])
set_arc(nodes[58], "left", nodes[73])
set_arc(nodes[59], "left", nodes[74])
nodes[74]["label"] = "SL"
set_arc(nodes[61], "left", nodes[75])
set_arc(nodes[62], "left", nodes[76])
set_arc(nodes[62], "right", nodes[77])
set_arc(nodes[63], "left", nodes[78])
nodes[78]["label"] = "L"
set_arc(nodes[64], "left", nodes[79])
nodes[79]["label"] = "N"
set_arc(nodes[65], "left", nodes[80])
nodes[80]["label"] = "S"
set_arc(nodes[66], "left", nodes[81])
set_arc(nodes[66], "right", nodes[82])
set_arc(nodes[68], "left", nodes[83])
nodes[83]["label"] = "N"
set_arc(nodes[69], "left", nodes[84])
set_arc(nodes[70], "left", nodes[85])
nodes[85]["label"] = "N"
set_arc(nodes[74], "left", nodes[86])
nodes[86]["label"] = "UT"
set_arc(nodes[76], "left", nodes[87])
set_arc(nodes[77], "left", nodes[88])
set_arc(nodes[78], "left", nodes[89])
nodes[89]["label"] = "N"
set_arc(nodes[79], "left", nodes[90])
nodes[90]["label"] = "N"
set_arc(nodes[80], "left", nodes[91])
nodes[91]["label"] = "K"
set_arc(nodes[80], "right", nodes[92])
nodes[92]["label"] = "K"
set_arc(nodes[83], "left", nodes[93])
set_arc(nodes[88], "left", nodes[94])
set_arc(nodes[89], "left", nodes[95])
set_arc(nodes[89], "right", nodes[96])
nodes[96]["label"] = "N"
set_arc(nodes[95], "left", nodes[97])
set_arc(nodes[95], "right", nodes[98])
set_arc(nodes[96], "left", nodes[99])
cur = nodes[0] # the player starts at the entrance (INN)
# Public interface ;>
__all__ = ["label", "nummer", "er_vanlig", "er_stank", "er_nokkel", "er_laas", "er_superlaas", "er_inngang", "er_utgang", "gaa_tilbake", "gaa_venstre", "gaa_hoyre", "gaa_ut", "plukk_opp", "laas_opp"]
def nummer():
    """Return the number of the current node."""
    return cur["number"]

def er_vanlig():
    """True if the current node carries no special label."""
    return cur["label"] is None

def er_stank():
    """True if the current node smells (label 'S')."""
    return cur["label"] == 'S'

def er_nokkel():
    """True if the current node holds a key (label 'N')."""
    return cur["label"] == 'N'

def er_laas():
    """True if the current node is a lock of either kind ('L' or 'SL')."""
    return cur["label"] == 'L' or er_superlaas()

def er_superlaas():
    """True if the current node is a super lock (label 'SL')."""
    return cur["label"] == 'SL'

def er_inngang():
    """True if the current node is the entrance."""
    return cur["label"] == 'INN'

def er_utgang():
    """True if the current node is the exit."""
    return cur["label"] == 'UT'
def gaa_tilbake():
    """Move to the parent node; returns whether the move succeeded."""
    return move_to(cur["parent"])
def gaa_venstre():
    """Move to the left child; returns whether the move succeeded."""
    return move_to(cur["left"])
def gaa_hoyre():
    """Move to the right child; returns whether the move succeeded."""
    return move_to(cur["right"])
def gaa_ut():
    """Try to leave the maze; only succeeds at the exit ('UT') node."""
    here = cur["label"]
    if here == 'UT':
        print("Du løste labyrinten. Gratulerer!")
        return True
    if here == 'INN':
        print("Du er ved inngangen til labyrinten, men kaninene har sperret veien ut!")
    else:
        print("Det finnes ingen utgang her ...")
    return False
def plukk_opp():
    """Pick up the key at the current node, if one is present.

    On success increments the global key counter and clears the node label.
    """
    global keys
    if not er_nokkel():
        print("Det finnes ingen nøkkel her.")
        return False
    keys += 1
    cur["label"] = None
    return True
def laas_opp():
    """Unlock the current node, spending one key (two for a super lock)."""
    global keys
    if not er_laas():
        print("Denne noden er ikke en lås.")
        return False
    cost = 2 if er_superlaas() else 1
    if keys < cost:
        if cost == 2:
            print("Du kan ikke låse opp denne superlåsen uten minst to nøkler.")
        else:
            print("Du kan ikke låse opp denne låsen uten en nøkkel.")
        return False
    keys -= cost
    cur["label"] = None
    return True
|
krissrex/python_projects
|
Projects/Oving10-itgk/skumleskogen2.py
|
Python
|
mit
| 7,224
|
import RPi.GPIO as GPIO
import time
# Manual test for four push buttons wired to BCM pins 18/23/24/25.
# Internal pull-ups are enabled, so a pressed button pulls the pin low and
# GPIO.input(...) == False means "pressed".
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Poll forever, printing the label of every button currently held down.
while True:
    input_state1 = GPIO.input(18)
    input_state2 = GPIO.input(23)
    input_state3 = GPIO.input(24)
    input_state4 = GPIO.input(25)
    if input_state1 == False:
        print('1 (18)')
    if input_state2 == False:
        print('2 (23)')
    if input_state3 == False:
        print('3 (24)')
    if input_state4 == False:
        print('4 (25)')
    time.sleep(0.2)  # polling interval / crude debounce
|
mleonard87/whatsthatsong
|
controller/buttons_test.py
|
Python
|
mit
| 649
|
from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenC, PointGenComp
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
# Visualize a shape-completion GAN: interpolate between two latent codes and
# render the generated points (first color) together with the fixed partial
# chair (second color).
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
# Chair shapes with shape-completion pairs (partial input, full target).
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], shape_comp = True)
gen = PointGenComp()
gen.load_state_dict(torch.load(opt.model))
ld = len(dataset)
idx = np.random.randint(ld)
print(ld, idx)
_,part = dataset[idx]
# Linearly interpolate between two random latent vectors over 30 steps.
sim_noise = Variable(torch.randn(2, 1024))
sim_noises = Variable(torch.zeros(30,1024))
for i in range(30):
    x = i/30.0
    sim_noises[i] = sim_noise[0] * x + sim_noise[1] * (1-x)
# Repeat the same partial shape for every interpolation step.
part = Variable(part.view(1,2000,3).transpose(2,1)).repeat(30,1,1)
points = gen(part, sim_noises)
print(points.size(), part.size())
# Concatenate generated points with the partial input along the point axis.
points = torch.cat([points, part], 2)
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:,:3]
# 500 generated points in color 0, 2000 partial-shape points in color 2.
color = cmap[np.array([0] * 500 + [2] * 2000), :]
point_np = points.transpose(2,1).data.numpy()
showpoints(point_np, color)
|
fxia22/pointGAN
|
show_gan_comp.py
|
Python
|
mit
| 1,664
|
import datetime
from django.conf import settings
# Route outgoing mail to the in-memory backend and point the profile model at
# the example app so the test suite never touches a real SMTP server.
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
settings.AUTH_PROFILE_MODULE = 'pigeonpost_example.Profile'
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.test import TestCase
from django.test.utils import override_settings
from pigeonpost.models import Pigeon, Outbox
from pigeonpost_example.models import ModeratedNews, News, Profile, BobsNews, AggregateNews
from pigeonpost.tasks import send_email, kill_pigeons, process_queue
from pigeonpost.signals import pigeonpost_queue
def create_fixtures(create_message=True):
    """Populate the test database with the standard user set.

    Creates five users (two staff), subscribes only 'a' and 'b' to the
    news feed, and optionally saves one News message so its Pigeon exists.
    Returns (users, staff, message, pigeon); the last two are None when
    create_message is False.
    """
    specs = [
        ('a', "Andrew", "a@example.com", False),
        ('b', "Bob", "b@example.com", False),
        ('c', "Chelsea", "c@foo.org", False),
        ('z', "Zach", "z@example.com", True),
        ('x', "Xray", "x@example.com", True),
    ]
    users = []
    staff = []
    for username, first_name, email, is_staff in specs:
        user = User(username=username, first_name=first_name, last_name="Test",
                    email=email, is_staff=is_staff)
        user.save()
        # Only 'a' and 'b' opt in to the news subscription.
        Profile(user=user, subscribed_to_news=(username in ('a', 'b'))).save()
        users.append(user)
        if is_staff:
            staff.append(user)
    if not create_message:
        return users, staff, None, None
    # Setup test pigeon/message
    message = News(subject='Test', body='A test message')
    message.save()
    pigeon = Pigeon.objects.get(
        source_content_type=ContentType.objects.get_for_model(message),
        source_id=message.id)
    return users, staff, message, pigeon
class TestExampleMessage(TestCase):
    """
    Test that the example message gets added to the queue when it is saved.
    """
    def setUp(self):
        # Fresh users + one queued News message per test.
        self.users, self.staff, self.message, self.pigeon = create_fixtures()
    def _send_now(self):
        """Force immediate delivery of the fixture pigeon and refresh it."""
        pigeonpost_queue.send(sender=self.message) # send now
        process_queue()
        self.pigeon = Pigeon.objects.get(id=self.pigeon.id)
    def test_to_send(self):
        """ When a message is added, the field 'to_send' should be True """
        self.assertEqual(self.pigeon.to_send, True)
        self._send_now()
        self.assertEqual(self.pigeon.to_send, False)
    def test_sent_at(self):
        """ When a message is added, the field 'sent_at' should be None """
        assert(self.pigeon.sent_at is None)
        self._send_now()
        self.assertTrue(self.pigeon.sent_at) # check it is not None before comparing
        self.assertTrue(self.pigeon.sent_at <= datetime.datetime.now())
    def test_scheduled_for(self):
        """ The example Message has a deferred sending time of 6 hours """
        assert((self.pigeon.scheduled_for - datetime.datetime.now()).seconds > 5*60*60)
        assert((self.pigeon.scheduled_for - datetime.datetime.now()).seconds < 7*60*60)
    def test_many_signals_one_pigeon(self):
        """ Repeated queue signals (and repeated sends) reuse one pigeon. """
        pigeonpost_queue.send(sender=self.message, defer_for=1000)
        pigeonpost_queue.send(sender=self.message, defer_for=1000)
        pigeonpost_queue.send(sender=self.message, defer_for=1000)
        self.assertEqual(Pigeon.objects.count(), 1)
        pigeonpost_queue.send(sender=self.message)
        process_queue()
        pigeonpost_queue.send(sender=self.message)
        process_queue()
        pigeonpost_queue.send(sender=self.message)
        process_queue()
        self.assertEqual(Pigeon.objects.count(), 1)
    def test_resend_pigeon(self):
        """ Retrying a sent pigeon delivers only to new subscribers. """
        process_queue(force=True)
        self.assertEqual(Outbox.objects.count(), 2)
        self.assertEqual(Outbox.objects.filter(user__username='c').count(), 0)
        # Now another user signs up
        chelsea = Profile.objects.get(user__username='c')
        chelsea.subscribed_to_news = True
        chelsea.save()
        # And we resend the pigeon
        pigeonpost_queue.send(sender=self.message, retry=True)
        process_queue(force=True)
        # And chelsea gets a message
        self.assertEqual(Outbox.objects.count(), 3)
        self.assertEqual(Outbox.objects.filter(user__username='c').count(), 1)
    def test_save_many_times(self):
        """ When a message is saved more than once, only one copy should go on the queue """
        self.message.save()
        self.message.save()
        self.message.save()
        pigeons = Pigeon.objects.filter(source_content_type=ContentType.objects.get_for_model(self.message),
                source_id=self.message.id)
        self.assertEqual(len(pigeons), 1)
    def test_no_message_sent_now(self):
        """ As the message is deferred, it won't be sent when send_email is run """
        send_email()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages), 0)
        self.assertEqual(len(mail.outbox), 0)
    def test_message_sent_with_force(self):
        """ Force sending of all unsent pigeons """
        send_email(force=True)
        messages = Outbox.objects.all()
        self.assertEqual(len(messages), 2)
        self.assertEqual(len(mail.outbox), 2)
    def test_email_to_address(self):
        """ Delivered mail goes to the two subscribed users' addresses. """
        send_email(force=True)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].to, ['a@example.com'])
        self.assertEqual(mail.outbox[1].to, ['b@example.com'])
    @override_settings(PIGEONPOST_SINK_EMAIL='sink@example.com')
    def test_sink_email_setting(self):
        """ PIGEONPOST_SINK_EMAIL redirects every delivery to one address. """
        send_email(force=True)
        messages = Outbox.objects.all()
        self.assertEqual(len(messages), 2)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].to, ['sink@example.com'])
        self.assertEqual(mail.outbox[1].to, ['sink@example.com'])
    def test_kill_pigeons(self):
        """ Kill pigeons stops any unsent pigeons from delivering messages """
        kill_pigeons()
        send_email(force=True)
        messages = Outbox.objects.all()
        self.assertEqual(len(messages), 0)
        self.assertEqual(len(mail.outbox), 0)
    def test_message_not_sent_more_than_once(self):
        """ Force sending of all unsent pigeons """
        send_email(force=True)
        send_email(force=True)
        messages = Outbox.objects.all()
        self.assertEqual(len(messages), 2)
        self.assertEqual(len(mail.outbox), 2)
    def test_updated_scheduled_for(self):
        """ Sending the same pigeon details just updates scheduled_for """
        # First try using defer_for
        pigeonpost_queue.send(sender=self.message, defer_for=10) # 10 seconds
        pigeon = Pigeon.objects.get(
               source_content_type=ContentType.objects.get_for_model(self.message),
               source_id=self.message.id)
        delta = pigeon.scheduled_for - datetime.datetime.now()
        self.assertTrue(delta.seconds<=10)
        # now try with scheduled_for
        now = datetime.datetime.now()
        pigeonpost_queue.send(sender=self.message, scheduled_for=now)
        pigeon = Pigeon.objects.get(
               source_content_type=ContentType.objects.get_for_model(self.message),
               source_id=self.message.id)
        self.assertEqual(pigeon.scheduled_for, now)
class FakeSMTPConnection:
    """Stand-in SMTP connection whose deliveries always fail.

    send_messages reports zero messages delivered, which the code under test
    records as a failure. The original methods were missing ``self`` and only
    worked because ``*msgs`` swallowed the instance; the explicit ``self``
    keeps the same call signature for instance callers while being idiomatic.
    """
    def send_messages(self, *msgs, **kwargs):
        """Pretend to send; report that 0 messages were delivered."""
        return 0
    def close(self, *args, **kwargs):
        """Pretend to close the connection."""
        return True
class TestFaultyConnection(TestCase):
    """Verify that failed SMTP deliveries are recorded on outbox and pigeon."""
    def setUp(self):
        self.users, self.staff, self.message, self.pigeon = create_fixtures()
        # Monkey-patch Django's mail connection factory with the always-failing
        # fake; restored in tearDown.
        self._get_conn = mail.get_connection
        mail.get_connection = lambda *aa, **kw: FakeSMTPConnection()
    def tearDown(self):
        mail.get_connection = self._get_conn
    def test_faulty_connection(self):
        """ Check that we are noting failures. """
        send_email()
        outboxes = Outbox.objects.all()
        for ob in outboxes:
            self.assertEqual(ob.succeeded, False)
            self.assertEqual(ob.failures, 1)
            assert(ob.pigeon.failures > 0)
class TestImmediateMessage(TestCase):
    """Moderated news is delivered immediately, and only to staff users."""
    def setUp(self):
        self.users, self.staff, _, _ = create_fixtures(create_message=False)
        ModeratedNews(subject='...', body='...', published=True).save()
        process_queue()
    def test_outboxes_for_staff(self):
        """Both staff users get an outbox entry."""
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),2)
        for m in messages:
            assert m.user in self.staff
    def test_no_outboxes_for_nonstaff(self):
        """Non-staff users get nothing."""
        messages = Outbox.objects.all()
        nonstaff = set(self.users) - set(self.staff)
        self.assertEqual(len(messages),2)
        for m in messages:
            assert m.user not in nonstaff
class TestTargettedPigeons(TestCase):
    """Exercise the send_to / send_to_method targeting options of a pigeon."""
    def setUp(self):
        self.users, self.staff, _, _ = create_fixtures(create_message=False)
        self.news = BobsNews(subject='Propaganda daily', body='Bob is a great guy.')
        self.news.save()
        self.moderated_news = ModeratedNews(subject='Propaganda daily', body='Bob is not a great guy.')
        self.moderated_news.save()
        self.bob = User.objects.get(first_name__iexact='bob')
    def test_send_to(self):
        """ Test that we can send a pigeon to a specific user """
        pigeonpost_queue.send(sender=self.news, render_email_method='email_news',
                send_to=self.bob)
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),1)
        self.assertEqual(messages[0].user, self.bob)
    def test_send_to_method(self):
        """ Test that we can send a pigeon to a specific user """
        pigeonpost_queue.send(sender=self.news, render_email_method='email_news',
                send_to_method='get_everyone_called_bob')
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),1)
        self.assertEqual(messages[0].user, self.bob)
    def test_no_recipients(self):
        """ Test that the pigeon is processed correctly when there are no recipients """
        # Remove Bob
        User.objects.filter(username='b').delete()
        # Then try to send custom news to Bob
        pigeonpost_queue.send(sender=self.news, render_email_method='email_news',
                send_to_method='get_everyone_called_bob')
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),0)
        # The pigeon is still marked as processed even though nobody got mail.
        pigeon = Pigeon.objects.get(source_id=self.news.id)
        self.assertEqual(pigeon.to_send, False)
    def test_no_method_or_target(self):
        """ Test that the pigeon is processed correctly when there are no recipients """
        # Then try to send custom news to Bob
        pigeonpost_queue.send(sender=self.moderated_news, render_email_method='email_moderators')
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),2)
class TestUnboundPigeons(TestCase):
    """Pigeons whose sender is a model class (not an instance) can be sent
    repeatedly, producing a fresh outbox entry each time."""
    def setUp(self):
        self.users, self.staff, _, _ = create_fixtures(create_message=False)
        self.news = AggregateNews(news_bit='Bob is a great guy.')
        self.news.save()
        self.bob = User.objects.get(first_name__iexact='bob')
    def test_send(self):
        """A class-level pigeon delivers to its explicit target."""
        pigeonpost_queue.send(sender=AggregateNews, send_to=self.bob)
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),1)
        self.assertEqual(messages[0].user, self.bob)
    def test_double_send(self):
        """Unlike bound pigeons, a second send creates a second delivery."""
        pigeonpost_queue.send(sender=AggregateNews, send_to=self.bob)
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),1)
        self.assertEqual(messages[0].user, self.bob)
        pigeonpost_queue.send(sender=AggregateNews, send_to=self.bob)
        process_queue()
        messages = Outbox.objects.all()
        self.assertEqual(len(messages),2)
        self.assertEqual(messages[1].user, self.bob)
|
dragonfly-science/django-pigeonpost
|
pigeonpost/tests.py
|
Python
|
mit
| 12,077
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""
RPGDice - a tool to investigate the dice rules of different RPG systems.
"""
# Package metadata. NOTE(review): '__licence__' uses the British spelling;
# renaming it to '__license__' would change the public module attribute.
__author__ = 'Timid Robot Zehta'
__version__ = '0.0.1'
__licence__ = 'MIT'
|
TimZehta/rpgdice
|
rpgdice/__init__.py
|
Python
|
mit
| 211
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.stacklayout import StackLayout
from kivy.graphics import Rectangle, Color, Line
from kivy.uix.label import Label
from chess.board import Board
from iconbutton import IconButton
# RGBA colours used to outline squares on the board (hint vs. selection).
red = [1., 0., 0., 1.]
yellow = [1., 1., 0., 1.]
class BoardView(FloatLayout):
def __init__(self, **kwargs):
super(BoardView, self).__init__(**kwargs)
self.is_flipped = False
self.board = Board()
self.board.add_listener(self)
self.bind(pos=self.draw, size=self.draw)
self.side_to_move = 1
self.touches = []
self.validator = None
self.last_move = None
self.selection = None
self.hint = None
def draw(self, *args):
self.canvas.clear()
with self.canvas:
Color(1., 1., 1., 1.)
for rank in range(8):
for file in range(8):
c = self.board.squares[file][rank]
if (rank + file) % 2 == 0:
background = 'b'
else: background = 'w'
if c == ' ':
piece = 'sq'
else:
if c.islower():
piece = 'b' + c.lower()
else: piece = 'w' + c.lower()
img_name = 'img/tiles/' + background + piece + '.png'
if self.is_flipped:
y = 8 - (rank + 1)
x = 8 - (file + 1)
else:
y = rank
x = file
Rectangle(source=img_name, pos=[self.x + x * self.width/8.0,
self.y + y * self.height/8.0],
size=[self.width/8.0, self.height/8.0])
def draw_selection(self, square, color):
file, rank = square[0], square[1]
with self.canvas.after:
Color(color[0], color[1], color[2], color[3])
if self.is_flipped:
rank = 8 - (rank + 1)
file = 8 - (file + 1)
x1 = self.x + file * self.width / 8.0
x2 = self.width / 8.0
y1 = self.y + rank * self.height / 8.0
y2 = self.height / 8.0
Line(rectangle=(x1, y1, x2, y2), width=2)
def draw_last_move(self, input):
orig = input[0]
dest = input[1]
with self.canvas.after:
Color(1. ,1. ,1. , 1.)
file, rank = orig[0], orig[1]
if self.is_flipped:
rank = 8 - (rank + 1)
file = 8 - (file + 1)
x1 = self.x + file * self.width / 8.0
x2 = self.width / 8.0
y1 = self.y + rank * self.height / 8.0
y2 = self.height / 8.0
Line(rectangle=(x1, y1, x2, y2), width=2)
file, rank = dest[0], dest[1]
if self.is_flipped:
rank = 8 - (rank + 1)
file = 8 - (file + 1)
x1 = self.x + file * self.width / 8.0
x2 = self.width / 8.0
y1 = self.y + rank * self.height / 8.0
y2 = self.height / 8.0
Line(rectangle=(x1, y1, x2, y2), width=2)
def draw_highlight(self):
self.canvas.after.clear()
if self.last_move:
self.draw_last_move(self.last_move)
if self.hint:
self.draw_selection(self.hint, red)
if self.selection:
self.draw_selection(self.selection, yellow)
def flip_coords(self, n):
return 8 - (n + 1)
def flip(self, *args):
self.is_flipped = not self.is_flipped
if self.is_flipped:
self.parent.lay_ranks.orientation = 'bt-lr'
self.parent.lay_files.orientation = 'rl-tb'
else:
self.parent.lay_ranks.orientation = 'lr-tb'
self.parent.lay_files.orientation = 'bt-lr'
self.update()
    def update(self):
        """Repaint the board tiles and the overlay highlights."""
        self.draw()
        self.draw_highlight()
    def on_touch_down(self, touch):
        """Translate a touch into board coordinates and collect a move.

        First touch selects a square; the second completes the
        [origin, destination] pair and hands it to the validator.
        """
        if self.collide_point(touch.x, touch.y):
            # Map widget-relative pixels onto an 8x8 grid.
            file = int( (touch.x - self.x) / self.width * 8 )
            rank = int( (touch.y - self.y) / self.height * 8 )
            if self.is_flipped:
                rank = 8 - (rank + 1)
                file = 8 - (file + 1)
            # n is the count BEFORE this touch is recorded, so exactly one
            # branch below runs per touch.
            n = len(self.touches)
            if n == 0:
                self.touches.append([file, rank])
                self.selection = [file, rank]
                self.draw_highlight()
            if n == 1:
                # Second touch: submit a copy of the pair and reset selection.
                self.touches.append([file, rank])
                self.validator.test_next(self.touches[:])
                del self.touches[:]
                self.selection = []
                self.draw_highlight()
    def set_validator(self, validator):
        """Install the object whose test_next() receives completed move pairs."""
        self.validator = validator
    def show_hint(self, square):
        """Highlight ``square`` ([file, rank]) as a hint and repaint overlays."""
        self.hint = square
        self.draw_highlight()
    def hide_hint(self):
        """Clear any active hint highlight and repaint overlays."""
        self.hint = None
        self.draw_highlight()
class Chessgrid(FloatLayout):
    """Board container: a BoardView plus file/rank label strips and a flip button."""
    def __init__(self, **kwargs):
        super(Chessgrid, self).__init__(**kwargs)
        self.boardview = BoardView(size_hint=[.9,.9], pos_hint={'x':.1, 'y':.1})
        self.add_widget(self.boardview)
        # File letters 'a'..'h' across the bottom edge.
        self.lay_files = StackLayout(pos_hint={'x':.1, 'y':0}, size_hint=[.9,.1],
                                     orientation='lr-tb')
        self.lay_files.canvas.before.add(Color(.2, .2, .2, 1.))
        for i in range(8):
            self.lay_files.add_widget(Label(text=chr(i+97), size_hint=[.99/8, 1]))
        self.add_widget(self.lay_files)
        # Rank numbers 8..1 down the left edge (white's point of view).
        self.lay_ranks = StackLayout(pos_hint={'x':0, 'y':.1}, size_hint=[.1,.9],
                                     orientation='tb-lr')
        for i in range(8):
            self.lay_ranks.add_widget(Label(text=str(8-i), size_hint=[1., .99/8]))
        self.add_widget(self.lay_ranks)
        self.flip_button = IconButton(size_hint=[.1,.1], pos_hint={'x':0, 'y':0},
                                      icon='img/flip.png')
        self.flip_button.bind(on_press=self.boardview.flip)
        self.add_widget(self.flip_button)
        # Solid grey background kept in sync with the widget geometry.
        with self.canvas.before:
            Color(.2, .2, .2, 1.)
            self.rect = Rectangle()
        self.bind(pos=self._update_rect, size=self._update_rect)
    def _update_rect(self, *args):
        # Keep the background rectangle glued to the widget's position/size.
        self.rect.pos = self.pos
        self.rect.size = self.size
|
victor-rene/chess-intuition
|
chessgrid.py
|
Python
|
mit
| 6,513
|
import time
import os
import shutil
import subprocess
import platform
import sys
from subprocess import Popen, PIPE
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Sanity test for issue 6056: launch the NW.js app in this directory via
# chromedriver and check that the page reports 'success'.
chrome_options = Options()
testdir = os.path.dirname(os.path.abspath(__file__))
chrome_options.add_argument("nwapp=" + testdir)
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(2)
try:
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original bare `print x` statement is a SyntaxError under Python 3.
    print(driver.current_url)
    ret = driver.find_element_by_id('result').get_attribute('innerHTML')
    assert('success' in ret)
finally:
    # Always tear the browser down, even when the assertion fails.
    driver.quit()
|
nwjs/nw.js
|
test/sanity/issue6056-spawn-crash/test.py
|
Python
|
mit
| 721
|
import unittest
from kairosdbclient.rest.apirequests.save_metric_datapoints import SaveMetricDataPointsRequest, MetricDataPoints
class SaveMetricDataPointsRequestTest(unittest.TestCase):
    """Unit tests for SaveMetricDataPointsRequest payload construction."""
    def test_save_metric_data_points_request(self):
        # One metric with a single [timestamp, value] sample and a host tag;
        # note the serialized payload below carries no 'ttl' key.
        data_points = MetricDataPoints('metric', [[100, 5.4]], ttl=0, host='azure')
        request = SaveMetricDataPointsRequest(data_points=data_points)
        self.assertEqual(request.payload(), [{'name': 'metric',
                                              'tags': {'host': 'azure'},
                                              'datapoints': [[100, 5.4]]
                                              }])
        self.assertEqual(request.resource, None)
        self.assertEqual(request.success_status_code, 204)
        self.assertEqual(request.uri, 'datapoints')
|
FrEaKmAn/kairosdb-python-client
|
tests/rest/apirequests/test_save_metric_datapoints_request.py
|
Python
|
mit
| 815
|
from curses import wrapper
import curses
from t3.screen import WrappingPoint
curses.initscr()
curses.cbreak()
def main(stdscr):
    """Echo typed characters down the screen until 'q' is pressed.

    Each keypress advances the cursor one row via WrappingPoint.
    NOTE(review): the module-level initscr()/cbreak() calls above are
    redundant — curses.wrapper() performs its own initialization.
    """
    stdscr.clear()
    stdscr.move(0,0)
    stdscr.refresh()
    y,x= stdscr.getyx()
    location = WrappingPoint(stdscr.getmaxyx(),x=x,y=y)
    while True:
        current = stdscr.getkey()
        if current == 'q':
            break
        else:
            location.y +=1
            # Presumably WrappingPoint wraps y back to 0 past the last row
            # (see the old.y/size.y check); then shift one column right.
            # TODO confirm against t3.screen.WrappingPoint.
            if location.old.y==location.size.y-1 and location.y==0:
                location.x+=1
            stdscr.move(location.y,location.x)
            stdscr.addstr(current)
            stdscr.refresh()
wrapper(main)
|
girishramnani/t3
|
trials/location_fun.py
|
Python
|
mit
| 644
|
import gzip
import json
from hashlib import md5
from concurrent import futures
from operator import itemgetter
import requests
from .utils import prepare_url
from . import BASE
class InvalidReferenceData(Exception):
    """Raised when a downloaded reference-data file fails its MD5 checksum."""
    pass
def _load_single_file(url):
    """Fetch one gzipped JSON reference file and verify its MD5 checksum.

    ``url`` is a dict with 'url' and 'checksum' keys; raises
    InvalidReferenceData on a checksum mismatch.
    """
    response = requests.get(url['url'])
    payload = gzip.decompress(response.content)
    digest = md5(payload).hexdigest()
    expected = url['checksum']
    if digest != expected:
        raise InvalidReferenceData('{} != {}'.format(digest, expected))
    return json.loads(payload.decode())
def load_reference_data(api_key, dataset='PerthRestricted'):
    """Download and merge all available reference-data files for ``dataset``.

    Lists the available files, then fetches/verifies each one on a small
    thread pool and merges the resulting JSON dicts into one mapping.
    """
    params = {
        'dataset': dataset
    }
    url = BASE + '/rest/Datasets/:dataset/AvailableReferenceData'
    url = prepare_url(url, params)
    files = requests.get(
        url,
        params={
            'ApiKey': api_key,
            'format': 'json'
        }
    )
    # Lazy stream of {checksum, url} descriptors, one per reference file.
    urls = (
        {
            'checksum': ref_data['JsonChecksum'],
            'url': ref_data['JsonZippedUrl']
        }
        for ref_data in files.json()['AvailableReferenceDataList']
    )
    # Five concurrent downloads; checksum failures surface here.
    data = futures.ThreadPoolExecutor(5).map(_load_single_file, urls)
    reference_data = {}
    for json_data in data:
        reference_data.update(json_data)
    return reference_data
def get_stop_numbers(api_key):
    """
    Returns a rather large lazy iterable of non-empty transit stop codes;
    be careful now :P
    """
    stops = load_reference_data(api_key)['TransitStopReferenceData']
    return (stop['Code'] for stop in stops if stop['Code'])
if __name__ == '__main__':
    # Manual smoke test: read the API key from the repo-root auth.json and
    # drop into an IPython shell with the reference data loaded as `output`.
    from os.path import dirname, join
    filename = join(dirname(__file__), '..', '..', 'auth.json')
    with open(filename) as fh:
        api_key = json.load(fh)['api_key']
    output = load_reference_data(api_key)
    import IPython
    IPython.embed()
|
Mause/pytransperth
|
transperth/silver_rails/reference_data.py
|
Python
|
mit
| 1,874
|
# -*- coding: utf-8 -*-
from .configuration import read_configuration
from .webdoc import process_screenshots, get_screenshots
def main(config_path=None):
    """Entry point: read configuration and process every screenshot it covers.

    :param config_path: optional path to the configuration file; ``None``
        lets read_configuration use its default discovery.
    """
    config = read_configuration(config_path)
    process_screenshots(get_screenshots(config.folder), config)
|
bjornarg/skald
|
skald/main.py
|
Python
|
mit
| 265
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
    """Network security rule.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Required. Network protocol this rule applies to. Possible
     values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
     '*'
    :type protocol: str or
     ~azure.mgmt.network.v2016_09_01.models.SecurityRuleProtocol
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: Required. The CIDR or source IP range.
     Asterisk '*' can also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param destination_address_prefix: Required. The destination address
     prefix. CIDR or source IP range. Asterisk '*' can also be used to match all
     source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
     'Internet' can also be used.
    :type destination_address_prefix: str
    :param access: Required. The network traffic is allowed or denied.
     Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
     'Deny'
    :type access: str or
     ~azure.mgmt.network.v2016_09_01.models.SecurityRuleAccess
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: Required. The direction of the rule. The direction
     specifies if rule will be evaluated on incoming or outgoing traffic.
     Possible values are: 'Inbound' and 'Outbound'. Possible values include:
     'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2016_09_01.models.SecurityRuleDirection
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    _validation = {
        'protocol': {'required': True},
        'source_address_prefix': {'required': True},
        'destination_address_prefix': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, protocol, source_address_prefix: str, destination_address_prefix: str, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        super(SecurityRule, self).__init__(id=id, **kwargs)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/security_rule_py3.py
|
Python
|
mit
| 5,523
|
import os
from tests import test_client_credentials, create_unique_name
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.files.file import File
from office365.sharepoint.lists.list import List
from office365.sharepoint.lists.list_creation_information import ListCreationInformation
from office365.sharepoint.lists.list_template_type import ListTemplateType
from office365.sharepoint.pages.template_file_type import TemplateFileType
from office365.sharepoint.webs.web import Web
def normalize_response(response):
    """Decode a response body and strip one pair of matching outer quotes.

    :param response: raw bytes of an HTTP response body.
    :return: the decoded text, without a surrounding '...'/"..." pair.
    """
    content = response.decode("utf-8")
    # Guard against an empty body: content[0]/content[-1] raised IndexError
    # in the original version.
    if content and (content[0] == content[-1]) and content.startswith(("'", '"')):
        return content[1:-1]
    return content
class TestSharePointFile(SPTestCase):
    """Ordered integration tests for SharePoint file operations.

    The numeric prefixes in the test names matter: later tests reuse the
    library, files and state created by earlier ones via the class
    attributes ``target_list`` and ``target_file``.
    """
    content_placeholder = create_unique_name("1234567890 abcdABCD")
    file_entries = [
        {"Name": "Sample.txt", "Type": "Text"},
        {"Name": "SharePoint User Guide.docx", "Type": "Binary"}
    ]
    target_list = None  # type: List
    target_file = None  # type: File
    @classmethod
    def setUpClass(cls):
        # Create a uniquely named document library shared by all tests.
        super(TestSharePointFile, cls).setUpClass()
        cls.target_list = cls.ensure_list(cls.client.web,
                                          ListCreationInformation(
                                              create_unique_name("Archive Documents"),
                                              None,
                                              ListTemplateType.DocumentLibrary))
    @classmethod
    def tearDownClass(cls):
        cls.target_list.delete_object().execute_query()
    def test1_upload_files(self):
        # Upload the fixture files, reading text and binary entries differently.
        for entry in self.file_entries:
            path = "{0}/../data/{1}".format(os.path.dirname(__file__), entry["Name"])
            if entry["Type"] == "Binary":
                file_content = self.read_file_as_binary(path)
            else:
                file_content = self.read_file_as_text(path)
            target_folder = self.__class__.target_list.root_folder
            uploaded_file = target_folder.upload_file(entry["Name"], file_content).execute_query()
            self.assertEqual(uploaded_file.name, entry["Name"])
    def test2_upload_large_file(self):
        # Chunked upload session with 1 MB chunks.
        path = "{0}/../data/big_buck_bunny.mp4".format(os.path.dirname(__file__))
        file_size = os.path.getsize(path)
        size_1mb = 1000000
        result_file = self.__class__.target_list.root_folder.files.create_upload_session(path, size_1mb).execute_query()
        self.assertEqual(file_size, int(result_file.length))
    def test3_get_first_file(self):
        files = self.__class__.target_list.root_folder.files.top(1).get().execute_query()
        self.assertEqual(len(files), 1)
        self.__class__.target_file = files[0]
    def test4_get_file_from_absolute_url(self):
        file_abs_url = self.client.base_url + self.__class__.target_file.serverRelativeUrl
        file = File.from_url(file_abs_url).with_credentials(test_client_credentials).get().execute_query()
        self.assertIsNotNone(file.serverRelativeUrl)
    def test5_create_file_anon_link(self):
        file_url = self.__class__.target_file.serverRelativeUrl
        result = Web.create_anonymous_link(self.client, file_url, False).execute_query()
        self.assertIsNotNone(result.value)
    def test6_load_file_metadata(self):
        list_item = self.__class__.target_file.listItemAllFields.expand(["File"]).get().execute_query()
        self.assertIsInstance(list_item.file, File)
    def test7_load_file_metadata_alt(self):
        # Same as test6, via the explicit client.load(...) API.
        list_item = self.__class__.target_file.listItemAllFields
        self.client.load(list_item, ["File"])
        self.client.execute_query()
        self.assertIsInstance(list_item.file, File)
    def test8_update_file_content(self):
        """Test file upload operation"""
        files = self.__class__.target_list.root_folder.files.get().execute_query()
        for file_upload in files:
            response = File.save_binary(self.client, file_upload.properties["ServerRelativeUrl"],
                                        self.content_placeholder)
            self.assertTrue(response.ok)
    def test9_update_file_metadata(self):
        """Test file update metadata"""
        list_item = self.__class__.target_file.listItemAllFields  # get metadata
        list_item.set_property('Title', 'Updated')
        list_item.update().execute_query()
    def test_10_list_file_versions(self):
        """Test file update metadata"""
        file_with_versions = self.__class__.target_file.expand(["Versions"]).get().execute_query()
        self.assertGreater(len(file_with_versions.versions), 0)
    def test_11_delete_file_version(self):
        versions = self.__class__.target_file.versions.top(1).get().execute_query()
        self.assertEqual(len(versions), 1)
        self.assertIsNotNone(versions[0].resource_path)
        versions[0].delete_object().execute_query()
    def test_12_delete_file_version_by_id(self):
        versions = self.__class__.target_file.versions.top(1).get().execute_query()
        self.assertEqual(len(versions), 1)
        ver_id = versions[0].id
        versions.delete_by_id(ver_id).execute_query()
    def test_13_download_file(self):
        """Test file upload operation"""
        files = self.__class__.target_list.root_folder.files.get().execute_query()
        for file in files:  # type: File
            content = file.read()
            enc_content = normalize_response(content)
            self.assertEqual(enc_content, self.content_placeholder)
    def test_14_copy_file(self):
        files = self.__class__.target_list.root_folder.files.get().execute_query()
        for cur_file in files:  # type: File
            file_url = cur_file.serverRelativeUrl
            path, file_name = os.path.split(file_url)
            new_file_url = '/'.join([path, "copied_" + file_name])
            cur_file.copyto(new_file_url, True).execute_query()
            moved_file = self.client.web.get_file_by_server_relative_url(new_file_url).get().execute_query()
            self.assertEqual(new_file_url, moved_file.serverRelativeUrl)
    def test_15_move_file(self):
        files = self.__class__.target_list.root_folder.files.get().execute_query()
        for cur_file in files:
            file_url = cur_file.properties["ServerRelativeUrl"]
            path, file_name = os.path.split(file_url)
            new_file_url = '/'.join([path, "moved_" + file_name])
            cur_file.moveto(new_file_url, 1).execute_query()
            moved_file = self.client.web.get_file_by_server_relative_url(new_file_url).get().execute_query()
            self.assertEqual(new_file_url, moved_file.properties["ServerRelativeUrl"])
    def test_16_recycle_first_file(self):
        """Test file upload operation"""
        files = self.__class__.target_list.root_folder.files.get().execute_query()
        files_count = len(files)
        if files_count > 0:
            first_file = files[0]
            first_file.recycle()
            first_file.execute_query()
            files_after = self.__class__.target_list.root_folder.files.get().execute_query()
            self.assertEqual(len(files) - 1, len(files_after))
    def test_17_create_template_file(self):
        target_folder = self.__class__.target_list.root_folder.get().execute_query()
        file_url = '/'.join([target_folder.serverRelativeUrl, "WikiPage.aspx"])
        file_new = self.__class__.target_list.root_folder.files.add_template_file(file_url, TemplateFileType.WikiPage)
        self.client.execute_query()
        self.assertEqual(file_new.serverRelativeUrl, file_url)
        self.__class__.target_file = file_new
    def test_18_get_folder_changes(self):
        changes = self.__class__.target_file.listItemAllFields.get_changes(ChangeQuery(item=True)).execute_query()
        self.assertGreater(len(changes), 0)
    def test_19_delete_files(self):
        files_to_delete = self.__class__.target_list.root_folder.files.get().execute_query()
        for file_to_delete in files_to_delete:  # type: File
            file_to_delete.delete_object().execute_query()
        file_col = self.__class__.target_list.root_folder.files.get().execute_query()
        self.assertEqual(len(file_col), 0)
|
vgrem/Office365-REST-Python-Client
|
tests/sharepoint/test_file.py
|
Python
|
mit
| 8,303
|
import os
# Fails fast with KeyError at import time when DATABASE_URL is unset.
SQLALCHEMY_DATABASE_URI=os.environ['DATABASE_URL']
|
Opshun/API
|
config.py
|
Python
|
mit
| 62
|
#-------------------------------------------------------------------------------
# Name: circle_detector2.py
# Purpose: To provide detector that detects number of line in a given area
# for each considered pixel.
#-------------------------------------------------------------------------------
import math
from numpy import vstack, array
# my lib
from circle_detector1 import circle_coordinates
def dot(v,w):
    """Return the dot product of 2-D vectors v and w."""
    return v[0] * w[0] + v[1] * w[1]
def length(v):
    """Return the Euclidean length of 2-D vector v."""
    vx, vy = v
    return math.sqrt(vx * vx + vy * vy)
def vector(b,e):
    """Return the displacement vector from point b to point e."""
    return (e[0] - b[0], e[1] - b[1])
def unit(v):
    """Return v scaled to length 1 (ZeroDivisionError for the zero vector)."""
    mag = math.sqrt(v[0] * v[0] + v[1] * v[1])
    return (v[0] / mag, v[1] / mag)
def distance(p0,p1):
    """Return the Euclidean distance between points p0 and p1."""
    dx = p1[0] - p0[0]
    dy = p1[1] - p0[1]
    return math.sqrt(dx * dx + dy * dy)
def scale(v,sc):
    """Return v multiplied component-wise by the scalar sc."""
    return (v[0] * sc, v[1] * sc)
def add(v,w):
    """Return the component-wise sum of vectors v and w."""
    return (v[0] + w[0], v[1] + w[1])
# Given a line with coordinates 'start' and 'end' and the
# coordinates of a point 'pnt' the proc returns the shortest
# distance from pnt to the line and the coordinates of the
# nearest point on the line.
#
# 1 Convert the line segment to a vector ('line_vec').
# 2 Create a vector connecting start to pnt ('pnt_vec').
# 3 Find the length of the line vector ('line_len').
# 4 Convert line_vec to a unit vector ('line_unitvec').
# 5 Scale pnt_vec by line_len ('pnt_vec_scaled').
# 6 Get the dot product of line_unitvec and pnt_vec_scaled ('t').
# 7 Ensure t is in the range 0 to 1.
# 8 Use t to get the nearest location on the line to the end
# of vector pnt_vec_scaled ('nearest').
# 9 Calculate the distance from nearest to pnt_vec_scaled.
# 10 Translate nearest back to the start/end line.
# Malcolm Kesson 16 Dec 2012
def pnt2line(pnt, start, end):
    """Return (distance, nearest) from point ``pnt`` to the segment start-end.

    Implements the numbered algorithm in the comment block above: ``t`` is
    the normalized projection of pnt onto the segment, clamped to [0, 1] so
    the nearest point never falls outside the segment endpoints.
    """
    line_vec = vector(start, end)
    pnt_vec = vector(start, pnt)
    line_len = length(line_vec)
    line_unitvec = unit(line_vec)
    pnt_vec_scaled = scale(pnt_vec, 1.0/line_len)
    t = dot(line_unitvec, pnt_vec_scaled)
    if t < 0.0:
        t = 0.0
    elif t > 1.0:
        t = 1.0
    nearest = scale(line_vec, t)
    dist = distance(nearest, pnt_vec)
    # Translate the nearest point back into the original coordinate frame.
    nearest = add(nearest, start)
    return (dist, nearest)
def closest_links_based(border, dx, dy, Trx, Try, Recx, Recy, pix, link_num):
    """For each circle-boundary pixel, select the ``link_num`` nearest links.

    Links are segments from transmitter coordinates (Trx, Try) to receiver
    coordinates (Recx, Recy); nearness is point-to-segment distance via
    pnt2line.  Returns an array with one row of link indices per pixel.
    NOTE(review): circle_coordinates' exact output format is defined in
    circle_detector1 — confirm there.
    """
    circle = circle_coordinates(border, dx, dy, pix)
    # Stack per-axis coordinate arrays into (N, 2) endpoint lists.
    Trn = vstack([Trx, Try]).T
    Recn = vstack([Recx, Recy]).T
    selected = []
    for each in circle:
        dist = [pnt2line(each, start, end)[0] for start, end in zip(Trn, Recn)]
        # Link indices sorted by ascending distance; keep the first link_num.
        indices = [i[0] for i in sorted(enumerate(dist), key=lambda x:x[1])]
        selected.append(indices[:link_num])
    return array(selected)
|
bzohidov/TomoRain
|
linkquality/circle_detector2.py
|
Python
|
mit
| 2,636
|
import pytest
import time
@pytest.fixture
def dep(request):
    # Simulated slow fixture setup so the profiler has time to attribute.
    time.sleep(0.3)
    return None
def test_simple1(dep):
    # Sleeps so this test shows up distinctly in the profile output.
    time.sleep(0.2)
def test_simple2(dep):
    # Second identical test to demonstrate shared-fixture profiling.
    time.sleep(0.2)
|
baverman/flameprof
|
tests/sample_test.py
|
Python
|
mit
| 188
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import numpy as np
class ParameterTags(object):
    """String tags used to classify model parameters."""
    BIAS = 'BIAS'
    WEIGHT = 'WEIGHT'
    COMPUTED_PARAM = 'COMPUTED_PARAM'
class ParameterInfo(object):
    """Metadata for a single model parameter blob (shape, gradient, optimizer)."""
    def __init__(
        self, param_id, param, key=None, shape=None, length=None,
        grad=None, blob_copy=None):
        assert isinstance(param, core.BlobReference)
        self.param_id = param_id
        self.name = str(param)
        self.blob = param
        self.key = key
        self.shape = shape
        # Total element count, derived from shape when one is given.
        self.size = None if shape is None else np.prod(shape)
        self.length = max(1, length if length is not None else 1)
        self.grad = grad
        self._cloned_init_net = None
        # Optionally store equivalent copies of the blob
        # in different precisions (i.e. half and float copies)
        # stored as a dict of TensorProto.DataType -> BlobReference
        self.blob_copy = blob_copy
        # each param_info can have its own optimizer. It can be set within
        # OptimizerContext (caffe2/python/optimizer.py)
        self._optimizer = None
    @property
    def parameter(self):
        # Read-only alias for the underlying BlobReference.
        return self.blob
    @property
    def optimizer(self):
        return self._optimizer
    @optimizer.setter
    def optimizer(self, value):
        # Write-once: an optimizer may only be assigned a single time.
        assert self._optimizer is None, "optimizer has already been set"
        self._optimizer = value
    def __str__(self):
        return self.name
|
ryfeus/lambda-packs
|
pytorch/source/caffe2/python/modeling/parameter_info.py
|
Python
|
mit
| 1,583
|
"""Flask-AppBuilder configuration for the BDPDB application."""
import os
from flask_appbuilder.security.manager import AUTH_DB
basedir = os.path.abspath(os.path.dirname(__file__))
# Your App secret key
# NOTE(review): '\e', '\y' and '\h' are not valid escape sequences and remain
# literal backslash pairs (DeprecationWarning on newer Pythons).  The value is
# deliberately left unchanged: altering SECRET_KEY would invalidate sessions.
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h'
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
#SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
#SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# Flask-WTF flag for CSRF
CSRF_ENABLED = True
#------------------------------
# GLOBALS FOR APP Builder
#------------------------------
# Uncomment to setup Your App name
APP_NAME = "BDPDB"
# Uncomment to setup Setup an App icon
#APP_ICON = "static/img/logo.jpg"
#----------------------------------------------------
# AUTHENTICATION CONFIG
#----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
#AUTH_USER_REGISTRATION = True
# The default user self registration role
#AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
#AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
#OPENID_PROVIDERS = [
#    { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
#    { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
#    { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
#    { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
#---------------------------------------------------
# Babel config for translations
#---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'translations'
# The allowed translation for you app
LANGUAGES = {
    'en': {'flag':'gb', 'name':'English'}
}
#---------------------------------------------------
# Image and file configuration
#---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = basedir + '/app/static/uploads/'
# Custom
# Bug fix: the original `('nii.gz')` was a parenthesized *string*, not a
# tuple, so membership tests matched any substring such as 'nii'.  The
# trailing comma makes it the intended one-element tuple.
FILE_ALLOWED_EXTENSIONS = ('nii.gz',)
DERIVATIVES_SERVER_BASEDIR = '/home/despoB/dlurie/Projects/despolab_lesion/data/patients/'
RAW_SERVER_BASEDIR = '/home/despoB/lesion/data/original/bids/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = basedir + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
#IMG_SIZE = (300, 200, True)
# Theme configuration
# these are located on static/appbuilder/css/themes
# you can create your own and easily use them placing them on the same dir structure to override
#APP_THEME = "bootstrap-theme.css"  # default bootstrap
#APP_THEME = "amelia.css"
#APP_THEME = "cerulean.css"
#APP_THEME = "cosmo.css"
#APP_THEME = "cyborg.css"
#APP_THEME = "darkly.css"
#APP_THEME = "flatly.css"
#APP_THEME = "journal.css"
#APP_THEME = "lumen.css"
APP_THEME = "paper.css"
#APP_THEME = "readable.css"
#APP_THEME = "sandstone.css"
#APP_THEME = "simplex.css"
#APP_THEME = "slate.css"
#APP_THEME = "solar.css"
#APP_THEME = "spacelab.css"
#APP_THEME = "superhero.css"
#APP_THEME = "united.css"
#APP_THEME = "yeti.css"
|
danlurie/bdpdb
|
bdpdb/config.py
|
Python
|
mit
| 3,591
|
'''Takes a 2-dimensional array that represents data points in an n-dimensional
space, and labels s.t. the ith label corresponds to the ith data point.
Returns a hyperplane consisting of a vector normal to a separating hyperplane
and a point it passes through.'''
def train(data, labels):
    """Fit a separating hyperplane between two labeled point sets.

    data: list of n-dimensional points; labels: 0/1 label per point.
    Returns (normal_vector, midpoint): a vector normal to the separating
    hyperplane and a point the plane passes through.
    """
    class0 = [point for point, label in zip(data, labels) if label == 0]
    class1 = [point for point, label in zip(data, labels) if label != 0]
    # Support vectors: the point of each class nearest the other class's mean.
    mean0 = __calculate_mean(class0)
    mean1 = __calculate_mean(class1)
    support_vector0 = __find_supportvecs(class0, mean1)
    support_vector1 = __find_supportvecs(class1, mean0)
    # The plane passes through the midpoint of the two support vectors and is
    # normal to the vector joining them.
    midpoint = [((a + b) / 2.0) for a, b in zip(support_vector0, support_vector1)]
    slope = [(a - b) for a, b in zip(support_vector0, support_vector1)]
    return (slope, midpoint)
'''evaluate the hyperplane (of the form ax + by + cz + ... = 0)
if the result is greater than 0 it lies above the plane and belongs to one class
and if not it belongs to the other
'''
def predict(data_point, hyperplane):
    """Classify data_point against a (normal, point) hyperplane.

    Evaluates normal . (data_point - point); label 0 lies strictly above
    the plane, label 1 on or below it.
    """
    normal, origin = hyperplane
    value = sum(normal[i] * (data_point[i] - origin[i])
                for i in range(len(data_point)))
    #TODO: account for higher values not corresponding to 'bigger' label
    if value > 0:
        return 0
    return 1
#finds the per-dimension mean of a given class
def __calculate_mean(classN):
    """Return the mean of ``classN`` computed per dimension.

    Bug fix: the previous version averaged each *point's* coordinates,
    returning one value per data point instead of one per dimension
    (contradicting the "calculate mean for each dimension" intent in
    train); downstream zip() truncation merely hid the wrong length.
    """
    if not classN:
        return []
    num_points = float(len(classN))
    return [sum(point[d] for point in classN) / num_points
            for d in range(len(classN[0]))]
'''find the supportvectors used to form our separating hyperplane
this is done by finding the point in each class closest to the mean
of the other class'''
def __find_supportvecs(classN, mean):
    """Return the point in ``classN`` nearest (Euclidean) to ``mean``.

    Replaces the arbitrary 10000000000000000 sentinel (the original's own
    "THIS IS BAD" TODO) with min() over squared distances; squaring
    preserves the argmin and ties still keep the first point.  An empty
    class returns [] as before.
    """
    if not classN:
        return []
    return min(classN,
               key=lambda point: sum((a - b) ** 2 for a, b in zip(point, mean)))
if __name__ == "__main__":
    #some test code
    # Two separable clusters in opposite quadrants; (-3, -3) sits well inside
    # the label-1 cluster, so this should print 1.
    data = [[1, 1] , [1, 2], [2, 1], [2, 2], [-1, -1], [-1, -2], [-2, -1],
            [-2, -2]]
    labels = [0, 0, 0, 0, 1, 1, 1, 1]
    hyperplane = train(data, labels)
    print(predict([-3, -3], hyperplane))
|
chsahit/sillysvm
|
sillysvm.py
|
Python
|
mit
| 2,647
|
#!/usr/bin/env python
from lxml import etree
import re
import json
import locale
import ec2
from six.moves.urllib import request as urllib2
# Following advice from https://stackoverflow.com/a/1779324/216138
# The locale must be installed in the system, and it must be one where ',' is
# the thousands separator and '.' is the decimal fraction separator.
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
class Instance(object):
def __init__(self):
self.arch = []
self.api_description = None
self.availability_zones = {}
self.base_performance = None
self.burst_minutes = None
self.clock_speed_ghz = None
self.compute_capability = 0
self.devices = 0
self.drive_size = None
self.ebs_iops = 0
self.ebs_max_bandwidth = 0
self.ebs_only = True
self.ebs_optimized = False
self.ebs_throughput = 0
self.ebs_as_nvme = False
self.ECU = 0
self.enhanced_networking = None
self.family = ''
self.FPGA = 0
self.generation = None
self.GPU = 0
self.GPU_memory = 0
self.GPU_model = None
self.includes_swap_partition = False
self.instance_type = ''
self.intel_avx = None
self.intel_avx2 = None
self.intel_avx512 = None
self.intel_turbo = None
self.linux_virtualization_types = []
self.memory = 0
self.network_performance = None
self.num_drives = None
self.nvme_ssd = False
self.physical_processor = None
self.placement_group_support = False
self.pretty_name = ''
self.pricing = {}
self.size = 0
self.ssd = False
self.storage_needs_initialization = False
self.trim_support = False
self.vCPU = 0
self.vpc = None
self.vpc_only = True
self.emr = False
def get_type_prefix(self):
"""h1, i3, d2, etc"""
return self.instance_type.split(".")[0]
def get_ipv6_support(self):
"""Fancy parsing not needed for ipv6 support.
"IPv6 is supported on all current generation instance types and the
C3, R3, and I2 previous generation instance types."
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html
FIXME: This should be a @property, but this project is still Python 2. Yikes!
"""
ipv4_only_families = ("cg1", "m1", "m3", "c1", "cc2", "g2", "m2", "cr1", "hs1", "t1")
return self.get_type_prefix() not in ipv4_only_families
def to_dict(self):
d = dict(family=self.family,
instance_type=self.instance_type,
pretty_name=self.pretty_name,
arch=self.arch,
vCPU=self.vCPU,
GPU=self.GPU,
GPU_model=self.GPU_model,
GPU_memory=self.GPU_memory,
compute_capability=self.compute_capability,
FPGA=self.FPGA,
ECU=self.ECU,
base_performance=self.base_performance,
burst_minutes=self.burst_minutes,
memory=self.memory,
ebs_optimized=self.ebs_optimized,
ebs_throughput=self.ebs_throughput,
ebs_iops=self.ebs_iops,
ebs_as_nvme=self.ebs_as_nvme,
ebs_max_bandwidth=self.ebs_max_bandwidth,
network_performance=self.network_performance,
enhanced_networking=self.enhanced_networking,
placement_group_support=self.placement_group_support,
pricing=self.pricing,
vpc=self.vpc,
linux_virtualization_types=self.linux_virtualization_types,
generation=self.generation,
vpc_only=self.vpc_only,
ipv6_support=self.get_ipv6_support(),
physical_processor=self.physical_processor,
clock_speed_ghz=self.clock_speed_ghz,
intel_avx=self.intel_avx,
intel_avx2=self.intel_avx2,
intel_avx512=self.intel_avx512,
intel_turbo=self.intel_turbo,
emr=self.emr,
availability_zones=self.availability_zones)
if self.ebs_only:
d['storage'] = None
else:
d['storage'] = dict(ssd=self.ssd,
trim_support=self.trim_support,
nvme_ssd=self.nvme_ssd,
storage_needs_initialization=self.storage_needs_initialization,
includes_swap_partition=self.includes_swap_partition,
devices=self.num_drives,
size=self.drive_size,
size_unit=self.size_unit)
return d
    def __repr__(self):
        """Debug representation, e.g. '<Instance m5.large>'."""
        return "<Instance {}>".format(self.instance_type)
def sanitize_instance_type(instance_type):
    """Normalize a raw instance-type string.

    Strips every whitespace character and corrects a known set of typos
    seen in AWS's published tables.
    """
    known_typos = {
        "x1.16large": "x1.16xlarge",  # https://github.com/powdahound/ec2instances.info/issues/199
        "i3.4xlxarge": "i3.4xlarge",  # https://github.com/powdahound/ec2instances.info/issues/227
        "i3.16large": "i3.16xlarge",  # https://github.com/powdahound/ec2instances.info/issues/227
        "p4d.2xlarge": "p4d.24xlarge",  # as of 2020-11-15
    }
    cleaned = re.sub(r"\s+", "", instance_type, flags=re.UNICODE)
    return known_typos.get(cleaned, cleaned)
def totext(elt):
    """Flatten an lxml element to its text content.

    Trailing footnote markers such as '*1' are stripped from the result.
    """
    text = etree.tostring(elt, method='text', encoding='unicode').strip()
    return re.sub(r'\*\d$', '', text)
def transform_size(size):
    """Expand an abbreviated size token to its full API name.

    'u' -> 'micro', 'sm' -> 'small', 'med' -> 'medium', 'lg' -> 'large',
    'xl' -> 'xlarge', 'xxl' -> '2xlarge', 'xxxl' -> '3xlarge', etc.
    Any other token is a parse error (AssertionError).
    """
    fixed_names = {'u': 'micro', 'sm': 'small', 'med': 'medium'}
    if size in fixed_names:
        return fixed_names[size]
    match = re.search('^(x+)l$', size)
    if match:
        x_count = len(match.group(1))
        return 'xlarge' if x_count == 1 else str(x_count) + 'xlarge'
    assert size == 'lg', "Unable to parse size: %s" % (size,)
    return 'large'
def transform_region(reg):
    """Map a marketing region token to its canonical API region name.

    Legacy marketing names (e.g. 'eu-ireland') map through a fixed table;
    otherwise a missing numeric suffix defaults to region 1
    (e.g. 'us-east' -> 'us-east-1').
    """
    legacy_names = {
        'eu-ireland': 'eu-west-1',
        'eu-frankfurt': 'eu-central-1',
        'apac-sin': 'ap-southeast-1',
        'apac-syd': 'ap-southeast-2',
        'apac-tokyo': 'ap-northeast-1',
    }
    if reg in legacy_names:
        return legacy_names[reg]
    parsed = re.search(r'^([^0-9]*)(-(\d))?$', reg)
    assert parsed, "Can't parse region: %s" % (reg,)
    suffix = parsed.group(3) or '1'
    return parsed.group(1) + "-" + suffix
def add_ebs_pricing(imap, data):
    """Merge per-region EBS-optimized surcharge pricing into known instances.

    :param imap: dict mapping instance_type -> Instance (mutated in place;
                 each Instance must already have a .pricing dict)
    :param data: parsed pricing-ebs-optimized-instances.min.js document
    Unknown instance types in the feed are reported and skipped.
    """
    for region_spec in data['config']['regions']:
        region = transform_region(region_spec['region'])
        for t_spec in region_spec['instanceTypes']:
            # NOTE: t_spec['type'] is a pricing-table grouping label, not an
            # instance type; it was previously read into an unused local.
            for i_spec in t_spec['sizes']:
                i_type = i_spec['size']
                if i_type not in imap:
                    print("ERROR: Got EBS pricing data for unknown instance type: {}".format(i_type))
                    continue
                inst = imap[i_type]
                inst.pricing.setdefault(region, {})
                # Each size row carries a single USD surcharge column.
                for col in i_spec['valueColumns']:
                    inst.pricing[region]['ebs'] = col['prices']['USD']
def add_pricing_info(instances):
    """Attach pricing to every instance: on-demand via the ec2 module, then EBS surcharges.

    Resets each instance's .pricing dict before merging, so repeated calls
    start from a clean slate.
    """
    for i in instances:
        i.pricing = {}
    by_type = {i.instance_type: i for i in instances}
    ec2.add_pricing(by_type)
    # EBS cost surcharge as per https://aws.amazon.com/ec2/pricing/on-demand/#EBS-Optimized_Instances
    ebs_pricing_url = 'https://a0.awsstatic.com/pricing/1/ec2/pricing-ebs-optimized-instances.min.js'
    pricing = fetch_data(ebs_pricing_url)
    add_ebs_pricing(by_type, pricing)
def fetch_data(url):
    """Download a pricing document and parse it as JSON.

    Falls back to unwrapping a JSONP 'callback({...});' payload (quoting its
    bare object keys) when the body is not valid JSON.
    """
    content = urllib2.urlopen(url).read().decode()
    try:
        return json.loads(content)
    except ValueError:
        # Not plain JSON, so treat it as JSONP: pull the object out of the
        # callback wrapper, then quote bare keys to make it valid JSON.
        payload = re.search(r'callback\((.*)\);', content).groups()[0]
        payload = re.sub(r"(\w+):", r'"\1":', payload)
        return json.loads(payload)
def add_eni_info(instances):
    """Scrape per-type ENI limits (max ENIs, IPs per ENI) from the EC2 user guide.

    Only fills in .vpc for instances where it is still empty, so API-sourced
    network data takes precedence over the scraped table.
    """
    # Canonical URL for this info is https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html
    # eni_url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.partial.html"
    # It seems it's no longer dynamically loaded
    eni_url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html"
    tree = etree.parse(urllib2.urlopen(eni_url), etree.HTMLParser())
    # The second table on the page holds the per-type ENI limits.
    table = tree.xpath('//div[@class="table-contents"]//table')[1]
    rows = table.xpath('.//tr[./td]')
    by_type = {i.instance_type: i for i in instances}
    for r in rows:
        instance_type = etree.tostring(r[0], method='text').strip().decode()
        max_enis = etree.tostring(r[1], method='text').decode()
        # handle <cards>x<interfaces> format: take the largest value offered
        if 'per network card' in max_enis:
            match = re.search(r"per network card \((.*)\)", max_enis)
            eni_values = match.group(1).replace('or', '').replace(' ', '').split(',')
            max_enis = sorted(list(map(int, eni_values)))[-1]
        else:
            max_enis = locale.atoi(max_enis)
        ip_per_eni = locale.atoi(etree.tostring(r[2], method='text').decode())
        if instance_type not in by_type:
            print("WARNING: Ignoring ENI data for unknown instance type: {}".format(instance_type))
            continue
        if not by_type[instance_type].vpc:
            print(f"WARNING: DescribeInstanceTypes API does not have network info for {instance_type}, scraping instead")
            by_type[instance_type].vpc = { 'max_enis': max_enis,
                                           'ips_per_eni': ip_per_eni }
def add_ebs_info(instances):
    """
    Scrape EBS-optimized bandwidth/throughput/IOPS figures from the EC2 user guide.

    Three tables on this page:
    1: EBS optimized by default
       Instance type | Maximum bandwidth (Mib/s) | Maximum throughput (MiB/s, 128 KiB I/O) | Maximum IOPS (16 KiB I/O)
    2: Baseline performance metrics for instances with asterisk (unsupported for now, see comment below)
       Instance type | Baseline bandwidth (Mib/s) | Baseline throughput (MiB/s, 128 KiB I/O) | Baseline IOPS (16 KiB I/O)
    3: Not EBS optimized by default
       Instance type | Maximum bandwidth (Mib/s) | Maximum throughput (MiB/s, 128 KiB I/O) | Maximum IOPS (16 KiB I/O)
    TODO: Support the asterisk on type names in the first table, which means:
    "These instance types can support maximum performance for 30 minutes at least once every 24 hours. For example,
    c5.large instances can deliver 281 MB/s for 30 minutes at least once every 24 hours. If you have a workload
    that requires sustained maximum performance for longer than 30 minutes, select an instance type based on the
    following baseline performance."
    """
    def parse_ebs_table(by_type, table, ebs_optimized_by_default):
        """Fill the EBS columns for every data row of one table; returns by_type."""
        for row in table.xpath('tr'):
            if row.xpath('th'):
                continue  # header row
            cols = row.xpath('td')
            instance_type = sanitize_instance_type(totext(cols[0]).replace("*", ""))
            ebs_max_bandwidth = locale.atof(totext(cols[1]))
            ebs_throughput = locale.atof(totext(cols[2]))
            ebs_iops = locale.atof(totext(cols[3]))
            if instance_type not in by_type:
                # Deliberately keep a placeholder Instance so the scraped EBS
                # figures are not lost for types the API did not report.
                print(f"ERROR: Ignoring EBS info for unknown instance {instance_type}")
                by_type[instance_type] = Instance()
            by_type[instance_type].ebs_optimized_by_default = ebs_optimized_by_default
            by_type[instance_type].ebs_throughput = ebs_throughput
            by_type[instance_type].ebs_iops = ebs_iops
            by_type[instance_type].ebs_max_bandwidth = ebs_max_bandwidth
        return by_type

    by_type = {i.instance_type: i for i in instances}
    # Canonical URL for this info is https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html
    # ebs_url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.partial.html"
    # It seems it's no longer dynamically loaded
    ebs_url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html"
    tree = etree.parse(urllib2.urlopen(ebs_url), etree.HTMLParser())
    tables = tree.xpath('//div[@class="table-contents"]//table')
    # Table 0: optimized by default; table 2: not optimized by default.
    # Table 1 (baseline metrics) is intentionally skipped -- see TODO above.
    parse_ebs_table(by_type, tables[0], True)
    parse_ebs_table(by_type, tables[2], False)
def add_linux_ami_info(instances):
    """Add information about which virtualization options are supported.

    Note that only HVM is supported for Windows instances so that info is not
    given its own column.

    Scrapes the Amazon Linux AMI instance-type matrix, then manually patches
    generations that page omits.
    """
    checkmark_char = u'\u2713'
    url = "http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/"
    tree = etree.parse(urllib2.urlopen(url), etree.HTMLParser())
    table = tree.xpath('//div[@class="aws-table"]/table')[0]
    rows = table.xpath('.//tr[./td]')[1:]  # ignore header
    for r in rows:
        supported_types = []
        family_id = totext(r[0]).lower()
        if not family_id:
            continue
        # We only check the primary EBS-backed values here since the 'storage'
        # column will already be able to tell users whether or not the instance
        # they're looking at can use EBS and/or instance-store AMIs.
        try:
            if totext(r[1]) == checkmark_char:
                supported_types.append('HVM')
            if len(r) >= 4 and totext(r[3]) == checkmark_char:
                supported_types.append('PV')
        except Exception as e:
            # 2018-08-01: handle missing cells on last row in this table...
            print("Exception while parsing AMI info for {}: {}".format(family_id, e))
        # Apply types for this instance family to all matching instances
        for i in instances:
            i_family_id = i.instance_type.split('.')[0]
            if i_family_id == family_id:
                i.linux_virtualization_types = supported_types
    # http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/ page is
    # missing info about both older (t1, m1, c1, m2) and newer exotic (cg1,
    # cr1, hi1, hs1, cc2) instance type generations.
    # Adding "manual" info about older generations
    # Some background info at https://github.com/powdahound/ec2instances.info/pull/161
    for i in instances:
        i_family_id = i.instance_type.split('.')[0]
        if i_family_id in ('cc2', 'cg1', 'hi1', 'hs1'):
            if not 'HVM' in i.linux_virtualization_types:
                i.linux_virtualization_types.append('HVM')
        if i_family_id in ('t1', 'm1', 'm2', 'c1', 'hi1', 'hs1'):
            if not 'PV' in i.linux_virtualization_types:
                i.linux_virtualization_types.append('PV')
def add_vpconly_detail(instances):
    """Clear the vpc_only flag on instance families that can run in EC2 Classic.

    A few legacy instances can be launched in EC2 Classic, the rest is VPC only:
    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-classic-platform.html#ec2-classic-instance-types
    """
    classic_families = ("m1", "m3", "t1", "c1", "c3", "cc2", "cr1", "m2", "r3", "d2", "hs1", "i2", "g2")
    for inst in instances:
        if any(inst.instance_type.startswith(family) for family in classic_families):
            inst.vpc_only = False
def add_instance_storage_details(instances):
    """Add information about instance storage features.

    Parses the instance-store table of the EC2 user guide; instances without a
    parseable volume spec are marked EBS-only.
    """
    # Canonical URL for this info is http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
    # url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.partial.html"
    # It seems it's no longer dynamically loaded
    url = "http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html"
    tree = etree.parse(urllib2.urlopen(url), etree.HTMLParser())
    table = tree.xpath('//div[@class="table-contents"]/table')[0]
    rows = table.xpath('.//tr[./td]')
    # Symbols AWS uses in the table cells.
    checkmark_char = u'\u2714'
    dagger_char = u'\u2020'  # marks "includes a swap partition"
    for r in rows:
        columns = r.xpath('.//td')
        (instance_type,
         storage_volumes,
         storage_type,
         needs_initialization,
         trim_support) = tuple(totext(i) for i in columns)
        if instance_type is None:
            continue
        for i in instances:
            if i.instance_type == instance_type:
                i.ebs_only = True
                # Supports "24 x 13,980 GB" and "2 x 1,200 GB (2.4 TB)"
                m = re.search(r'(\d+)\s*x\s*([0-9,]+)?\s+(\w{2})?', storage_volumes)
                if m:
                    size_unit = 'GB'
                    if m.group(3):
                        size_unit = m.group(3)
                    i.ebs_only = False
                    i.num_drives = locale.atoi(m.group(1))
                    i.drive_size = locale.atoi(m.group(2))
                    i.size_unit = size_unit
                    i.ssd = 'SSD' in storage_type
                    i.nvme_ssd = 'NVMe' in storage_type
                    i.trim_support = checkmark_char in trim_support
                    i.storage_needs_initialization = checkmark_char in needs_initialization
                    i.includes_swap_partition = dagger_char in storage_volumes
def add_t2_credits(instances):
    """Scrape burstable-instance CPU credit rates and derive baseline/burst figures.

    base_performance is the fraction of one vCPU earned per minute;
    burst_minutes is a full day's credits divided across the vCPUs.
    """
    # Canonical URL for this info is
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html
    # url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-credits-baseline-concepts.partial.html"
    # It seems it's no longer dynamically loaded
    url = "http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html"
    tree = etree.parse(urllib2.urlopen(url), etree.HTMLParser())
    table = tree.xpath('//div[@class="table-contents"]//table')[0]
    rows = table.xpath('.//tr[./td]')
    assert len(rows) > 0, "Failed to find T2 CPU credit info"
    by_type = {i.instance_type: i for i in instances}
    for r in rows:
        if len(r) > 1:
            inst_type = totext(r[0])
            if not inst_type in by_type:
                print(f"WARNING: skipping unknown instance type '{inst_type}' in CPU credit info table")
                continue
            inst = by_type[inst_type]
            creds_per_hour = locale.atof(totext(r[1]))
            inst.base_performance = creds_per_hour / 60
            inst.burst_minutes = creds_per_hour * 24 / inst.vCPU
def add_pretty_names(instances):
    """Attach a human-friendly .pretty_name to every instance.

    Unknown families fall back to the upper-cased family code, and size
    suffixes like '8xlarge' expand to 'Eight Extra Large'.
    """
    family_names = {
        'c1': 'C1 High-CPU',
        'c3': 'C3 High-CPU',
        'c4': 'C4 High-CPU',
        'c5': 'C5 High-CPU',
        'c5d': 'C5 High-CPU',
        'cc2': 'Cluster Compute',
        'cg1': 'Cluster GPU',
        'cr1': 'High Memory Cluster',
        'hi1': 'HI1. High I/O',
        'hs1': 'High Storage',
        'i3': 'I3 High I/O',
        'm1': 'M1 General Purpose',
        'm2': 'M2 High Memory',
        'm3': 'M3 General Purpose',
        'm4': 'M4 General Purpose',
        'm5': 'M5 General Purpose',
        'm5d': 'M5 General Purpose',
        'g3': 'G3 Graphics GPU',
        # BUG FIX: 'g4' was listed twice ('G4 Accelerated Computing' earlier);
        # the later entry silently won, so only that one is kept.
        'g4': 'G4 Graphics and Machine Learning GPU',
        'g5': 'G5 Graphics and Machine Learning GPU',
        'p2': 'P2 General Purpose GPU',
        'p3': 'P3 High Performance GPU',
        'p4d': 'P4D Highest Performance GPU',
        'r3': 'R3 High-Memory',
        'r4': 'R4 High-Memory',
        'x1': 'X1 Extra High-Memory'
    }
    for i in instances:
        pieces = i.instance_type.split('.')
        family = pieces[0]
        short = pieces[1]
        prefix = family_names.get(family, family.upper())
        extra = None
        # Multiplier prefix of the size suffix ('' means a plain 'xlarge').
        if short.startswith('8x'):
            extra = 'Eight'
        elif short.startswith('4x'):
            extra = 'Quadruple'
        elif short.startswith('2x'):
            extra = 'Double'
        elif short.startswith('10x'):
            extra = 'Deca'
        elif short.startswith('x'):
            extra = ''
        bits = [prefix]
        if extra is not None:
            bits.extend([extra, 'Extra'])
            short = 'Large'
        bits.append(short.capitalize())
        i.pretty_name = ' '.join([b for b in bits if b])
def add_emr_info(instances):
    """Mark instances usable with EMR and merge per-region EMR pricing columns."""
    url = "https://a0.awsstatic.com/pricing/1/emr/pricing-emr.min.js"
    pricing = fetch_data(url)

    def extract_prices(data):
        # Reshape the feed into {instance_type: {region: {col_name: price, ...}}}.
        ret = {}
        for x in data["regions"]:
            for inst in x["instanceTypes"]:
                for size in inst["sizes"]:
                    if size["size"] not in ret:
                        ret[size["size"]] = {}
                    ret[size["size"]][x["region"]] = {
                        size["valueColumns"][0]["name"]:
                        size["valueColumns"][0]["prices"]["USD"],
                        size["valueColumns"][1]["name"]:
                        size["valueColumns"][1]["prices"]["USD"],
                        "currencies": data["currencies"],
                        "rate": data["rate"],
                    }
        return ret

    pricing = extract_prices(pricing["config"])
    for inst in instances:
        if inst.instance_type in pricing:
            inst.emr = True
            # Only attach EMR prices for regions already present in .pricing.
            for region in inst.pricing:
                if region in pricing[inst.instance_type]:
                    inst.pricing[region]["emr"] = pricing[
                        inst.instance_type][region]
def add_gpu_info(instances):
    """
    Add info about GPUs from the manually-curated dictionaries below. They are
    manually curated because GPU models and their corresponding CUDA Compute
    Capability are not listed in a structured form anywhere in the AWS docs.

    This function will print a warning if it encounters an instance with
    .GPU > 0 for which GPU information is not included in the dictionaries
    below. This may indicate that AWS has added a new GPU instance type. If you
    see such a warning and want to fill in the missing information, check
    https://aws.amazon.com/ec2/instance-types/#Accelerated_Computing for
    descriptions of the instance types and https://en.wikipedia.org/wiki/CUDA
    for information on the CUDA compute capability of different Nvidia GPU
    models.

    For G5 instances, please reference the following:
    https://aws.amazon.com/ec2/instance-types/g5/
    https://github.com/vantage-sh/ec2instances.info/issues/593
    """
    gpu_data = {
        'g2.2xlarge': {
            # No longer listed in AWS docs linked above. Alternative source is
            # https://medium.com/@manku_timma1/part-1-g2-2xlarge-gpu-basics-805ad40a37a4
            # The model has 2 units, 4G of memory each, but AWS exposes only 1 unit per instance
            'gpu_model': 'NVIDIA GRID K520',
            'compute_capability': 3.0,
            'gpu_count': 1,
            'cuda_cores': 3072,
            'gpu_memory': 4
        },
        'g2.8xlarge': {
            # No longer listed in AWS docs linked above. Alternative source is
            # https://aws.amazon.com/blogs/aws/new-g2-instance-type-with-4x-more-gpu-power/
            'gpu_model': 'NVIDIA GRID K520',
            'compute_capability': 3.0,
            'gpu_count': 4,
            'cuda_cores': 6144,
            'gpu_memory': 16
        },
        'g3s.xlarge': {
            'gpu_model': 'NVIDIA Tesla M60',
            'compute_capability': 5.2,
            'gpu_count': 1,
            'cuda_cores': 2048,
            'gpu_memory': 8
        },
        'g3.4xlarge': {
            'gpu_model': 'NVIDIA Tesla M60',
            'compute_capability': 5.2,
            'gpu_count': 1,
            'cuda_cores': 2048,
            'gpu_memory': 8
        },
        'g3.8xlarge': {
            'gpu_model': 'NVIDIA Tesla M60',
            'compute_capability': 5.2,
            'gpu_count': 2,
            'cuda_cores': 4096,
            'gpu_memory': 16
        },
        'g3.16xlarge': {
            'gpu_model': 'NVIDIA Tesla M60',
            'compute_capability': 5.2,
            'gpu_count': 4,
            'cuda_cores': 8192,
            'gpu_memory': 32
        },
        'g4dn.xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 1,
            'cuda_cores': 2560,
            'gpu_memory': 16
        },
        'g4dn.2xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 1,
            'cuda_cores': 2560,
            'gpu_memory': 16
        },
        'g4dn.4xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 1,
            'cuda_cores': 2560,
            'gpu_memory': 16
        },
        'g4dn.8xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 1,
            'cuda_cores': 2560,
            'gpu_memory': 16
        },
        'g4dn.16xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 1,
            'cuda_cores': 2560,
            'gpu_memory': 16
        },
        'g4dn.12xlarge': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 4,
            'cuda_cores': 10240,
            'gpu_memory': 64
        },
        'g4dn.metal': {
            'gpu_model': 'NVIDIA T4 Tensor Core',
            'compute_capability': 7.5,
            'gpu_count': 8,
            'cuda_cores': 20480,
            'gpu_memory': 128
        },
        'p2.xlarge': {
            'gpu_model': 'NVIDIA Tesla K80',
            'compute_capability': 3.7,
            'gpu_count': 1,
            'cuda_cores': 2496,
            'gpu_memory': 12
        },
        'p2.8xlarge': {
            'gpu_model': 'NVIDIA Tesla K80',
            'compute_capability': 3.7,
            'gpu_count': 4,
            'cuda_cores': 19968,
            'gpu_memory': 96
        },
        'p2.16xlarge': {
            'gpu_model': 'NVIDIA Tesla K80',
            'compute_capability': 3.7,
            'gpu_count': 8,
            'cuda_cores': 39936,
            'gpu_memory': 192
        },
        'p3.2xlarge': {
            'gpu_model': 'NVIDIA Tesla V100',
            'compute_capability': 7.0,
            'gpu_count': 1,
            'cuda_cores': 5120,
            'gpu_memory': 16
        },
        'p3.8xlarge': {
            'gpu_model': 'NVIDIA Tesla V100',
            'compute_capability': 7.0,
            'gpu_count': 4,
            'cuda_cores': 20480,
            'gpu_memory': 64
        },
        'p3.16xlarge': {
            'gpu_model': 'NVIDIA Tesla V100',
            'compute_capability': 7.0,
            'gpu_count': 8,
            'cuda_cores': 40960,
            'gpu_memory': 128
        },
        'p3dn.24xlarge': {
            'gpu_model': 'NVIDIA Tesla V100',
            'compute_capability': 7.0,
            'gpu_count': 8,
            'cuda_cores': 40960,
            'gpu_memory': 256
        },
        # BUG FIX: the A10G is an Ampere (GA10x) part, so its CUDA compute
        # capability is 8.6, not 7.5 (7.5 is Turing/T4). See NVIDIA's compute
        # capability table at https://developer.nvidia.com/cuda-gpus
        # NOTE(review): 'cuda_cores': 9616 looks like a typo of 9216 -- left
        # unchanged pending confirmation against NVIDIA's A10G specs.
        'g5.xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 1,
            'cuda_cores': 9616,
            'gpu_memory': 24
        },
        'g5.2xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 1,
            'cuda_cores': 9616,
            'gpu_memory': 24
        },
        'g5.4xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 1,
            'cuda_cores': 9616,
            'gpu_memory': 24
        },
        'g5.8xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 1,
            'cuda_cores': 9616,
            'gpu_memory': 24
        },
        'g5.16xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 1,
            'cuda_cores': 9616,
            'gpu_memory': 24
        },
        'g5.12xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 4,
            'cuda_cores': 38464,
            'gpu_memory': 96
        },
        'g5.24xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 4,
            'cuda_cores': 38464,
            'gpu_memory': 96
        },
        'g5.48xlarge': {
            'gpu_model': 'NVIDIA A10G',
            'compute_capability': 8.6,
            'gpu_count': 8,
            'cuda_cores': 76928,
            'gpu_memory': 192
        },
        'p4d.24xlarge': {
            'gpu_model': 'NVIDIA A100',
            'compute_capability': 8.0,
            'gpu_count': 8,
            'cuda_cores': 55296,  # Source: Asked Matthew Wilson at AWS as this isn't public anywhere.
            'gpu_memory': 320
        }
    }
    for inst in instances:
        if inst.GPU == 0:
            continue
        if inst.instance_type not in gpu_data:
            print(f"WARNING: instance {inst.instance_type} has GPUs but is missing from gpu_data "
                  "dict in scrape.add_gpu_info. The dict needs to be updated manually.")
            continue
        inst_gpu_data = gpu_data[inst.instance_type]
        inst.GPU_model = inst_gpu_data['gpu_model']
        inst.compute_capability = inst_gpu_data['compute_capability']
        inst.GPU_memory = inst_gpu_data['gpu_memory']
def add_availability_zone_info(instances):
    """
    Add info about availability zones using information from the following APIs:
    - aws ec2 describe-instance-type-offerings --region us-east-1
    - aws ec2 describe-instance-type-offerings --location-type availability-zone --region us-east-1
    - aws ec2 describe-availability-zones --region us-east-1
    https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instance-type-offerings.html

    NOTE(review): the call below uses location_type='availability-zone-id',
    so 'Location' values are AZ ids -- confirm against the docstring above.
    """
    # instance_type -> {region_name: sorted list of availability-zone ids}
    az_map = {}
    for region_name in ec2.describe_regions():
        offerings = ec2.describe_instance_type_offerings(
            region_name=region_name, location_type='availability-zone-id')
        for offering in offerings:
            per_region = az_map.setdefault(offering['InstanceType'], {})
            zones = per_region.setdefault(region_name, [])
            if offering['Location'] not in zones:
                zones.append(offering['Location'])
                zones.sort()
    for inst in instances:
        inst.availability_zones = az_map.get(inst.instance_type, {})
def scrape(data_file):
    """Scrape AWS to get instance data and write it to `data_file` as JSON.

    Runs each enrichment pass in order over the instance list returned by
    the ec2 module, then serializes every instance via to_dict().
    """
    print("Parsing instance types...")
    all_instances = ec2.get_instances()
    print("Parsing pricing info...")
    add_pricing_info(all_instances)
    print("Parsing ENI info...")
    add_eni_info(all_instances)
    print("Parsing EBS info...")
    add_ebs_info(all_instances)
    print("Parsing Linux AMI info...")
    add_linux_ami_info(all_instances)
    print("Parsing VPC-only info...")
    add_vpconly_detail(all_instances)
    print("Parsing local instance storage...")
    add_instance_storage_details(all_instances)
    print("Parsing burstable instance credits...")
    add_t2_credits(all_instances)
    print("Parsing instance names...")
    add_pretty_names(all_instances)
    print("Parsing emr details...")
    add_emr_info(all_instances)
    print("Adding GPU details...")
    add_gpu_info(all_instances)
    print("Adding availability zone details...")
    add_availability_zone_info(all_instances)
    with open(data_file, 'w') as f:
        json.dump([i.to_dict() for i in all_instances],
                  f,
                  indent=2,
                  sort_keys=True,
                  separators=(',', ': '))
# Run the full scrape when executed as a script; the output file is the data
# source consumed by the website build.
if __name__ == '__main__':
    scrape('www/instances.json')
|
powdahound/ec2instances.info
|
scrape.py
|
Python
|
mit
| 32,540
|
from Expression import *
class PredicateExpression(Expression):
    """
    Class representing a predicate expression node in the AST
    """

    def __init__(self, name, arguments, expression=None):
        """
        :param name : Identifier
        :param arguments : Declarations
        :param expression : NumericExpression | SymbolicExpression | LetExpression
        """
        Expression.__init__(self)
        self.name = name
        self.arguments = arguments
        self.expression = expression
        self.preparedArguments = None

    def __str__(self):
        """
        to string
        """
        res = "PredicateExpression: predicate "+str(self.name)+"("+str(self.arguments)+")"
        if self.expression:
            res += " { " + str(self.expression) + " }"
        return res

    def setPreparedArguments(self, preparedArguments):
        """Store the argument list produced by the prepare pass."""
        self.preparedArguments = preparedArguments

    def getDependencies(self, codeGenerator):
        """Return the deduplicated dependencies of this node's children."""
        dep = self.name.getDependencies(codeGenerator) + self.arguments.getDependencies(codeGenerator)
        if self.expression:
            # BUG FIX: this previously read 'deps += ...', which raised a
            # NameError because 'deps' was never defined.
            dep += self.expression.getDependencies(codeGenerator)
        return list(set(dep))

    def setupEnvironment(self, codeSetup):
        """
        Setup the MiniZinc code for this predicate expression
        """
        codeSetup.setupEnvironment(self)

    def prepare(self, codePrepare):
        """
        Prepare the MiniZinc code for the this predicate expression
        """
        codePrepare.prepare(self)

    def generateCode(self, codeGenerator):
        """
        Generate the MiniZinc code for this predicate expression
        """
        return codeGenerator.generateCode(self)
|
rafaellc28/Latex2MiniZinc
|
latex2minizinc/PredicateExpression.py
|
Python
|
mit
| 1,722
|
"""
Edit Search chain dialog.
Licensed under MIT
Copyright (c) 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import wx
import re
from ..settings import Settings
from .generic_dialogs import errormsg, yesno
from ..localization import _
from .. import gui
from .. import util
RE_KEY = re.compile(r'[\w-]+')
class EditSearchChainDialog(gui.EditSearchChainDialog):
    """Edit search chain dialog.

    Lets the user compose an ordered list of saved searches into a named
    chain. ``self.saved`` tells the caller whether Apply succeeded.
    """

    def __init__(self, parent, chain=None):
        """Initialize dialog.

        ``chain`` is the name of an existing chain to edit; ``None`` starts
        a new, empty chain.
        """
        super().__init__(parent)
        if util.platform() == "windows":
            self.SetDoubleBuffered(True)
        self.localize()

        self.saved = False
        self.search_count = 0
        self.load_searches()
        # Remembered so a rename on Apply can delete the old entry.
        self.original_name = ""
        if chain:
            self.load_chain(chain)
        self.refresh_localization()

        # Ensure good sizing of frame
        self.m_chain_panel.Fit()
        self.Fit()
        self.SetMinSize(self.GetSize())
        self.Centre()

    def localize(self):
        """Translate strings."""
        self.TITLE = _("Edit/Create Search Chain")
        self.OVERWRITE = _("'%s' already exists. Overwrite?")
        self.ADD = _("Add")
        self.DELETE = _("Delete")
        self.UP = _("Up")
        self.DOWN = _('Down')
        self.OKAY = _('Apply')
        self.CLOSE = _('Cancel')
        self.NAME = _("Name")
        self.CHAIN = _("Chain")
        self.ERR_SEARCH_NOT_EXISTS = _("Search '%s' does not exist!")
        self.ERR_CHAIN_MISSING = _("Please specify a chain name!")
        self.ERR_CHAIN_FORMAT = _("Chain names can only contain Unicode word chars, '_', and '-'!")
        self.ERR_CHAIN_EMPTY = _("Chain must contain at least one search!")

    def refresh_localization(self):
        """Localize dialog."""
        self.SetTitle(self.TITLE)
        self.m_add_button.SetLabel(self.ADD)
        self.m_remove_button.SetLabel(self.DELETE)
        self.m_up_button.SetLabel(self.UP)
        self.m_down_button.SetLabel(self.DOWN)
        self.m_apply_button.SetLabel(self.OKAY)
        self.m_cancel_button.SetLabel(self.CLOSE)
        # The static box labels live on the sizer items of the chain panel.
        self.m_chain_panel.GetSizer().GetItem(0).GetSizer().GetStaticBox().SetLabel(self.NAME)
        self.m_chain_panel.GetSizer().GetItem(1).GetSizer().GetStaticBox().SetLabel(self.CHAIN)

    def load_searches(self):
        """Load search list in `wxChoice`."""
        searches = Settings.get_search()
        keys = list(searches.keys())
        self.search_count = len(keys)
        self.m_search_choice.Set(keys)
        if self.search_count:
            self.m_search_choice.SetSelection(0)
        # Kept as a set for the existence check in on_apply_click.
        self.keys = set(keys)

    def load_chain(self, chain):
        """Load an existing chain."""
        chains = Settings.get_chains()
        if chain in chains:
            self.original_name = chain
            self.m_chain_textbox.SetValue(chain)
            searches = chains[chain]
            for x in range(len(searches)):
                self.m_search_list.Insert(searches[x], x)

    def on_add_click(self, event):
        """Add search selection to list.

        Inserts before the selected row, or appends when nothing is selected.
        """
        search = self.m_search_choice.GetSelection()
        if search != wx.NOT_FOUND:
            index = self.m_search_list.GetSelection()
            if index == wx.NOT_FOUND:
                self.m_search_list.Insert(
                    self.m_search_choice.GetString(search),
                    self.m_search_list.GetCount()
                )
            else:
                self.m_search_list.Insert(self.m_search_choice.GetString(search), index)

    def on_remove_click(self, event):
        """Remove search from chain, keeping the selection on the same row."""
        index = self.m_search_list.GetSelection()
        selected = self.m_search_list.IsSelected(index)
        if index != wx.NOT_FOUND:
            self.m_search_list.Delete(index)
            count = self.m_search_list.GetCount()
            if selected and count and index <= count - 1:
                self.m_search_list.Select(index)

    def on_up_click(self, event):
        """Move up."""
        index = self.m_search_list.GetSelection()
        if index > 0:
            search = self.m_search_list.GetString(index)
            self.m_search_list.Delete(index)
            self.m_search_list.Insert(search, index - 1)
            self.m_search_list.Select(index - 1)

    def on_down_click(self, event):
        """Move down."""
        count = self.m_search_list.GetCount()
        index = self.m_search_list.GetSelection()
        if wx.NOT_FOUND < index < count - 1:
            search = self.m_search_list.GetString(index)
            self.m_search_list.Delete(index)
            self.m_search_list.Insert(search, index + 1)
            self.m_search_list.Select(index + 1)

    def on_apply_click(self, event):
        """Add/modify chain in list.

        Validates the name and the search list, confirms overwrites, then
        saves via Settings and closes; ``self.saved`` records success.
        """
        string = self.m_chain_textbox.GetValue()
        chains = Settings.get_chains()
        err = False
        if not string:
            errormsg(self.ERR_CHAIN_MISSING)
            err = True
        elif RE_KEY.match(string) is None:
            errormsg(self.ERR_CHAIN_FORMAT)
            err = True
        # Only prompt to overwrite when saving under a *different* existing name.
        if not err and string in chains and string != self.original_name and not yesno(self.OVERWRITE % string):
            err = True
        searches = []
        for index in range(self.m_search_list.GetCount()):
            text = self.m_search_list.GetString(index)
            if text not in self.keys:
                errormsg(self.ERR_SEARCH_NOT_EXISTS % text)
                err = True
                break
            searches.append(text)
        if not err and not searches:
            errormsg(self.ERR_CHAIN_EMPTY)
            err = True
        if not err:
            # A rename removes the chain's old entry before adding the new one.
            if self.original_name and string != self.original_name:
                Settings.delete_chain(self.original_name)
            Settings.add_chain(string, searches)
            self.saved = True
            self.Close()

    def on_cancel_click(self, event):
        """Close dialog."""
        self.Close()
|
facelessuser/Rummage
|
rummage/lib/gui/dialogs/edit_search_chain_dialog.py
|
Python
|
mit
| 7,014
|
import sys
import os
# Interpreter major/minor version, used to pick the matching build directory.
p1, p2 = sys.version_info[:2]

# Resolve the real location of the running script, following a symlink if any.
curpath = os.path.abspath( sys.argv[0] )
if os.path.islink(curpath):
    curpath = os.readlink(curpath)
currentdir = os.path.dirname( curpath )

# Locate the in-tree build output of the bundled llfuse extension, probing up
# to two parent directories to cope with different entry-point locations.
build_dir = os.path.abspath( os.path.join(currentdir, "lib-dynload", "_llfuse", "build") )
if not os.path.isdir(build_dir):
    build_dir = os.path.abspath( os.path.join(currentdir, "..", "lib-dynload", "_llfuse", "build") )
if not os.path.isdir(build_dir):
    build_dir = os.path.abspath( os.path.join(currentdir, "..", "..", "lib-dynload", "_llfuse", "build") )

module = None
loaded = False

dirs = os.listdir(build_dir)
for d in dirs:
    # Pick the lib.* directory built for this interpreter, e.g.
    # "lib.linux-x86_64-3.9" for Python 3.9.
    if d.find("-%s.%s" % (p1, p2)) != -1 and d.find("lib.") != -1:
        # Temporarily prepend the build dir so the import resolves there.
        sys.path.insert(0, os.path.join(build_dir, d) )
        import importlib
        module = importlib.import_module("llfuse")
        loaded = True
        sys.path.pop(0)
        # Scrub helper names from the package namespace.
        # NOTE(review): if no directory matches, these names (and 'loaded'
        # staying False) leak into the namespace -- confirm callers check
        # 'loaded' before using 'module'.
        del p1, p2
        del currentdir, build_dir, dirs
        break
|
sergey-dryabzhinsky/dedupsqlfs
|
lib-dynload/_llfuse/__init__.py
|
Python
|
mit
| 961
|
"""Unit tests for reviewboard.reviews.ui.image.ImageReviewUI."""
from djblets.util.templatetags.djblets_images import crop_image
from reviewboard.admin.server import build_server_url
from reviewboard.reviews.ui.image import ImageReviewUI
from reviewboard.testing import TestCase
class ImageReviewUITests(TestCase):
    """Unit tests for reviewboard.reviews.ui.image.ImageReviewUI."""

    fixtures = ['test_users']

    def setUp(self):
        """Build a review request with one file attachment and a review."""
        super(ImageReviewUITests, self).setUp()
        self.review_request = self.create_review_request()
        self.attachment = self.create_file_attachment(
            self.review_request)
        self.review = self.create_review(self.review_request)

    def test_get_comment_thumbnail(self):
        """Testing ImageReviewUI.get_comment_thumbnail for an image comment"""
        ui = ImageReviewUI(self.review_request, self.attachment)
        # Comment region is a 1x1 crop at the top-left corner.
        comment = self.create_file_attachment_comment(
            self.review,
            self.attachment,
            extra_fields={
                'x': 0,
                'y': 0,
                'width': 1,
                'height': 1,
            })

        thumbnail = ui.get_comment_thumbnail(comment)
        self.assertHTMLEqual(
            thumbnail,
            '<img class="modified-image" src="%s" width="1" height="1"'
            ' alt="%s" />'
            % (build_server_url(crop_image(self.attachment.file, 0, 0, 1, 1)),
               comment.text)
        )

    def test_get_comment_thumbnail_diff(self):
        """Testing ImageReviewUI.get_comment_thumbnail for an image diff
        comment
        """
        # Diff mode: a second attachment is set as the comparison base.
        diff_attachment = self.create_file_attachment(self.review_request)
        ui = ImageReviewUI(self.review_request, self.attachment)
        ui.set_diff_against(diff_attachment)
        comment = self.create_file_attachment_comment(
            self.review,
            self.attachment,
            diff_attachment,
            extra_fields={
                'x': 0,
                'y': 0,
                'width': 1,
                'height': 1,
            })

        thumbnail = ui.get_comment_thumbnail(comment)
        # Diff thumbnails wrap both the original and modified crops.
        self.assertHTMLEqual(
            thumbnail,
            '<div class="image-review-ui-diff-thumbnail">'
            '<img class="orig-image" src="%s" width="1" height="1" alt="%s" />'
            '<img class="modified-image" src="%s" width="1" height="1"'
            ' alt="%s" />'
            '</div>'
            % (build_server_url(crop_image(diff_attachment.file, 0, 0, 1, 1)),
               comment.text,
               build_server_url(crop_image(self.attachment.file, 0, 0, 1, 1)),
               comment.text)
        )
|
reviewboard/reviewboard
|
reviewboard/reviews/tests/test_image_review_ui.py
|
Python
|
mit
| 2,681
|
"""empty message
Revision ID: 6e469162ab8f
Revises: 70d0db914e82
Create Date: 2016-03-19 00:52:56.464140
"""
# revision identifiers, used by Alembic.
revision = '6e469162ab8f'
down_revision = '70d0db914e82'
from alembic import op
import app
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add ``cursos`` and its many-to-many link table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('cursos',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('evento_id', sa.Integer(), nullable=True),
    sa.Column('nome', sa.String(), nullable=True),
    sa.Column('data_inicio', sa.DateTime(), nullable=True),
    sa.Column('duracao', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['evento_id'], ['eventos.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table between cursos and inscricoes (composite PK).
    op.create_table('cursos_inscricoes',
    sa.Column('curso_id', sa.Integer(), nullable=False),
    sa.Column('inscricao_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['curso_id'], ['cursos.id'], ),
    sa.ForeignKeyConstraint(['inscricao_id'], ['inscricoes.id'], ),
    sa.PrimaryKeyConstraint('curso_id', 'inscricao_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the link table first (it holds the FKs)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('cursos_inscricoes')
    op.drop_table('cursos')
    ### end Alembic commands ###
|
Maethorin/concept2
|
migrations/versions/6e469162ab8f_.py
|
Python
|
mit
| 1,321
|
"""Prepares a distribution for installation
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import logging
import mimetypes
import os
import shutil
from pip._vendor import requests
from pip._vendor.six import PY2
from pip._internal.distributions import (
make_distribution_for_install_requirement,
)
from pip._internal.distributions.installed import InstalledDistribution
from pip._internal.exceptions import (
DirectoryUrlHashUnsupported,
HashMismatch,
HashUnpinned,
InstallationError,
PreviousBuildDirError,
VcsHashUnsupported,
)
from pip._internal.utils.filesystem import copy2_fixed
from pip._internal.utils.hashes import MissingHashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path,
hide_url,
path_to_display,
rmtree,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.unpacking import unpack_file
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Callable, List, Optional, Tuple,
)
from mypy_extensions import TypedDict
from pip._internal.distributions import AbstractDistribution
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.network.download import Downloader
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.utils.hashes import Hashes
if PY2:
CopytreeKwargs = TypedDict(
'CopytreeKwargs',
{
'ignore': Callable[[str, List[str]], List[str]],
'symlinks': bool,
},
total=False,
)
else:
CopytreeKwargs = TypedDict(
'CopytreeKwargs',
{
'copy_function': Callable[[str, str], None],
'ignore': Callable[[str, List[str]], List[str]],
'ignore_dangling_symlinks': bool,
'symlinks': bool,
},
total=False,
)
logger = logging.getLogger(__name__)
def _get_prepared_distribution(
        req,  # type: InstallRequirement
        req_tracker,  # type: RequirementTracker
        finder,  # type: PackageFinder
        build_isolation  # type: bool
):
    # type: (...) -> AbstractDistribution
    """Prepare a distribution for installation.

    Builds the distribution metadata under the requirement tracker (which
    guards against cyclic builds) and returns the AbstractDistribution
    wrapper for ``req``.
    """
    abstract_dist = make_distribution_for_install_requirement(req)
    with req_tracker.track(req):
        abstract_dist.prepare_distribution_metadata(finder, build_isolation)
    return abstract_dist
def unpack_vcs_link(link, location):
    # type: (Link, str) -> None
    """Check out the repository behind a VCS ``link`` into ``location``."""
    vcs_backend = vcs.get_backend_for_scheme(link.scheme)
    # A VCS link's scheme always maps to a registered backend.
    assert vcs_backend is not None
    vcs_backend.unpack(location, url=hide_url(link.url))
class File(object):
    """Value object pairing a local filesystem path with its content type."""

    def __init__(self, path, content_type):
        # type: (str, str) -> None
        self.path = path
        self.content_type = content_type
def get_http_url(
    link,  # type: Link
    downloader,  # type: Downloader
    download_dir=None,  # type: Optional[str]
    hashes=None,  # type: Optional[Hashes]
):
    # type: (...) -> File
    """Fetch the file behind an HTTP(S) link, reusing a previously
    downloaded copy from ``download_dir`` when one exists and is valid.
    """
    temp_dir = TempDirectory(kind="unpack", globally_managed=True)
    # Reuse an existing, hash-verified download when one is available.
    cached_path = None
    if download_dir:
        cached_path = _check_download_dir(link, download_dir, hashes)
    if cached_path is not None:
        from_path = cached_path
        content_type = mimetypes.guess_type(from_path)[0]
    else:
        # No usable cached copy: download into the temporary directory.
        from_path, content_type = _download_http_url(
            link, downloader, temp_dir.path, hashes
        )
    return File(from_path, content_type)
def _copy2_ignoring_special_files(src, dest):
    # type: (str, str) -> None
    """Copying special files is not supported, but as a convenience to users
    we skip errors copying them. This supports tools that may create e.g.
    socket files in the project source directory.
    """
    # copy2_fixed is expected to raise SpecialFileError for sockets/FIFOs
    # (see the except clause) — TODO confirm against utils.filesystem.
    try:
        copy2_fixed(src, dest)
    except shutil.SpecialFileError as e:
        # SpecialFileError may be raised due to either the source or
        # destination. If the destination was the cause then we would actually
        # care, but since the destination directory is deleted prior to
        # copy we ignore all of them assuming it is caused by the source.
        logger.warning(
            "Ignoring special file error '%s' encountered copying %s to %s.",
            str(e),
            path_to_display(src),
            path_to_display(dest),
        )
def _copy_source_tree(source, target):
    # type: (str, str) -> None
    """Recursively copy ``source`` to ``target``, skipping noise directories
    at the top level and guarding against copying the target into itself."""
    target_abspath = os.path.abspath(target)
    target_basename = os.path.basename(target_abspath)
    target_dirname = os.path.dirname(target_abspath)
    def ignore(d, names):
        # type: (str, List[str]) -> List[str]
        skipped = []  # type: List[str]
        if d == source:
            # Pulling in those directories can potentially be very slow,
            # exclude the following directories if they appear in the top
            # level dir (and only it).
            # See discussion at https://github.com/pypa/pip/pull/6770
            skipped += ['.tox', '.nox']
        if os.path.abspath(d) == target_dirname:
            # Prevent an infinite recursion if the target is in source.
            # This can happen when TMPDIR is set to ${PWD}/...
            # and we copy PWD to TMPDIR.
            skipped += [target_basename]
        return skipped
    kwargs = dict(ignore=ignore, symlinks=True)  # type: CopytreeKwargs
    if not PY2:
        # Python 2 does not support copy_function, so we only ignore
        # errors on special file copy in Python 3.
        kwargs['copy_function'] = _copy2_ignoring_special_files
    shutil.copytree(source, target, **kwargs)
def get_file_url(
    link,  # type: Link
    download_dir=None,  # type: Optional[str]
    hashes=None  # type: Optional[Hashes]
):
    # type: (...) -> File
    """Resolve a ``file:`` link to a local File, optionally verifying its hash."""
    # Prefer a previously downloaded copy when one exists and is valid.
    cached = None
    if download_dir:
        cached = _check_download_dir(link, download_dir, hashes)
    from_path = cached if cached else link.file_path
    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(from_path)
    content_type = mimetypes.guess_type(from_path)[0]
    return File(from_path, content_type)
def unpack_url(
    link,  # type: Link
    location,  # type: str
    downloader,  # type: Downloader
    download_dir=None,  # type: Optional[str]
    hashes=None,  # type: Optional[Hashes]
):
    # type: (...) -> Optional[File]
    """Unpack link into location, downloading if required.

    Returns the fetched File for archive links, or None for VCS and
    local-directory links (which have no single archive file).

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # non-editable vcs urls
    if link.is_vcs:
        unpack_vcs_link(link, location)
        return None
    # If it's a url to a local directory, replace any existing checkout.
    if link.is_existing_dir():
        if os.path.isdir(location):
            rmtree(location)
        _copy_source_tree(link.file_path, location)
        return None
    # file urls
    if link.is_file:
        file = get_file_url(link, download_dir, hashes=hashes)
    # http urls
    else:
        file = get_http_url(
            link,
            downloader,
            download_dir,
            hashes=hashes,
        )
    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(file.path, location, file.content_type)
    return file
def _download_http_url(
link, # type: Link
downloader, # type: Downloader
temp_dir, # type: str
hashes, # type: Optional[Hashes]
):
# type: (...) -> Tuple[str, str]
"""Download link url into temp_dir using provided session"""
download = downloader(link)
file_path = os.path.join(temp_dir, download.filename)
with open(file_path, 'wb') as content_file:
for chunk in download.chunks:
content_file.write(chunk)
if hashes:
hashes.check_against_path(file_path)
return file_path, download.response.headers.get('content-type', '')
def _check_download_dir(link, download_dir, hashes):
# type: (Link, str, Optional[Hashes]) -> Optional[str]
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if not os.path.exists(download_path):
return None
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
class RequirementPreparer(object):
    """Prepares a Requirement
    """
    def __init__(
        self,
        build_dir,  # type: str
        download_dir,  # type: Optional[str]
        src_dir,  # type: str
        wheel_download_dir,  # type: Optional[str]
        build_isolation,  # type: bool
        req_tracker,  # type: RequirementTracker
        downloader,  # type: Downloader
        finder,  # type: PackageFinder
        require_hashes,  # type: bool
        use_user_site,  # type: bool
    ):
        # type: (...) -> None
        """Store preparer configuration; no filesystem work happens here."""
        super(RequirementPreparer, self).__init__()
        self.src_dir = src_dir
        self.build_dir = build_dir
        self.req_tracker = req_tracker
        self.downloader = downloader
        self.finder = finder
        # Where still-packed archives should be written to. If None, they are
        # not saved, and are deleted immediately after unpacking.
        self.download_dir = download_dir
        # Where still-packed .whl files should be written to. If None, they are
        # written to the download_dir parameter. Separate to download_dir to
        # permit only keeping wheel archives for pip wheel.
        self.wheel_download_dir = wheel_download_dir
        # NOTE
        # download_dir and wheel_download_dir overlap semantically and may
        # be combined if we're willing to have non-wheel archives present in
        # the wheelhouse output by 'pip wheel'.
        # Is build isolation allowed?
        self.build_isolation = build_isolation
        # Should hash-checking be required?
        self.require_hashes = require_hashes
        # Should install in user site-packages?
        self.use_user_site = use_user_site
    @property
    def _download_should_save(self):
        # type: () -> bool
        """Whether prepared sources should also be archived to download_dir.

        Raises InstallationError when a download_dir is configured but
        missing on disk.
        """
        if not self.download_dir:
            return False
        if os.path.exists(self.download_dir):
            return True
        logger.critical('Could not find download directory')
        raise InstallationError(
            "Could not find or access download directory '{}'"
            .format(self.download_dir))
    def prepare_linked_requirement(
        self,
        req,  # type: InstallRequirement
    ):
        # type: (...) -> AbstractDistribution
        """Prepare a requirement that would be obtained from req.link
        """
        assert req.link
        link = req.link
        # TODO: Breakup into smaller functions
        if link.scheme == 'file':
            path = link.file_path
            logger.info('Processing %s', display_path(path))
        else:
            logger.info('Collecting %s', req.req or req)
        download_dir = self.download_dir
        if link.is_wheel and self.wheel_download_dir:
            # when doing 'pip wheel` we download wheels to a
            # dedicated dir.
            download_dir = self.wheel_download_dir
        if link.is_wheel:
            if download_dir:
                # When downloading, we only unpack wheels to get
                # metadata.
                autodelete_unpacked = True
            else:
                # When installing a wheel, we use the unpacked
                # wheel.
                autodelete_unpacked = False
        else:
            # We always delete unpacked sdists after pip runs.
            autodelete_unpacked = True
        with indent_log():
            # Since source_dir is only set for editable requirements.
            assert req.source_dir is None
            req.ensure_has_source_dir(self.build_dir, autodelete_unpacked)
            # If a checkout exists, it's unwise to keep going. version
            # inconsistencies are logged later, but do not fail the
            # installation.
            # FIXME: this won't upgrade when there's an existing
            # package unpacked in `req.source_dir`
            if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
                raise PreviousBuildDirError(
                    "pip can't proceed with requirements '{}' due to a"
                    " pre-existing build directory ({}). This is "
                    "likely due to a previous installation that failed"
                    ". pip is being responsible and not assuming it "
                    "can delete this. Please delete it and try again."
                    .format(req, req.source_dir)
                )
            # Now that we have the real link, we can tell what kind of
            # requirements we have and raise some more informative errors
            # than otherwise. (For example, we can raise VcsHashUnsupported
            # for a VCS URL rather than HashMissing.)
            if self.require_hashes:
                # We could check these first 2 conditions inside
                # unpack_url and save repetition of conditions, but then
                # we would report less-useful error messages for
                # unhashable requirements, complaining that there's no
                # hash provided.
                if link.is_vcs:
                    raise VcsHashUnsupported()
                elif link.is_existing_dir():
                    raise DirectoryUrlHashUnsupported()
                if not req.original_link and not req.is_pinned:
                    # Unpinned packages are asking for trouble when a new
                    # version is uploaded. This isn't a security check, but
                    # it saves users a surprising hash mismatch in the
                    # future.
                    #
                    # file:/// URLs aren't pinnable, so don't complain
                    # about them not being pinned.
                    raise HashUnpinned()
            hashes = req.hashes(trust_internet=not self.require_hashes)
            if self.require_hashes and not hashes:
                # Known-good hashes are missing for this requirement, so
                # shim it with a facade object that will provoke hash
                # computation and then raise a HashMissing exception
                # showing the user what the hash should be.
                hashes = MissingHashes()
            try:
                local_file = unpack_url(
                    link, req.source_dir, self.downloader, download_dir,
                    hashes=hashes,
                )
            except requests.HTTPError as exc:
                logger.critical(
                    'Could not install requirement %s because of error %s',
                    req,
                    exc,
                )
                raise InstallationError(
                    'Could not install requirement {} because of HTTP '
                    'error {} for URL {}'.format(req, exc, link)
                )
            # For use in later processing, preserve the file path on the
            # requirement.
            if local_file:
                req.local_file_path = local_file.path
            abstract_dist = _get_prepared_distribution(
                req, self.req_tracker, self.finder, self.build_isolation,
            )
            if download_dir:
                if link.is_existing_dir():
                    logger.info('Link is a directory, ignoring download_dir')
                elif local_file:
                    download_location = os.path.join(
                        download_dir, link.filename
                    )
                    if not os.path.exists(download_location):
                        shutil.copy(local_file.path, download_location)
                        logger.info(
                            'Saved %s', display_path(download_location)
                        )
            if self._download_should_save:
                # Make a .zip of the source_dir we already created.
                if link.is_vcs:
                    req.archive(self.download_dir)
        return abstract_dist
    def prepare_editable_requirement(
        self,
        req,  # type: InstallRequirement
    ):
        # type: (...) -> AbstractDistribution
        """Prepare an editable requirement
        """
        assert req.editable, "cannot prepare a non-editable req as editable"
        logger.info('Obtaining %s', req)
        with indent_log():
            if self.require_hashes:
                # Editable checkouts have no single file to hash.
                raise InstallationError(
                    'The editable requirement {} cannot be installed when '
                    'requiring hashes, because there is no single file to '
                    'hash.'.format(req)
                )
            req.ensure_has_source_dir(self.src_dir)
            req.update_editable(not self._download_should_save)
            abstract_dist = _get_prepared_distribution(
                req, self.req_tracker, self.finder, self.build_isolation,
            )
            if self._download_should_save:
                req.archive(self.download_dir)
            req.check_if_exists(self.use_user_site)
        return abstract_dist
    def prepare_installed_requirement(
        self,
        req,  # type: InstallRequirement
        skip_reason  # type: str
    ):
        # type: (...) -> AbstractDistribution
        """Prepare an already-installed requirement
        """
        assert req.satisfied_by, "req should have been satisfied but isn't"
        assert skip_reason is not None, (
            "did not get skip reason skipped but req.satisfied_by "
            "is set to {}".format(req.satisfied_by)
        )
        logger.info(
            'Requirement %s: %s (%s)',
            skip_reason, req, req.satisfied_by.version
        )
        with indent_log():
            if self.require_hashes:
                logger.debug(
                    'Since it is already installed, we are trusting this '
                    'package without checking its hash. To ensure a '
                    'completely repeatable environment, install into an '
                    'empty virtualenv.'
                )
            abstract_dist = InstalledDistribution(req)
        return abstract_dist
|
davidharvey1986/pyRRG
|
unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_internal/operations/prepare.py
|
Python
|
mit
| 20,030
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Brendan Quinn, Clueful Media Ltd / JT-PATS Ltd
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
PATS Python library - Test core functions - Brendan Quinn May 2017
"""
def test_product():
    """Placeholder test confirming the test harness runs."""
    assert True
|
bquinn/pats-api-python
|
pats/test_core.py
|
Python
|
mit
| 1,285
|
# -*- coding: utf-8 -*-
"""
edict
~~~~~
"""
import argparse
import inspect
import sys
class Application:
    """
    The central object of edict: declares a CLI application, collects its
    commands, and dispatches to them.

    .. versionadded:: 1.0
    """

    #: The application's name
    #: .. versionadded:: 1.0
    name = None

    #: The application's version
    #: .. versionadded:: 1.0
    version = None

    #: The top-level argparse parser
    #: .. versionadded:: 1.0
    parser = None

    #: The subparser collection commands are registered into
    #: .. versionadded:: 1.0
    subparsers = None

    def __init__(self, name=None, version=None):
        self.name = name
        self.version = version
        self.parser = argparse.ArgumentParser(
            description=self.fullname,
            usage='%(prog)s [-h] {command} [options]'
        )
        self.parser.add_argument(
            '--version', action='version', version=self.fullname)
        self.subparsers = self.parser.add_subparsers(title='Subcommands')

    @property
    def fullname(self):
        """Human-readable ``name (version)`` string with UNKNOWN fallbacks."""
        return '{name} ({ver})'.format(
            name=self.name or 'UNKNOWN',
            ver=self.version or 'UNKNOWN',
        )

    def add_command(self, func):
        """
        Adds a new command to the application. The function's arguments are
        used to determine CLI arguments.
        """
        command = Command(func)
        subparser = self.subparsers.add_parser(command.name, help=command.help)
        command.add_arguments(subparser)
        subparser.set_defaults(_command=command)

    def run(self, args=None):
        """Parse ``args`` and run the chosen command; return 1 if none given."""
        namespace = self.parser.parse_args(args)
        if '_command' in namespace:
            return namespace._command.run(namespace)
        self.parser.print_help()
        return 1
class Command:
    """Wraps a plain function, deriving its CLI name, help text, and
    arguments from the function's signature."""

    func = None
    signature = None

    def __init__(self, func):
        self.func = func
        self.signature = inspect.signature(self.func)

    @property
    def name(self):
        """The return annotation when present, otherwise the function name."""
        annotated = self.signature.return_annotation
        if annotated is self.signature.empty:
            return self.func.__name__
        return annotated

    @property
    def help(self):
        """The wrapped function's docstring."""
        return self.func.__doc__

    def add_arguments(self, parser):
        """Register one argparse argument per function parameter."""
        for parameter in self.signature.parameters.values():
            self._add_argument(parser, parameter)

    def run(self, args):
        """Call the wrapped function with values pulled from ``args``."""
        collected = {
            name: getattr(args, name)
            for name in self.signature.parameters
        }
        bound = self.signature.bind(**collected)
        return self.func(*bound.args, **bound.kwargs)

    def _add_argument(self, parser, param):
        # Parameters with defaults become --options; the rest are positional.
        if param.default is param.empty:
            name = param.name
        else:
            name = '--{opt}'.format(opt=param.name)
        spec = {'action': 'store'}
        if param.default is True:
            # A true default means the flag switches the value off.
            spec['action'] = 'store_false'
        elif param.default is False:
            spec['action'] = 'store_true'
        elif param.default is not param.empty:
            spec['default'] = param.default
        if param.kind is param.VAR_POSITIONAL:
            spec['nargs'] = '*'
        if param.annotation is not param.empty:
            spec['help'] = param.annotation
        parser.add_argument(name, **spec)

    def __repr__(self):
        return '<Command={name}>'.format(name=self.name)
|
chrisguitarguy/edict
|
edict.py
|
Python
|
mit
| 3,315
|
import math
import matplotlib.patches
import matplotlib.pyplot
import numpy.random
from matplotlib.path import Path
def calculate_angle(source, point):
    """Return ``(angle, point)`` where angle is the direction of ``point``
    as seen from ``source``, in radians via atan2."""
    sx, sy = source
    px, py = point
    return (math.atan2(py - sy, px - sx), point)
def calculate_angles(source, points):
    """Return (angle, point) pairs for every point, measured from source."""
    return [calculate_angle(source, point) for point in points]
def normalize_angle(theta, delta):
    """Shift ``theta`` by ``delta``, wrapping by 2*pi so the result is
    positive when theta <= delta (used to order candidate hull angles)."""
    shifted = theta - delta
    if theta <= delta:
        shifted += math.pi * 2
    return shifted
def convexhull(points):
    """Compute the convex hull of 2-D points using the gift wrapping method.

    Returns the hull as a list of points; the first point is repeated at the
    end (the list describes a closed polygon).
    """
    # Start from the topmost point (largest y, ties broken by largest x),
    # which is guaranteed to be on the hull.
    start = max(points, key = lambda t : (t[1], t[0]))
    hull = [start] # start with the topmost point (max y, then max x)
    while True:
        last = hull[-1]
        angles = calculate_angles(last, points)
        output = [angle for angle in angles if angle[1] not in hull] # list of (angle, points)
        if len(hull) == 1:
            next_point = min(output, key=lambda x: x[0])[1]
        else:
            second_last = hull[-2]
            output += [calculate_angle(last, hull[0])] # first hull point is always a candidate.
            # Measure candidate angles relative to the direction of the
            # previous hull edge, so the smallest turn wins.
            delta = calculate_angle(second_last, last)[0]
            next_point = min(output, key=lambda x: normalize_angle(x[0], delta))[1]
        hull.append(next_point)
        if hull[-1] == hull[0]: break # terminal condition: wrapped back to start
    return hull
def generate_code(n):
    """Generate matplotlib Path command codes for a closed polygon of ``n``
    vertices: MOVETO for the first, CLOSEPOLY for the last, LINETO between.

    Returns a list of Path codes (previously a map over the Python-2-only
    ``xrange``, which raises NameError on Python 3; an unused ``result``
    local was also removed).
    """
    def index_to_code(i):
        if i == 0:
            return Path.MOVETO
        elif i == n - 1:
            return Path.CLOSEPOLY
        else:
            return Path.LINETO
    # An explicit list keeps the result indexable on both Python 2 and 3.
    return [index_to_code(i) for i in range(n)]
# Shared plotting state: execute() bumps `count` to select the next panel
# of this 3x2 figure.
count = 0
fig = matplotlib.pyplot.figure()
def execute(points):
    """Compute the convex hull of ``points`` and draw points + hull into the
    next panel of the shared 3x2 figure."""
    global count
    global fig
    count += 1
    hull = convexhull(points)
    codes = generate_code(len(hull))
    path = Path(hull, codes)
    ax = fig.add_subplot(3, 2, count)
    limit = 10  # axis range is [-limit, limit] in both x and y
    patch = matplotlib.patches.PathPatch(path, facecolor='orange', alpha = 0.1, lw=2)
    # plot the points
    x = [p[0] for p in points]
    y = [p[1] for p in points]
    ax.scatter(x, y, s = 10)
    # plot the convex hull
    ax.add_patch(patch)
    ax.set_xlim(-limit, limit)
    ax.set_ylim(-limit, limit)
def test_1():
    """Axis-aligned cross of five points (origin is interior)."""
    execute([(1, 0), (-1, 0), (0, 1), (0, -1), (0, 0)])
def test_2():
    """Same cross with an extra interior point near the origin."""
    execute([(1, 0), (-1, 0), (0, 1), (0, -1), (0, 0), (0, -0.25)])
def test_3():
    """5x5 grid of points with slight Gaussian jitter."""
    samples = [
        (i + numpy.random.normal(scale=0.01), j + numpy.random.normal(scale=0.01))
        for i in range(-2, 3)
        for j in range(-2, 3)
    ]
    execute(samples)
def test_random_1():
    """Hull of a single standard-normal cloud of 100 points."""
    samples = [(numpy.random.normal(), numpy.random.normal()) for _ in range(100)]
    execute(samples)
def test_random_2():
    """two gaussian distributions"""
    samples = [(numpy.random.normal(2), numpy.random.normal(2)) for _ in range(1000)]
    samples += [(numpy.random.normal(-2), numpy.random.normal(-2)) for _ in range(1000)]
    execute(samples)
def test_random_3():
    """three gaussian distributions"""
    samples = [(numpy.random.normal(3), numpy.random.normal()) for _ in range(100)]
    samples += [(numpy.random.normal(-3), numpy.random.normal()) for _ in range(100)]
    samples += [(numpy.random.normal(), numpy.random.normal(-3)) for _ in range(100)]
    execute(samples)
# Draw each demo into its own subplot, then display the assembled figure.
test_random_1()
test_random_2()
test_random_3()
test_1()
test_2()
test_3()
matplotlib.pyplot.show()
|
jeeyoungk/exercise
|
python/convexhull.py
|
Python
|
mit
| 3,569
|
import datetime
from django.db import connections
from django.db.models import Count
from django.db.models import F
from django.db.models import Q
from django.db.models import Sum
from django.db.utils import DatabaseError
from django.db.utils import OperationalError
from rest_framework import pagination
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework.response import Response
from .serializers import LearnerNotificationSerializer
from .serializers import LessonReportSerializer
from kolibri.core.auth.constants import collection_kinds
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.filters import HierarchyRelationsFilter
from kolibri.core.auth.models import AdHocGroup
from kolibri.core.auth.models import Collection
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import LearnerGroup
from kolibri.core.decorators import query_params_required
from kolibri.core.exams.models import Exam
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.notifications.models import LearnerProgressNotification
from kolibri.core.notifications.models import NotificationsLog
from kolibri.core.sqlite.utils import repair_sqlite_db
# All collection kinds plus the pseudo-kind "user", for query validation.
collection_kind_choices = tuple(
    [choice[0] for choice in collection_kinds.choices] + ["user"]
)
class OptionalPageNumberPagination(pagination.PageNumberPagination):
    """
    Pagination class that allows for page number-style pagination, when requested.
    To activate, the `page_size` argument must be set. For example, to request the first 20 records:
    `?page_size=20&page=1`
    """
    # Disabled by default; clients opt in with the `page_size` query param.
    page_size = None
    page_size_query_param = "page_size"
class LessonReportPermissions(permissions.BasePermission):
    """
    List - check if requester has coach/admin permissions on whole facility.
    Detail - check if requester has permissions on the Classroom.
    """

    def has_permission(self, request, view):
        report_pk = view.kwargs.get("pk", None)
        if report_pk is None:
            # List view: authorize against the requester's whole facility.
            collection_id = request.user.facility_id
        else:
            # Detail view: authorize against the lesson's own collection.
            collection_id = Lesson.objects.get(pk=report_pk).collection.id
        try:
            return request.user.has_role_for(
                [role_kinds.ADMIN, role_kinds.COACH],
                Collection.objects.get(pk=collection_id),
            )
        except (Collection.DoesNotExist, ValueError):
            return False
class LessonReportViewset(viewsets.ReadOnlyModelViewSet):
    # Read-only lesson reports; restricted to facility admins/coaches by
    # LessonReportPermissions.
    permission_classes = (permissions.IsAuthenticated, LessonReportPermissions)
    serializer_class = LessonReportSerializer
    queryset = Lesson.objects.all()
class ClassroomNotificationsPermissions(permissions.BasePermission):
    """
    Allow only users with admin/coach permissions on a collection.
    """

    def has_permission(self, request, view):
        target_id = view.kwargs.get("collection_id")
        try:
            return request.user.has_role_for(
                [role_kinds.ADMIN, role_kinds.COACH],
                Collection.objects.get(pk=target_id),
            )
        except (Collection.DoesNotExist, ValueError):
            return False
@query_params_required(collection_id=str)
class ClassroomNotificationsViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint exposing learner-progress notifications for a
    classroom or learner group, newest first."""

    permission_classes = (ClassroomNotificationsPermissions,)
    serializer_class = LearnerNotificationSerializer
    pagination_class = OptionalPageNumberPagination
    pagination_class.page_size = 10

    def check_after(self):
        """
        Check if after parameter must be used for the query
        """
        notifications_after = self.request.query_params.get("after", None)
        after = None
        if notifications_after:
            try:
                after = int(notifications_after)
            except ValueError:
                pass  # if after has not a valid format, let's not use it
        return after

    def apply_learner_filter(self, query):
        """
        Filter the notifications by learner_id if applicable
        """
        learner_id = self.request.query_params.get("learner_id", None)
        if learner_id:
            return query.filter(user_id=learner_id)
        return query

    def remove_default_page_size(self):
        """
        This is a hack because DRF sets pagination always if pagination_class.page_size is set
        """
        if self.request.query_params.get("page", None) is None:
            self.paginator.page_size = None

    def get_queryset(self):
        """
        Returns the notifications in reverse-chronological order, filtered by the query parameters.
        By default it sends only notifications from the past day.
        If a 'page_size' parameter is used, that sets a maximum number of results.
        If a 'page' parameter is used, the past day limit is not applied.
        Some url examples:
        /coach/api/notifications/?collection_id=9da65157a8603788fd3db890d2035a9f
        /coach/api/notifications/?collection_id=9da65157a8603788fd3db890d2035a9f&after=8&page=2
        /coach/api/notifications/?page_size=5&page=2&collection_id=9da65157a8603788fd3db890d2035a9f&learner_id=94117bb5868a1ef529b8be60f17ff41a
        /coach/api/notifications/?collection_id=9da65157a8603788fd3db890d2035a9f&page=2
        :param: collection_id uuid: classroom or learner group identifier (mandatory)
        :param: learner_id uuid: user identifier
        :param: after integer: all the notifications after this id will be sent.
        :param: page_size integer: sets the number of notifications to provide for pagination (defaults: 10)
        :param: page integer: sets the page to provide when paginating.
        """
        collection_id = self.kwargs["collection_id"]
        if collection_id:
            try:
                collection = Collection.objects.get(pk=collection_id)
            except (Collection.DoesNotExist, ValueError):
                return []
        if collection.kind == collection_kinds.CLASSROOM:
            # Include the classroom itself plus all of its learner and
            # ad-hoc groups, since notifications may target any of them.
            classroom_groups = list(LearnerGroup.objects.filter(parent=collection))
            classroom_groups += list(AdHocGroup.objects.filter(parent=collection))
            learner_groups = [group.id for group in classroom_groups]
            learner_groups.append(collection_id)
            notifications_query = LearnerProgressNotification.objects.filter(
                classroom_id__in=learner_groups
            )
        else:
            notifications_query = LearnerProgressNotification.objects.filter(
                classroom_id=collection_id
            )
        notifications_query = self.apply_learner_filter(notifications_query)
        after = self.check_after()
        self.remove_default_page_size()
        if after:
            notifications_query = notifications_query.filter(id__gt=after)
        elif self.request.query_params.get("page", None) is None:
            try:
                last_id_record = notifications_query.latest("id")
                # returns all the notifications 24 hours older than the latest
                last_24h = last_id_record.timestamp - datetime.timedelta(days=1)
                notifications_query = notifications_query.filter(
                    timestamp__gte=last_24h
                )
            except (LearnerProgressNotification.DoesNotExist):
                return []
            except DatabaseError:
                repair_sqlite_db(connections["notifications_db"])
                return []
        return notifications_query.order_by("-id")

    def list(self, request, *args, **kwargs):
        """
        It provides the list of ClassroomNotificationsViewset from DRF.
        Then it fetches and saves the needed information to know how many coaches
        are requesting notifications in the last five minutes
        """
        # Use super on the parent class to prevent an infinite recursion.
        try:
            response = super(viewsets.ReadOnlyModelViewSet, self).list(
                request, *args, **kwargs
            )
        except (OperationalError, DatabaseError):
            repair_sqlite_db(connections["notifications_db"])
            # The notifications db was corrupt: recover with an empty payload
            # instead of letting the unbound `response` raise a NameError
            # further down.
            response = Response([])
        # Count how many distinct coaches polled in the last five minutes.
        logging_interval = datetime.datetime.now() - datetime.timedelta(minutes=5)
        try:
            logged_notifications = (
                NotificationsLog.objects.filter(timestamp__gte=logging_interval)
                .values("coach_id")
                .distinct()
                .count()
            )
        except (OperationalError, DatabaseError):
            logged_notifications = 0
            repair_sqlite_db(connections["notifications_db"])
        # if there are more than 10 notifications we limit the answer to 10
        if logged_notifications < 10:
            notification_info = NotificationsLog()
            notification_info.coach_id = request.user.id
            notification_info.save()
            NotificationsLog.objects.filter(timestamp__lt=logging_interval).delete()
        if "results" not in response.data:
            response.data = {
                "results": response.data,
                "coaches_polling": logged_notifications,
            }
        else:
            response.data["coaches_polling"] = logged_notifications
        return response
class ExerciseDifficultiesPermissions(permissions.BasePermission):
    """Allow only admins/coaches of the relevant collection.

    When a ``lesson_id`` query param is supplied the check is made against
    the lesson's collection; otherwise against the group (if given) or the
    classroom.
    """

    def has_permission(self, request, view):
        allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]
        lesson_id = request.GET.get("lesson_id", None)
        if lesson_id:
            try:
                classroom = Lesson.objects.get(id=lesson_id).collection
                return request.user.has_role_for(allowed_roles, classroom)
            except (
                FacilityUser.DoesNotExist,
                Collection.DoesNotExist,
                Lesson.DoesNotExist,
                ValueError,
            ):
                return False
        # Fall back to the group, then the classroom, as the target collection.
        collection_id = request.GET.get("group_id", None) or request.GET.get(
            "classroom_id", None
        )
        try:
            target = Collection.objects.get(pk=collection_id)
            return request.user.has_role_for(allowed_roles, target)
        except (FacilityUser.DoesNotExist, Collection.DoesNotExist, ValueError):
            return False
# Define a base class so that the inherited class is properly introspectable,
# rather than being the result of our wrapping
@query_params_required(classroom_id=str)
class BaseExerciseDifficultQuestionsViewset(viewsets.ViewSet):
    # Intentionally empty: the decorator enforces that a string
    # `classroom_id` query parameter is present on every request.
    pass
class ExerciseDifficultQuestionsViewset(BaseExerciseDifficultQuestionsViewset):
    # Requires an authenticated admin/coach for the target collection.
    permission_classes = (permissions.IsAuthenticated, ExerciseDifficultiesPermissions)
    def retrieve(self, request, pk):
        """
        Get the difficult questions for a particular exercise.
        pk maps to the content_id of the exercise in question.

        Returns one row per attempted question item with ``total`` (number of
        attempts counted) and ``correct`` (sum of the correct flags),
        optionally scoped by lesson_id and/or group_id query params.
        """
        classroom_id = request.GET.get("classroom_id", None)
        group_id = request.GET.get("group_id", None)
        lesson_id = request.GET.get("lesson_id", None)
        queryset = AttemptLog.objects.filter(masterylog__summarylog__content_id=pk)
        if lesson_id is not None:
            # Collections the lesson is assigned to.
            collection_ids = Lesson.objects.get(
                id=lesson_id
            ).lesson_assignments.values_list("collection_id", flat=True)
            if group_id is not None:
                if (
                    group_id not in collection_ids
                    and classroom_id not in collection_ids
                ):
                    # In the special case that the group is not in the lesson assignments
                    # nor the containing classroom, just return an empty queryset.
                    queryset = AttemptLog.objects.none()
            else:
                # Only filter by all the collections in the lesson if we are not also
                # filtering by a specific group. Otherwise the group should be sufficient.
                base_queryset = queryset
                # Set starting queryset to null, then OR.
                queryset = AttemptLog.objects.none()
                for collection_id in collection_ids:
                    queryset |= HierarchyRelationsFilter(
                        base_queryset
                    ).filter_by_hierarchy(
                        ancestor_collection=collection_id, target_user=F("user")
                    )
                # OR-ing querysets can duplicate rows; collapse them.
                queryset = queryset.distinct()
        if group_id is not None:
            # Narrow further to learners under the group (or classroom).
            collection_id = group_id or classroom_id
            queryset = HierarchyRelationsFilter(queryset).filter_by_hierarchy(
                ancestor_collection=collection_id, target_user=F("user")
            )
        data = (
            queryset.values("item")
            .annotate(total=Count("correct"))
            .annotate(correct=Sum("correct"))
        )
        return Response(data)
class QuizDifficultiesPermissions(permissions.BasePermission):
    """Grant access only to admins/coaches of the exam's collection."""

    def has_permission(self, request, view):
        exam_id = view.kwargs.get("pk", None)
        if exam_id is None:
            return False
        try:
            collection = Exam.objects.get(id=exam_id).collection
        except Exam.DoesNotExist:
            return False
        try:
            return request.user.has_role_for(
                [role_kinds.ADMIN, role_kinds.COACH], collection
            )
        except (FacilityUser.DoesNotExist, ValueError):
            return False
class QuizDifficultQuestionsViewset(viewsets.ViewSet):
    permission_classes = (permissions.IsAuthenticated, QuizDifficultiesPermissions)
    def retrieve(self, request, pk):
        """
        Get the difficult questions for a particular quiz.

        ``pk`` is the exam id. Pass ``?group_id=`` to restrict the stats to a
        single learner group. Returns one row per question item with the sum
        of correct answers plus a ``total`` count of learners considered.
        """
        group_id = request.GET.get("group_id", None)
        # Only return logs when the learner has submitted the Quiz OR
        # the coach has deactivated the Quiz. Do not return logs when Quiz is still
        # in-progress.
        queryset = ExamAttemptLog.objects.filter(
            Q(examlog__closed=True) | Q(examlog__exam__active=False), examlog__exam=pk
        )
        if group_id is not None:
            # Restrict attempts to learners belonging to the group.
            queryset = HierarchyRelationsFilter(queryset).filter_by_hierarchy(
                ancestor_collection=group_id, target_user=F("user")
            )
            collection_id = group_id
        else:
            collection_id = Exam.objects.get(pk=pk).collection_id
        data = queryset.values("item", "content_id").annotate(correct=Sum("correct"))
        # Instead of inferring the totals from the number of logs, use the total
        # number of people who submitted (if quiz is active) or started the exam
        # (if quiz is inactive) as our guide, as people who started the exam
        # but did not attempt the question are still important.
        total = (
            HierarchyRelationsFilter(
                ExamLog.objects.filter(
                    Q(closed=True) | Q(exam__active=False), exam_id=pk
                )
            )
            .filter_by_hierarchy(
                ancestor_collection=collection_id, target_user=F("user")
            )
            .count()
        )
        for datum in data:
            datum["total"] = total
        return Response(data)
|
mrpau/kolibri
|
kolibri/plugins/coach/api.py
|
Python
|
mit
| 15,625
|
import os
from requests.exceptions import ConnectionError, HTTPError, Timeout
from unittest import mock
from django.core.urlresolvers import reverse
from django.test import TestCase
from .exceptions import BadRequest, ServiceUnavailable
from .helpers import make_rest_get_call, make_rest_post_call
class HelpersTestCase(TestCase):
    """Unit tests for the REST helper wrappers.

    Every test patches ``requests`` inside ``core.helpers`` so no real HTTP
    traffic occurs; only the exception translation is verified.
    """

    def setUp(self):
        self.url = os.environ['BUILDER_URL'] + '/build'
        self.headers = {'content-type': 'application/json'}
        callback = os.environ['API_BASE_URL'] + reverse(
            'webhook:builder', args=["uuid1234", ])
        self.body = {
            "uuid": "uuid",
            "deploy_key": "deploy_key",
            "branch": "branch",
            "git_hash": "git_hash",
            "repo_owner": "owner",
            "path": "path",
            "repo_name": "name",
            "environment": "staging",
            "callback": callback,
        }

    @mock.patch('core.helpers.requests.post', side_effect=ConnectionError)
    def test_make_rest_post_call_conn_error(self, mock_post):
        """A ConnectionError during POST surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_post_call(self.url, self.headers, self.body)

    @mock.patch('core.helpers.requests.post', side_effect=HTTPError)
    def test_make_rest_post_call_http_error(self, mock_post):
        """An HTTPError during POST surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_post_call(self.url, self.headers, self.body)

    @mock.patch('core.helpers.requests.post', side_effect=Timeout)
    def test_make_rest_post_call_timeout_error(self, mock_post):
        """A Timeout during POST surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_post_call(self.url, self.headers, self.body)

    @mock.patch('core.helpers.requests.post')
    def test_make_rest_post_call_error(self, mock_post):
        """A non-2xx POST response raises BadRequest."""
        stub_response = mock.Mock()
        stub_response.status_code = 400
        mock_post.return_value = stub_response
        with self.assertRaises(BadRequest):
            make_rest_post_call(self.url, self.headers, self.body)

    @mock.patch('core.helpers.requests.get', side_effect=ConnectionError)
    def test_make_rest_get_call_conn_error(self, mock_post):
        """A ConnectionError during GET surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_get_call(self.url, self.headers)

    @mock.patch('core.helpers.requests.get', side_effect=HTTPError)
    def test_make_rest_get_call_http_error(self, mock_post):
        """An HTTPError during GET surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_get_call(self.url, self.headers)

    @mock.patch('core.helpers.requests.get', side_effect=Timeout)
    def test_make_rest_get_call_timeout_error(self, mock_post):
        """A Timeout during GET surfaces as ServiceUnavailable."""
        with self.assertRaises(ServiceUnavailable):
            make_rest_get_call(self.url, self.headers)

    @mock.patch('core.helpers.requests.get')
    def test_make_rest_get_call_error(self, mock_post):
        """A non-2xx GET response raises BadRequest."""
        stub_response = mock.Mock()
        stub_response.status_code = 400
        mock_post.return_value = stub_response
        with self.assertRaises(BadRequest):
            make_rest_get_call(self.url, self.headers)
|
istrategylabs/franklin-api
|
franklin/core/tests.py
|
Python
|
mit
| 3,827
|
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""
Anaconda is a python autocompletion and linting plugin for Sublime Text 3
"""
import os
import sys
import logging
from string import Template
import sublime
import sublime_plugin
from .anaconda_lib import ioloop
from .anaconda_lib.helpers import get_settings, is_python
from .commands import *
from .listeners import *
# Anaconda requires the Sublime Text 3 plugin host (Python >= 3.3).
if sys.version_info < (3, 3):
    raise RuntimeError('Anaconda works with Sublime Text 3 only')

# Names of conflicting plugins that monitor_plugins() has unloaded, kept so
# enable_plugins() can reload them later.
DISABLED_PLUGINS = []
# Guard flag around the anaconda ioloop.
# NOTE(review): verify this flag is actually updated where the loop is
# started/stopped; as written it is only ever read.
LOOP_RUNNING = False

# Send debug logging to the Sublime console via stdout.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
def plugin_loaded() -> None:
    """Called directly from sublime on plugin load.

    Generates the Main.sublime-menu from its template on first run, starts
    the plugin-conflict monitor, and starts the anaconda ioloop exactly once.
    """
    global LOOP_RUNNING

    package_folder = os.path.dirname(__file__)
    if not os.path.exists(os.path.join(package_folder, 'Main.sublime-menu')):
        template_file = os.path.join(
            package_folder, 'templates', 'Main.sublime-menu.tpl'
        )
        with open(template_file, 'r', encoding='utf8') as tplfile:
            template = Template(tplfile.read())

        menu_file = os.path.join(package_folder, 'Main.sublime-menu')
        with open(menu_file, 'w', encoding='utf8') as menu:
            menu.write(template.safe_substitute({
                'package_folder': os.path.basename(package_folder)
            }))

    # unload any conflictive package while anaconda is running
    sublime.set_timeout_async(monitor_plugins, 0)
    if not LOOP_RUNNING:
        # Record the state *before* starting the loop so a repeated load
        # cannot start a second loop (the flag was previously never set).
        LOOP_RUNNING = True
        ioloop.loop()
def plugin_unloaded() -> None:
    """Called directly from sublime on plugin unload.

    Re-enables any plugin that anaconda disabled and stops the ioloop.
    """
    global LOOP_RUNNING

    # reenable any conflictive package
    enable_plugins()
    if LOOP_RUNNING:
        # Clear the flag so a later plugin_loaded() can restart the loop
        # (the flag was previously never reset).
        LOOP_RUNNING = False
        ioloop.terminate()
def monitor_plugins():
    """Monitor for any plugin that conflicts with anaconda.

    Unloads known-conflicting plugins (remembering them in DISABLED_PLUGINS
    so they can be re-enabled later) and reschedules itself every 5 minutes.
    """
    view = sublime.active_window().active_view()
    if not get_settings(view, 'auto_unload_conflictive_plugins', True):
        return

    plist = [
        'Jedi - Python autocompletion',  # breaks auto completion
        'SublimePythonIDE',  # interfere with autocompletion
        'SublimeCodeIntel'  # breaks everything, SCI is a mess
    ]
    for plugin in plist:
        if plugin in sys.modules:
            # Snapshot sys.modules first: unload_module mutates it, and
            # mutating a dict while iterating its items() view raises
            # RuntimeError. Also use a plain loop instead of a list
            # comprehension executed only for its side effects.
            for mod_name, module in list(sys.modules.items()):
                if plugin in mod_name:
                    sublime_plugin.unload_module(module)
            if plugin not in DISABLED_PLUGINS:
                DISABLED_PLUGINS.append(plugin)

    sublime.set_timeout_async(monitor_plugins, 5*60*1000)
def enable_plugins():
    """Re-enable every plugin that anaconda previously unloaded."""
    for plugin_name in tuple(DISABLED_PLUGINS):
        sublime_plugin.reload_plugin(plugin_name)
|
thatneat/dotfiles
|
other_applications/sublime-text-3/Packages/Anaconda/anaconda.py
|
Python
|
mit
| 2,768
|
def main(args):
    # Entry point of the kitten; runs in the overlay window when launched.
    # Whatever we return here is handed to handle_result() afterwards.
    return input('Enter some text: ')
def handle_result(args, answer, target_window_id, boss):
    # Look up the kitty window the kitten was launched from and paste the
    # collected answer into it; do nothing if the window has gone away.
    window = boss.window_id_map.get(target_window_id)
    if window is None:
        return
    window.paste(answer)
|
eggcaker/dotfiles
|
kitty/mykitten.py
|
Python
|
mit
| 489
|
from django.shortcuts import render
from .models import FormDemo
from .forms import SimpleFormDemoForm, CrispyFormDemoModelForm
def formDemoIndex(request):
    """Landing page for the forms demo app."""
    context = {'title': 'Demo for Forms'}
    return render(request, 'form_demo/index.html', context)
def showSimpleFormDemoForm(request):
    """Render the plain demo form; persist it when a valid POST arrives."""
    msg = ""
    if request.method != 'POST':
        form = SimpleFormDemoForm()
    else:
        form = SimpleFormDemoForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "form Saved"
    return render(request, 'form_demo/simpleForm.html', {'form': form, "msg": msg})
def showSimpleFormDemoBootstrapUsingTemplateTagsForm(request):
    """Same demo form, rendered through the template-tags template."""
    msg = ""
    if request.method != 'POST':
        form = SimpleFormDemoForm()
    else:
        form = SimpleFormDemoForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "form Saved"
    context = {'form': form, "title": "Using TemplateTags", 'msg': msg}
    return render(request, 'form_demo/tmplTagsForm.html', context)
def showSimpleFormDemoBootstrapUsingCrispyModelForm(request):
    """Demo form backed by the crispy-forms model form."""
    msg = ""
    if request.method != 'POST':
        form = CrispyFormDemoModelForm()
    else:
        form = CrispyFormDemoModelForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "form Saved"
    context = {'form': form, "title": "Using Crispy FormDemo ModelForm", 'msg': msg}
    return render(request, 'form_demo/CrispyPaperModelForm.html', context)
def modalFormSkinned(request):
    """Demo form presented inside a skinned modal dialog."""
    msg = ""
    if request.method != 'POST':
        form = SimpleFormDemoForm()
    else:
        form = SimpleFormDemoForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "form Saved"
    context = {'form': form, "title": "click to see modal form", 'msg': msg}
    return render(request, 'form_demo/modalFormSkinned.html', context)
def showModalFormDemoForm(request):
    """Draft modal-form view; the SimpleFormDemoForm is not really used yet."""
    msg = ""
    if request.method != 'POST':
        form = SimpleFormDemoForm()
    else:
        form = SimpleFormDemoForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "form Saved"
    return render(request, 'form_demo/modalForm.html', {'form': form, "msg": msg})
|
lerina/agileForms
|
apps/form_demo/views.py
|
Python
|
mit
| 2,167
|
# -*- coding: UTF-8 -*
'''
email utils
~~~~~~~~~~~~~~~~
build an email contains qrcode and send it.
:copyright: 20160211 by raptor.zh@gmail.com.
'''
from io import BytesIO
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import smtplib
import pyqrcode
from config import config
import logging
logger = logging.getLogger(__name__)
register_text = """
您好,
欢迎注册本网站。
为方便起见,本网站设计为可以不必设置和输入密码,您只需要使用email即可登录。
如果您认为需要更加安全,也可以按如下操作启用密码功能(24小时内有效,过期请登录网站,在设置页面点击重新发送密码设置邮件):
首先,通过Android或iOS手机或平板电脑在Google Play或App Store下载安装免费的Google Authenticator软件:
Android版:https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2
iOS版:https://itunes.apple.com/us/app/google-authenticator/id388497605
然后运行这个软件,在其中选择添加-扫描条形码,然后扫描下面这个二维码:
%(qrurl)s
如果无法打开这个二维码链接,也可以选择添加-手动输入验证码,然后输入您的email地址和下面这个验证码:
%(secret)s
添加完成后点击下面的链接:
%(reset_url)s
按提示输入生成的6位密码并提交,即可完成密码功能启用设置,以后登录本网站时就需要输入密码,每次的密码都由此APP临时生成,仅供一次性使用。
再次感谢您注册本网站。
"""
register_html = """
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>密码设置说明</title>
</head>
<body>
<p>您好,</p>
<p>欢迎注册本网站。</p>
<p>为方便起见,本网站设计为可以不必设置和输入密码,您只需要使用email即可登录。</p>
<p>如果您认为需要更加安全,也可以按如下操作启用密码功能(24小时内有效,过期请登录网站,在设置页面点击重新发送密码设置邮件):</p>
<p>首先,通过Android或iOS手机或平板电脑在Google Play或App Store下载安装免费的Google Authenticator软件:</p>
<p><a href="https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2">Android版</a></p>
<p><a href="https://itunes.apple.com/us/app/google-authenticator/id388497605">iOS版</a></p>
<p>然后运行这个软件,在其中选择添加-扫描条形码,然后扫描下面这个二维码:</p>
<p><img src="cid:image1" /></p>
<p>添加完成后点击下面的链接:</p>
<p><a href="%(reset_url)s">设置密码</a></p>
<p>按提示输入生成的6位密码并提交,即可完成密码功能启用设置,以后登录本网站时就需要输入密码,每次的密码都由此APP临时生成,仅供一次性使用。</p>
<p>再次感谢您注册本网站。</p>
</body>
</html>
"""
resetpw_text = """
您好,
您收到此邮件是因为有人申请重新设置此账号的密码,如果不是您本人的操作,请忽略本邮件。
如您确实需要重设密码,请按以下操作重设密码(24小时内有效,过期请重新申请密码重置):
首先,通过Android或iOS手机或平板电脑在Google Play或App Store下载安装免费的Google Authenticator软件:
Android版:https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2
iOS版:https://itunes.apple.com/us/app/google-authenticator/id388497605
然后运行这个软件,在其中选择添加-扫描条形码,然后扫描下面这个二维码:
%(qrurl)s
如果无法打开这个二维码链接,也可以选择添加-手动输入验证码,然后输入您的email地址和下面这个验证码:
%(secret)s
添加完成后点击下面的链接:
%(reset_url)s
按提示输入生成的6位密码并提交,即可完成密码功能的重设,以后登录本网站时就需要输入这个APP生成的密码,以前如果在其它手机或平板电脑上设置过本账号的密码,那些将失效,以本次设置为准。
再次感谢您使用本网站。
"""
resetpw_html = """
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>密码重置说明</title>
</head>
<body>
<p>您好,</p>
<p>您收到此邮件是因为有人申请重新设置此账号的密码,如果不是您本人的操作,请忽略本邮件。</p>
<p>如您确实需要重设密码,请按以下操作重设密码(24小时内有效,过期请重新申请密码重置):</p>
<p>首先,通过Android或iOS手机或平板电脑在Google Play或App Store下载安装免费的Google Authenticator软件:</p>
<p><a href="https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2">Android版</a></p>
<p><a href="https://itunes.apple.com/us/app/google-authenticator/id388497605">iOS版</a></p>
<p>然后运行这个软件,在其中选择添加-扫描条形码,然后扫描下面这个二维码:</p>
<p><img src="cid:image1" /></p>
<p>添加完成后点击下面的链接:</p>
<p><a href="%(reset_url)s">设置密码</a></p>
<p>按提示输入生成的6位密码并提交,即可完成密码功能的重设,以后登录本网站时就需要输入这个APP生成的密码,以前如果在其它手机或平板电脑上设置过本账号的密码,那些将失效,以本次设置为准。</p>
<p>再次感谢您使用本网站。</p>
</body>
</html>
"""
def generate_qrcode(uri):
    """Render *uri* as a QR code PNG and return it as a rewound BytesIO."""
    png_buffer = BytesIO()
    pyqrcode.create(uri, error="M").png(png_buffer, scale=8)
    png_buffer.seek(0)
    return png_buffer
def build_email(addr_from, addr_to, mail_type, secret, uri, qrurl, reset_url):
    """Assemble the multipart (plain text + HTML + inline QR image) message.

    ``mail_type`` selects between the registration and the password-reset
    templates; the remaining arguments are interpolated into them.
    """
    data = {"secret": secret, "qrurl": qrurl, "reset_url": reset_url}
    if mail_type == "register":
        subject = "密码设置说明"
        content_text = register_text % data
        content_html = register_html % data
    else:
        subject = "密码重置说明"
        content_text = resetpw_text % data
        content_html = resetpw_html % data

    root = MIMEMultipart('related')
    root['Subject'] = subject
    root['From'] = addr_from
    root['To'] = addr_to
    root.preamble = 'This is a multi-part message in MIME format.'

    # Text and HTML alternatives, HTML last so capable clients prefer it.
    alternative = MIMEMultipart('alternative')
    root.attach(alternative)
    alternative.attach(MIMEText(content_text))
    alternative.attach(MIMEText(content_html, 'html'))

    # Inline QR image referenced from the HTML body via cid:image1.
    qr_image = MIMEImage(generate_qrcode(uri).read())
    qr_image.add_header('Content-ID', '<image1>')
    root.attach(qr_image)
    return root
def sendmail(addr_to, secret, uri, qrurl, reset_url, mail_type):
    """Build the registration/reset email and deliver it via SMTP+STARTTLS.

    Server and credentials come from the module-level `config`; each SMTP
    protocol step's reply is logged at DEBUG level.
    """
    msgRoot = build_email(config['smtp_user'], addr_to, mail_type, secret, uri, qrurl, reset_url)
    smtp = smtplib.SMTP()
    logger.debug(smtp.connect(config['smtp_server'], config['smtp_port']))
    logger.debug(smtp.ehlo())
    # The second EHLO re-identifies the client after the TLS upgrade.
    logger.debug(smtp.starttls())
    logger.debug(smtp.ehlo())
    logger.debug(smtp.login(config['smtp_user'], config['smtp_pass']))
    logger.debug(smtp.sendmail(config['smtp_user'], addr_to, msgRoot.as_string()))
    logger.debug(smtp.quit())
|
raptorz/userga
|
mail.py
|
Python
|
mit
| 7,373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
"""Usage: slidie.py NAME
[-t T]
[-q Q]
[--p0 P0]
[--pn PM]
[--dt DT]
[--seed SEED]
[--min_P MP]
[--sigma SIGMA]
Wilcon-Cowan EI model, where the oscillation frequency drifts
with time.
Arguments
NAME name of the results file
Options:
-h help show this screen
-t T simultation run time [default: 3.0]
-q Q avg I drive [default: 1]
--p0 P0 E intial drive at burst [default: .5]
--pn PN E final drive [default: 2]
--dt DT time resolution [default: 1e-3]
--seed SEED random seed
--min_P MP smallest P possible [default: 1]
--sigma SIGMA Population noise [default: 1e-2]
"""
from __future__ import division, print_function
from docopt import docopt
import numpy as np
from pykdf.kdf import save_kdf
from brian2 import *
from fakespikes import rates
def ie(t, P0, PN, c1=15.0, c2=15.0, c3=15.0, c4=3.0, Q=1, dt=1e-3, sigma=0.01):
    """Simulate a noisy Wilson-Cowan E/I pair whose E drive drifts P0 -> PN.

    Returns a 3-tuple ``(I, E, P)``: the inhibitory and excitatory rate
    traces plus the Brian2 TimedArray of the drifting drive.
    NOTE(review): the ``__main__`` block below unpacks only two values from
    this function -- confirm the intended return arity.
    """
    # --
    time = t * second
    time_step = dt * second
    # -
    # Fixed parameters.
    re = 1.0
    ri = 0.5
    kn = 1.0
    k = 1.0
    tau_e = 5 * msecond
    tau_i = 10 * msecond
    # -
    # Define the drifting drive
    times = rates.create_times(t, dt)
    P = np.linspace(P0, PN, len(times))
    # Scale it
    P = P * (2**-0.03)
    # Format for Brian2
    P = TimedArray(P, dt=time_step)
    # -
    eqs = """
    dE/dt = -E/tau_e + ((1 - re * E) * (1 / (1 + exp(-(k * c1 * E - k * c2 * I+ k * P(t) - 2))) - 1/(1 + exp(2*1.0)))) / tau_e + (sigma / tau_e**.5 * xi_e) : 1
    dI/dt = -I/tau_i + ((1 - ri * I) * (1 / (1 + exp(-2 * (kn * c3 * E - kn * c4 * I + kn * Q - 2.5))) - 1/(1 + exp(2*2.5)))) / tau_i + (sigma / tau_i**.5 * xi_i) : 1
    """
    pops = NeuronGroup(1, model=eqs, namespace={'Q': Q})
    pops.E = 0
    pops.I = 0
    # --
    # Record
    mon = StateMonitor(pops, ('E', 'I'), record=True)
    # --
    # Run
    defaultclock.dt = time_step
    run(time)
    return mon.I.flatten(), mon.E.flatten(), P
if __name__ == "__main__":
    args = docopt(__doc__, version='alpha')

    # An absent --seed (None) is not int()-convertible; fall back to an
    # unseeded RNG in that case.
    try:
        seed = int(args['--seed'])
    except TypeError:
        seed = None
    np.random.seed(seed)

    # -
    # Process params
    t = float(args['-t'])
    dt = float(args['--dt'])
    P0 = float(args['--p0'])
    PN = float(args['--pn'])
    Q = float(args['-q'])
    sigma = float(args['--sigma'])

    # -
    # Run model. ie() returns three values (I, E, and the drive P); the
    # original unpacked only two, which raised ValueError at runtime. Also
    # forward the parsed Q and dt instead of silently using ie()'s defaults.
    I, E, P = ie(t, P0, PN, Q=Q, dt=dt, sigma=sigma)
    lfp = (E + I)

    # -
    save_kdf(
        str(args['NAME']),
        E=E,
        I=I,
        lfp=lfp,
        t=t,
        dt=dt,
        P0=P0,
        PN=PN,
        Q=Q,
        sigma=sigma)
|
voytekresearch/bw
|
slidie.py
|
Python
|
mit
| 2,871
|
import os
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from .models import Session, Base
def main(global_config, **settings):
    """
    Configure and return a WSGI application.
    """
    settings['sqlalchemy.url'] = os.environ.get('DATABASE_URL')
    engine = engine_from_config(settings, 'sqlalchemy.')
    Session.configure(bind=engine)
    Base.metadata.bind = engine

    config = Configurator(settings=settings)
    config.include('pyramid_jinja2')
    config.add_static_view('static', 'static', cache_max_age=3600)

    # Route table: (name, pattern), registered in declaration order.
    routes = (
        ('index', '/'),
        ('about', 'about/'),
        ('hire', 'hire/'),
        ('guide', 'guide/'),
        ('request', 'request/{rendertype}/{scene_id}/'),
        ('scene', 'scene/{scene_id}/'),
        ('scene_band', 'scene/{scene_id}/bands/{band_combo}/'),
        ('scene_options_ajax', 'scene_options_ajax/'),
        ('status_poll', 'status_poll/'),
        ('preview_poll', 'preview_poll/'),
    )
    for route_name, pattern in routes:
        config.add_route(route_name, pattern)

    config.scan()
    return config.make_wsgi_app()
|
recombinators/snapsat
|
app/app/__init__.py
|
Python
|
mit
| 1,155
|
# coding: utf8
# Copyright 2015 Vincent Jacques <vincent@vincent-jacques.net>
from .ax_12 import AX12
from .ax_s1 import AXS1
|
jacquev6/Pynamixel
|
Pynamixel/devices/__init__.py
|
Python
|
mit
| 128
|
#!/usr/bin/env python3
from linklist import *
class Solution(object):
    """Reorder a singly linked list L0..Ln into L0, Ln, L1, Ln-1, ... in place."""

    def reorderList(self, head):
        if head == None:
            return
        # Collect every node so both ends can be indexed directly.
        nodes = []
        current = head
        while current:
            nodes.append(current)
            current = current.next
        # Build the interleaved visiting order: front, back, front+1, back-1...
        front, back = 0, len(nodes) - 1
        order = []
        while front < back:
            order.append(nodes[front])
            order.append(nodes[back])
            front += 1
            back -= 1
        if front == back:
            order.append(nodes[front])
        # Relink sequentially and terminate the list.
        for prev_node, next_node in zip(order, order[1:]):
            prev_node.next = next_node
        order[-1].next = None
sol = Solution()
# Successive reassignments: only the *last* nodeString takes effect; move a
# different line last (or comment others out) to exercise another case.
nodeString = '[]'
nodeString = '[1,2]'
nodeString = '[1]'
nodeString = '[1,2,3,4]'
nodeString = '[1,2,3,4,5]'
# linkListBuilder/traverse come from the star-import of `linklist` above.
head = linkListBuilder(nodeString)
traverse(head)
sol.reorderList(head)
traverse(head)
|
eroicaleo/LearningPython
|
interview/leet/143_Reorder_List_v2.py
|
Python
|
mit
| 684
|
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Email, Length, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(Form):
    """Email/password login form with a remember-me option."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log in')
class RegistrationForm(Form):
    """Sign-up form with uniqueness checks against the User table."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    username = StringField('Username', validators=[Required(), Length(1, 64),
                                          Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                 'Usernames must have only letters, '
                                                 'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[Required(),
        EqualTo('password2', message='Passwords must match.')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')
    def validate_email(self, field):
        # Inline validator hook: WTForms invokes validate_<fieldname>
        # automatically while validating the form.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
    def validate_username(self, field):
        # Same inline-hook mechanism as validate_email above.
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
|
HoracioAlvarado/fwd
|
app/auth/forms.py
|
Python
|
mit
| 1,486
|
import os.path
import yaml
import logging
CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'settings.yaml')
logger = logging.getLogger(__name__)
# Load the YAML settings file; bail out early with a clear message when the
# deployment has not created it yet.
try:
    with open(CONFIG_PATH, 'rb') as fh:
        # safe_load avoids arbitrary object construction; a bare yaml.load
        # without an explicit Loader is unsafe and deprecated in PyYAML 5+.
        config = yaml.safe_load(fh)
except FileNotFoundError:
    # Use the module logger defined above instead of the root logger,
    # for consistency with the rest of the module.
    logger.error('Config file was not found: settings.yaml')
    logger.error('You must create it first')
    exit(1)
# Process settings
ITERATION_LENGTH = config['process']['iteration_length']
MAX_RESULTS = config['process']['max_search_results']
# Jira settings
JIRA_URL = config['jira']['url']
JIRA_USER = config['jira']['user']
JIRA_PASS = config['jira']['pass']
JIRA_PROJECT = config['jira']['project']
# SMTP settings
SMTP_HOST = config['smtp']['host']
SMTP_PORT = config['smtp']['port']
SMTP_USER = config['smtp']['user']
SMTP_PASS = config['smtp']['pass']
# Mail settings
EMAIL_FROM = config['email']['from']
EMAIL_TO = config['email']['to']
DAILY_SUBJECT = config['email']['daily_subject']
QUEUE_SUBJECT = config['email']['queue_subject']
AGES_SUBJECT = config['email']['ages_subject']
WEEKLY_SUBJECT = config['email']['weekly_subject']
# Team settings: all member emails, plus the subsets whose declared role in
# settings.yaml is 'manual' (FUNC) or 'auto' (AUTO).
TEAM = [x['mail'] for x in config['team']]
FUNC = [x['mail'] for x in config['team'] if x['role'] == 'manual']
AUTO = [x['mail'] for x in config['team'] if x['role'] == 'auto']
|
vv-p/jira-reports
|
settings.py
|
Python
|
mit
| 1,303
|
#encoding: utf-8
"""
grid.place_network -- Place network model driven by grid-cell inputs, with
competition mediated by global inhibtion
Written by Joe Monaco
Copyright (c) 2007, 2008 Columbia University. All rights reserved.
"""
# Library imports
import numpy as N
# Package imports
from .dmec import GridCollection
from .core.api import AbstractModel, TSPostMortem
from .trajectories import BaseTrajectory, RandomWalk, BipartiteRaster
from .tools.integrate import integrator
# Traits imports
from enthought.traits.api import Delegate, Instance, Property, Trait, Array, \
Range, Enum, Float, CInt, Int, true, false
class PlaceNetwork(AbstractModel):
    """
    Simple CA3 model with global inhibition; no excitatory recurrence
    Model parameters and other attributes are Traits, so values should be set
    as keyword arguments to the class constructor.
    Trial setup computes a new weight matrix (self.W) only if refresh_weights
    is True. The MEC input is modified according to refresh_orientation,
    refresh_phase, phi and psi. The phi and psi Property traits allow direct
    specification of cortical input alignment arrays (you should also then
    set refresh_orientation and/or refresh_phases to False).
    Model settings and parameters:
    EC -- GridCollection instance to be used as input (required)
    traj_type -- type of trajectory to traverse (default RandomWalk)
    phi_lambda -- field nonlinearity threshold (default 1.5)
    phi_sigma -- field nonlinearity smoothness (default 0.1)
    J0 -- gain of global inhibition (default 2.5)
    tau_r -- time constant for input integration (default 0.05)
    C_W -- fraction of afferent connectivity (default 0.5)
    mu_W -- mean of afferent weight distribution (default 0.5)
    dwell_factor -- duration of pixel dwell in tau (default 5.0)
    Simulation control settings (per trial):
    refresh_traj -- new trajectory (default False)
    refresh_weights -- new network weights (default True)
    refresh_orientation -- randomize cortical orientation array (default False)
    refresh_phase -- new random array of cortical input phases (default False)
    Cortical input alignment:
    phi -- directly set the cortical phase array; shape: (2, num_maps)
    psi -- directly set the cortical orientation array; shape: (num_maps,)

    NOTE(review): the docstring defaults above differ from some trait
    defaults below (e.g. phi_lambda=0.2, J0=250) -- confirm which is current.
    """
    label = 'Grid Model'
    app_name = label
    evolve = Instance(integrator)
    # Trajectory traits
    traj_type = Trait(['randwalk', 'checker'], user=True)
    traj = Instance(BaseTrajectory)
    refresh_traj = false(user=True)
    dwell_factor = Range(low=0.0, value=5.0, exclude_low=True, user=True)
    x = Property(Float, track=True)
    y = Property(Float, track=True)
    # Network and input traits
    EC = Trait(None, Instance(GridCollection), user=True)
    get_afferent_input = Delegate('EC', prefix='map_value')
    N_EC = Delegate('EC', prefix='num_maps')
    N_CA = CInt(500, user=True)
    refresh_orientation = false(user=True)
    refresh_phase = false(user=True)
    # Nonlinearity definition
    phi_lambda = Float(0.2, user=True)
    phi_sigma = Range(low=0.0, value=0.015, exclude_low=True, user=True)
    # Weights and rates traits :-)
    W = Array
    refresh_weights = true(user=True)
    C_W = Range(low=0.0, high=1.0, value=0.5, user=True)
    mu_W = Float(0.5, user=True)
    r = Array(track=True)
    r_EC = Array
    i_aff = Array
    tau_r = Float(0.05, user=True)
    dt = 0.005
    # Synaptic gains
    J0 = Float(250, user=True)
    beta = Float
    # AbstractModel override methods
    def trial_setup(self):
        # Reset rates/gain, rebuild the integrator, then honor the per-trial
        # refresh_* flags for trajectory, weights, phases and orientations.
        self.r = self._r_default()
        self.beta = self._beta_default()
        self.evolve = integrator(self.drdt, self.r, dt=self.dt)
        if self.refresh_traj:
            self.out('Creating new stage trajectory')
            self.traj = self.new_trajectory()
        else:
            self.out('Reseting current stage trajectory')
            self.traj.reset()
        if self.refresh_weights:
            self.out('Computing a new weight matrix')
            self.W = self.new_weights()
        if self.refresh_phase:
            self.EC.randomize_phase()
            self.out('Computed new spatial phase vector')
        if self.refresh_orientation:
            self.EC.randomize_orientation()
            self.out('Computed new orientation vector')
    def run_timestep(self):
        """Simulation time-step computation
        """
        self.r_EC = self.get_afferent_input(self.x, self.y)  # MEC input
        self.i_aff = N.dot(self.W, self.r_EC)  # afferent current
        self.evolve()  # evolve rates
        self.traj.advance()  # move trajectory
    def drdt(self, r, t0):
        """Rate equation: dr/dt = (Phi[h - lambda] - r)/tau
        """
        # h = beta-normalized afferent drive minus global inhibition (J0 *
        # mean rate), thresholded at phi_lambda.
        return (self.phi_h( self.beta * self.i_aff -
                            self.J0 * r.mean() -
                            self.phi_lambda) - r) / self.tau_r
    # Field nonlinearity
    def phi_h(self, h):
        # Rectified tanh: smooth saturation above 0, clamped to 0 below.
        phi = N.tanh(h/self.phi_sigma)
        phi[phi<0] = 0
        return phi
    # Create new trajectory, weight matrix
    def new_trajectory(self):
        """Get a new trajectory instance
        """
        traj = None
        if self.traj_type == 'randwalk':
            traj = RandomWalk(dt=self.dt, T=self.T)
        elif self.traj_type == 'checker':
            traj = BipartiteRaster( dt=self.dt,
                                    dwell=self.dwell_factor*self.tau_r)
            # Raster trajectories define their own duration.
            self.T = traj.T
        return traj
    def new_weights(self):
        """Get a new weight matrix
        """
        from scipy.stats import uniform
        from numpy.random import permutation
        # Each row is a random permutation of the same sparse weight vector:
        # C_W fraction of entries drawn uniform(0, 2*mu_W), rest zero.
        W = N.empty((self.N_CA, self.N_EC), 'd')
        Wdist = uniform.rvs(size=self.N_EC, loc=0, scale=2*self.mu_W)
        Wdist[int(self.C_W*self.N_EC):] = 0
        for Wi in W:
            Wi[:] = permutation(Wdist)
        return W
    # Property getters for current position
    def _get_x(self):
        return self.traj.x
    def _get_y(self):
        return self.traj.y
    # Trajectory and rate vectors are available by default
    def _traj_default(self):
        return self.new_trajectory()
    def _W_default(self):
        return self.new_weights()
    def _r_default(self):
        return N.zeros(self.N_CA, 'd')
    def _i_aff_default(self):
        return N.zeros(self.N_CA, 'd')
    def _r_EC_default(self):
        return N.zeros(self.N_EC, 'd')
    # Normalizing factor for feedforward input current
    def _beta_default(self):
        return 1 / float(self.C_W * self.N_EC * self.mu_W)
class PlaceNetworkRaster(PlaceNetwork):
    """
    PlaceNetwork variant optimized for raster-based trajectories
    It provides a modified timestep kernel and turns off trajectory and rate
    data tracking. This method uses significantly less memory and should serve
    as the superclass for PlaceNetwork subclasses primarily intended for use with
    raster trajectories.
    Setting dwell_factor determines pixel dwell-time (xTau).
    """
    # Redefine x, y, r traits to disable data tracking
    x = Float(track=False)
    y = Float(track=False)
    r = Array(track=False)
    # Default to BipartiteRaster trajectory
    traj_type = 'checker'
    # Matrix structure to hold results of raster scan
    scan = Array
    scan_ix = Int
    trial_result = 'scan'
    zero_start = false
    # Counter for dwell-time clamp
    dwell = Int
    dwell_count = Int
    def trial_setup(self):
        # Standard setup, then allocate the scan matrix and prime the first
        # pixel's input with an initial dwell period.
        PlaceNetwork.trial_setup(self)
        self.scan = self.new_scan()
        self.dwell = int(self.traj._init_factor * self.dwell_count)
        self.scan_ix = 0
        self.set_input()
    def set_input(self):
        """Set cortical input according to current scan_ix
        """
        x, y = self.scan[self.scan_ix, :2]
        self.r_EC = self.get_afferent_input(x, y)
        self.i_aff = N.dot(self.W, self.r_EC)
    def run_timestep(self):
        """Non-tracking simulation timestep kernel
        """
        if self.dwell:
            self.dwell -= 1
        else:
            # Store network state
            self.scan[self.scan_ix, 2:] = self.r
            # Advance the scan and set inputs
            self.scan_ix += 1
            self.set_input()
            # Reset dwell-time counter
            self.dwell = self.dwell_count
            # Reset rate vector
            if self.zero_start:
                self.r[:] = 0.0
        self.evolve()
    def new_trajectory(self):
        """Force a bipartite raster trajectory regardless of traj_type."""
        # Fixed: the original used `is 'randwalk'`, an identity comparison
        # with a string literal that depends on CPython interning and is
        # not a reliable equality test.
        if self.traj_type == 'randwalk':
            self.out('Forcing bipartite raster trajectory')
            self.traj_type = 'checker'
        return PlaceNetwork.new_trajectory(self)
    def new_scan(self):
        """Allocate the (x, y, r_0..r_N) matrix for one raster scan."""
        points = self.traj.get_points().T
        return N.c_[points, N.zeros((points.shape[0], self.N_CA), 'd')]
    def post_mortem(self, trial=1):
        """Modified post_mortem to properly return single-trial scan data
        """
        assert self.done, 'model simulation must be finished running'
        assert trial <= self.num_trials, 'invalid trial number specified'
        scan = self.results[trial-1]
        return TSPostMortem(x=scan[:,0], y=scan[:,1], r=scan[:,2:], ntrials=1)
    def _run_trial(self):
        """Overload AbstractModel._run_trial to use a scan-based while condition
        """
        while self.scan_ix < self.scan.shape[0] - 1:
            self.run_timestep()
            self._handle_monitor_msg()
            self.ts.advance()
    def _dwell_count_default(self):
        # Number of timesteps per pixel: dwell_factor expressed in tau_r.
        return int(N.round(self.dwell_factor * (self.tau_r / self.dt)))
class PlaceNetworkStd(PlaceNetworkRaster):
    """
    Resets default dynamic parameter values to the maximum fit achieved during
    the PlaceNetworkSearch genetic search.
    """

    # Best-fit parameter values from the genetic search; the meaning of each
    # constant is defined by the traits declared on the base classes.
    J0 = 45.0
    N_CA = 500
    C_W = 0.33
    phi_lambda = 0.04
    phi_sigma = 0.02
|
jdmonaco/grid-remapping-model
|
src/place_network.py
|
Python
|
mit
| 10,180
|
#!/usr/bin/env python
from __future__ import print_function
import shutil
import unittest
from ruffus.drmaa_wrapper import write_job_script_to_temp_file, read_stdout_stderr_from_files
import ruffus.drmaa_wrapper
import ruffus
import sys
"""
test_drmaa_wrapper_run_job_locally.py
"""
import os
script_dir = os.path.abspath(os.path.dirname(__file__))
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0]))
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
class Test_ruffus(unittest.TestCase):
    """Tests for ruffus.drmaa_wrapper's run-locally code path."""

    class t_test_logger:
        """
        Does nothing!
        """
        # Minimal logger stand-in that records messages per level so the
        # tests can assert on what drmaa_wrapper logged.
        def __init__(self):
            self.clear()

        def clear(self):
            # Reset every captured message list.
            self.info_msg = []
            self.debug_msg = []
            self.warning_msg = []
            self.error_msg = []

        def info(self, message, *args, **kwargs):
            self.info_msg.append(message)

        def debug(self, message, *args, **kwargs):
            self.debug_msg.append(message)

        def warning(self, message, *args, **kwargs):
            self.warning_msg.append(message)

        def error(self, message, *args, **kwargs):
            self.error_msg.append(message)

    # ___________________________________________________________________________
    #
    #   setup and cleanup
    # ___________________________________________________________________________
    def setUp(self):
        # Create the working directory; ignore "already exists".
        try:
            os.mkdir(tempdir)
        except OSError:
            pass

    def tearDown(self):
        shutil.rmtree(tempdir)

    def test_read_stdout_stderr_from_files(self):
        #
        #   Test missing stdout and stderr files
        #
        stdout_path = os.path.join(tempdir, "stdout.txt")
        stderr_path = os.path.join(tempdir, "stderr.txt")
        logger = Test_ruffus.t_test_logger()
        read_stdout_stderr_from_files(
            stdout_path, stderr_path, logger, cmd_str="test_cmd", tries=0)
        self.assertTrue("could not open stdout" in "".join(logger.warning_msg))
        self.assertTrue("could not open stderr" in "".join(logger.warning_msg))
        logger.clear()

        #
        #   Test present stdout and stderr files
        #
        with open(stdout_path, "w") as so:
            so.write("STDOUT\nSTDOUT\n")
        with open(stderr_path, "w") as se:
            se.write("STDERR\nSTDERR\n")
        stdout_msg, stderr_msg = read_stdout_stderr_from_files(
            stdout_path, stderr_path, logger, cmd_str="test_cmd", tries=1)
        self.assertEqual(logger.warning_msg, [])
        self.assertEqual(stdout_msg, ["STDOUT\n", "STDOUT\n"])
        self.assertEqual(stderr_msg, ["STDERR\n", "STDERR\n"])

    def test_run_job(self):
        # Custom environment must be visible to the child process.
        environ = {"RUFFUS_HEEHEE": "what?"}
        home_dir = os.path.expanduser("~")
        sys.stderr.write("    Run echoing to screen...\n")
        stdout, stderr = ruffus.drmaa_wrapper.run_job(cmd_str="%s %s/slow_process_for_testing.py" % (sys.executable, script_dir),
                                                      job_environment=environ,
                                                      working_directory=home_dir,
                                                      run_locally=True,
                                                      verbose=1,
                                                      local_echo=True)
        sys.stderr.write("    Run silently...\n")
        stdout, stderr = ruffus.drmaa_wrapper.run_job(cmd_str="%s %s/slow_process_for_testing.py" % (sys.executable, script_dir),
                                                      job_environment=environ,
                                                      working_directory=home_dir,
                                                      run_locally=True,
                                                      verbose=1,
                                                      local_echo=False)
        self.assertEqual(
            stdout,
            ['    Stdout 0\n', '    Stdout 1\n', '    Stdout 2\n', '    Stdout 3\n'])
        # stderr[1] dumps the environment dict, whose ordering/content varies:
        # compare the fixed lines exactly and only probe the variable one.
        stderr_fixed = [stderr[x] for x in (0, 2, 3, 4, 5)]
        stderr_variable = stderr[1]
        self.assertEqual(
            stderr_fixed,
            ['    %s\n' % home_dir,
             '    Stderr 0\n',
             '    Stderr 1\n',
             '    Stderr 2\n',
             '    Stderr 3\n'])
        self.assertTrue("'PWD': '{}'".format(home_dir) in stderr_variable)
        self.assertTrue("'RUFFUS_HEEHEE': 'what?'" in stderr_variable)

    def test_write_job_script_to_temp_file(self):
        sys.stderr.write("    Write to temp_file...\n")
        # Once with the default temp directory, once with an explicit one.
        job_script_path, stdout_path, stderr_path = write_job_script_to_temp_file(
            "ls", None, "job_name", "", None, None)
        os.unlink(job_script_path)
        job_script_path, stdout_path, stderr_path = write_job_script_to_temp_file(
            "ls", tempdir, "job_name", "", None, None)

    def test_ls(self):
        sys.stderr.write("    ls...\n")
        with open(os.path.join(tempdir, "temp.txt"), "w") as oo:
            oo.write("done")
        stdout, stderr = ruffus.drmaa_wrapper.run_job(cmd_str="ls %s" % tempdir,
                                                      run_locally=True,
                                                      verbose=1,
                                                      local_echo=True)
        self.assertEqual(stdout, ['temp.txt\n'])
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
|
bunbun/ruffus
|
ruffus/test/test_drmaa_wrapper_run_job_locally.py
|
Python
|
mit
| 6,085
|
from argparse import Namespace
from typing import Tuple
from pymap.config import IMAPConfig
from pymap.interfaces.session import LoginProtocol
from .session import Session
from .config import Config
__all__ = ['add_subparser', 'init']
def add_subparser(subparsers) -> None:
    """Register the ``maildir`` sub-command and its CLI options."""
    maildir = subparsers.add_parser('maildir')
    maildir.add_argument(
        '-d', '--base-dir', metavar='DIR',
        help='Base directory containing user maildirs.')
    maildir.add_argument(
        '-t', '--concurrency', metavar='NUM', type=int,
        help='Maximum number of IO workers.')
def init(args: Namespace) -> Tuple[LoginProtocol, IMAPConfig]:
    """Return the maildir backend's login callable and its IMAP config.

    Fix: the original built a ``session_info`` dict from ``args.base_dir``
    and ``args.concurrency`` and then silently discarded it -- dead code.
    ``Config.from_args(args)`` already receives the full namespace, so the
    same values reach the backend through the config object.
    NOTE(review): if ``session_info`` was meant to be *passed* somewhere,
    that call site never existed here -- confirm against Session/Config.
    """
    return Session.login, Config.from_args(args)
|
icgood/pymap-maildir
|
pymap_maildir/__init__.py
|
Python
|
mit
| 878
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InformationProtectionPoliciesOperations:
    """InformationProtectionPoliciesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.security.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        scope: str,
        information_protection_policy_name: Union[str, "_models.Enum15"],
        **kwargs: Any
    ) -> "_models.InformationProtectionPolicy":
        """Details of the information protection policy.

        :param scope: Scope of the query, can be subscription
         (/subscriptions/0b06d9ea-afe6-4779-bd59-30e5c2d9d13f) or management group
         (/providers/Microsoft.Management/managementGroups/mgName).
        :type scope: str
        :param information_protection_policy_name: Name of the information protection policy.
        :type information_protection_policy_name: str or ~azure.mgmt.security.models.Enum15
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: InformationProtectionPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.InformationProtectionPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InformationProtectionPolicy"]
        # Map auth / not-found / conflict status codes onto typed exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-08-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote=True: the scope is itself a '/'-separated ARM path.
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'informationProtectionPolicyName': self._serialize.url("information_protection_policy_name", information_protection_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('InformationProtectionPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/{scope}/providers/Microsoft.Security/informationProtectionPolicies/{informationProtectionPolicyName}'}  # type: ignore

    async def create_or_update(
        self,
        scope: str,
        information_protection_policy_name: Union[str, "_models.Enum15"],
        information_protection_policy: "_models.InformationProtectionPolicy",
        **kwargs: Any
    ) -> "_models.InformationProtectionPolicy":
        """Details of the information protection policy.

        :param scope: Scope of the query, can be subscription
         (/subscriptions/0b06d9ea-afe6-4779-bd59-30e5c2d9d13f) or management group
         (/providers/Microsoft.Management/managementGroups/mgName).
        :type scope: str
        :param information_protection_policy_name: Name of the information protection policy.
        :type information_protection_policy_name: str or ~azure.mgmt.security.models.Enum15
        :param information_protection_policy: Information protection policy.
        :type information_protection_policy: ~azure.mgmt.security.models.InformationProtectionPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: InformationProtectionPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.InformationProtectionPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InformationProtectionPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-08-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'informationProtectionPolicyName': self._serialize.url("information_protection_policy_name", information_protection_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(information_protection_policy, 'InformationProtectionPolicy')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # PUT returns 200 on update and 201 on creation; both carry the policy.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('InformationProtectionPolicy', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('InformationProtectionPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/{scope}/providers/Microsoft.Security/informationProtectionPolicies/{informationProtectionPolicyName}'}  # type: ignore

    def list(
        self,
        scope: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.InformationProtectionPolicyList"]:
        """Information protection policies of a specific management group.

        :param scope: Scope of the query, can be subscription
         (/subscriptions/0b06d9ea-afe6-4779-bd59-30e5c2d9d13f) or management group
         (/providers/Microsoft.Management/managementGroups/mgName).
        :type scope: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either InformationProtectionPolicyList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.InformationProtectionPolicyList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InformationProtectionPolicyList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-08-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-the-link request.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page of the response into (continuation token, items).
            deserialized = self._deserialize('InformationProtectionPolicyList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/{scope}/providers/Microsoft.Security/informationProtectionPolicies'}  # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_information_protection_policies_operations.py
|
Python
|
mit
| 12,061
|
# coding=utf-8
# __author__ = 'mio'
def auth_mixin():
    # apiDoc source stub: the docstring below is parsed by the apidoc tool
    # (@apiDefine AuthHeaderMixin).  The function is never called at runtime,
    # and the docstring content (including the Chinese text) is the generated
    # documentation itself, so it is preserved verbatim.
    """
    @apiDefine AuthHeaderMixin

    @apiHeaderExample Auth-Header-Example:
        {
            "Authorization": "token 78323lj4l32l3l23j2n2l22jl"
        }
    @apiHeader {String} Authorization 验证身份,注意格式中"token {token}"token后面有一个空格
    """
    pass
def error_mixin():
    # apiDoc source stub defining the shared error documentation
    # (@apiDefine ErrorMixin); docstring content preserved verbatim for the
    # apidoc generator.
    """
    @apiDefine ErrorMixin

    @apiError content 错误信息
    @apiError message 错误消息

    @apiErrorExample 400 Bad Request
        参数错误
        {
            content="missed keys set(['city'])",
            message="ArgsParseFailed"
        }
        查询获取单个数据时,找到不止一个(通过id查找)
        {
            content=null,
            message="MultipleResultsFound"
        }
        查询没有结果(通过id查找)
        {
            content=null,
            message="NoResultFound"
        }
    @apiErrorExample 403 Forbidden
        非可操作用户
        {
            content=null,
            message="Forbidden"
        }
        未知错误
        {
            content=null,
            message="UnknownError"
        }
    @apiErrorExample 422 Unprocessable Entity
        逻辑层错误
        {
            content=null,
            message="LogicResponseFailed"
        }
        插入时重复键值
        {
            content=null,
            message="DuplicateEntry"
        }
        退款失败
        {
            content=null,
            message="RefundMoneyFailed"
        }
        扣款失败
        {
            content=null,
            message="CostMoneyFailed"
        }
    """
    pass
def express_details():
    # apiDoc source stub (@apiDefine ExpressDetails): the docstring is the
    # canonical example payload for an express order, parsed by the apidoc
    # tool.  Content preserved verbatim (note: the sample JSON intentionally
    # mirrors real responses, including a missing comma after
    # "source_order_id" -- it is documentation text, not executed code).
    """
    @apiDefine ExpressDetails

    @apiSuccessExample 成功返回示例
        {
            "content": {
                "status": "FINISHED",
                "transfer_status": 1,
                "shop": {
                    "tel": "17808881022",
                    "name": "测试产品商家001",
                    "is_test": true,
                    "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/8AD3A76D2D2071E1ECB0A0B21E393573",
                    "address": "浙江省杭州市滨江区江陵路",
                    "lat": 30.219866,
                    "lng": 120.22077,
                    "id": 1471,
                    "owner_id": 7750900
                },
                "remark": "",
                "errors": [],
                "node": {
                    "city": "杭州市",
                    "name": "HA3022",
                    "distance": 1132,
                    "mt_num": 1,
                    "mt_list": [
                        "0000000000000000000000000000000000000000000020020010060000000000"
                    ],
                    "couriers": [
                        7756082,
                        7759069,
                        7771381,
                        7797998,
                        7756818,
                        7768146,
                        7791237,
                        7814415
                    ],
                    "node_type": "561a875b421aa9b84017ba91",
                    "node_type_name": "D3",
                    "address": "滨江区江陵路与滨盛路交叉口星耀城",
                    "lat": 30.218993,
                    "lng": 120.221169,
                    "id": "561a987b421aa9cf16f453f6"
                },
                "image": "http://7qnajq.com2.z0.glb.qiniucdn.com/56456465a46s456aaa",
                "receiver": {
                    "lat": "30.210268",
                    "lng": "120.215111",
                    "tel": "13920201113",
                    "name": "杨某某",
                    "address": "武警医院"
                },
                "number": "000000000005",
                "pick_up_time": null,
                "source": "PHH",
                "fee": {
                    "cost": 10,
                    "order": 15
                },
                "create_time": "2015-12-28T07:34:53Z",
                "pkg_id": null,
                "vehicle": {},
                "from_courier": 0,
                "path": {},
                "source_order_id": "ibenben-2121"
                "courier": {
                    "tel": "17705717701",
                    "id": 7792622,
                    "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                    "name": "测试黄忠"
                },
                "trace": [
                    {
                        "status": "CREATED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "remark": "",
                        "estimated_time": "2015-12-28T07:34:53Z",
                        "msg": "下单成功",
                        "operator": {
                            "tel": "17808881022",
                            "name": "测试产品商家001",
                            "is_test": true,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/8AD3A76D2D2071E1ECB0A0B21E393573",
                            "address": "浙江省杭州市滨江区江陵路",
                            "lat": 30.219866,
                            "lng": 120.22077,
                            "id": 1471,
                            "owner_id": 7750900
                        },
                        "actual_time": "2015-12-28T07:34:53Z"
                    },
                    {
                        "status": "SORTED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "estimated_time": null,
                        "msg": "城际司机已揽件",
                        "operator": {
                            "name": "测试城际司机"
                        },
                        "actual_time": "2015-12-28T07:35:07Z"
                    },
                    {
                        "status": "ADOPTED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "estimated_time": null,
                        "msg": "城内司机已揽件",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试城内司机"
                        },
                        "actual_time": null
                    },
                    {
                        "status": "SENDING",
                        "loc": {
                            "lat": 30.218993,
                            "lng": 120.221169
                        },
                        "estimated_time": null,
                        "msg": "到达中转站, 配送员已取货",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "actual_time": "2015-12-28T07:36:32Z"
                    },
                    {
                        "status": "WAIT_EVIDENCE",
                        "loc": {
                            "lat": "30.210268",
                            "lng": "120.215111"
                        },
                        "estimated_time": null,
                        "msg": "已送达, 等待上传凭证",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "actual_time": "2015-12-28T07:36:52Z"
                    },
                    {
                        "status": "ERROR",
                        "type": "收方联系不到",
                        "reason": "电话打不通",
                        "actual_time": "2015-12-28T07:36:52Z",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "msg": "标记异常"
                    },
                    {
                        "status": "FINISHED",
                        "loc": {
                            "lat": "30.210268",
                            "lng": "120.215111"
                        },
                        "estimated_time": null,
                        "msg": "已上传凭证, 妥投",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "name": "测试黄忠",
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD"
                        },
                        "actual_time": "2015-12-28T07:37:08Z",
                        "image": "http://qiniu.com/testhash"
                    }
                ]
            },
            "message": ""
        }
    @apiSuccess {Object} content.status CREATED待取货, SENDING配送中已取货, FINISHED已完成, CLOSED商户取消
    @apiSuccess {Object} content.transfer_status 1:可转单,2:转单中-转出,3:转单中-转入,4:不可转单,0:其他
    """
    pass
class OpenApiExpressHandler(object):
    """apiDoc container for the /open_api/send_order list and create endpoints.

    The method docstrings are apidoc source (parsed by the apidoc tool) and
    are preserved verbatim, including the Chinese documentation text.
    """
    def get(self):
        """
        @api {get} /open_api/send_order 送货单列表查询
        @apiVersion 0.0.1
        @apiName api_get_order_list
        @apiGroup BulkSend
        @apiUse AuthHeaderMixin
        @apiParamExample 请求示例:
            Request URL: http://api.123feng.com:6666/open_api/send_order
            Request Method: GET
        @apiSuccessExample 成功返回示例
            HTTP/1.1 200 OK
            {
                "content": [
                    {
                        "number": "171443407746747445",
                        "status": "ADOPTED",
                        "origin_order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
                    },
                    ......
                ]
            }
        @apiSuccess (OK 200) {String} message 消息
        @apiSuccess (OK 200) {List} content 数据内容
        @apiSuccess (OK 200) {String} content.List.number 快递单号
        @apiSuccess (OK 200) {String} content.List.status 快递状态, 详细请看『状态回调』接口
        @apiSuccess (OK 200) {String} content.List.origin_order_id 来源订单id
        @apiUse ErrorMixin
        """
        pass

    def post(self):
        """
        @api {post} /open_api/send_order 送货单创建
        @apiVersion 0.0.1
        @apiName api_create_order
        @apiGroup BulkSend
        @apiUse AuthHeaderMixin
        @apiParamExample 请求示例:
            Request URL: http://api.123feng.com:6666/open_api/send_order
            Request Method: POST
            Request Payload:
            {
                "origin": {
                    "order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
                    "create_time": "2016-01-12 10:11:23"
                },
                "cargo": {
                    "name": "美味好吃的什锦沙拉一份+清爽有回味的冰咖啡一杯",
                    "weight": 350,
                    "price": 1500
                },
                "sender": {
                    "name": "懒猫洗衣滨江店",
                    "tel": "13012345678",
                    "city": "杭州市",
                    "district": "滨江区",
                    "address": "江陵路2028号星耀城一幢301",
                    "lng": 120.11,
                    "lat": 30.23
                },
                "receiver": {
                    "name": "杨小姐",
                    "tel": "0571-812345678",
                    "city": "杭州市",
                    "district": "滨江区",
                    "address": "滨盛路1509号天恒大厦204",
                    "lng": 120.11,
                    "lat": 30.23
                },
                "expected_fetch_time": "2016-01-12 10:11:23",
                "expected_finish_time": "2016-01-12 10:11:23",
                "remark": "咖啡别撒了,沙拉盒不要翻。告诉杨小姐:健康养生的风先生沙拉对身体好哦,么么哒"
            }
        @apiParam (Request Payload) {Object} content.origin 来源平台信息
        @apiParam (Request Payload) {String} content.origin.order_id 来源订单id
        @apiParam (Request Payload) {String} content.origin.create_time=null 来源订单创建时间,北京时间。
        @apiParam (Request Payload) {Object} content.cargo 货物信息
        @apiParam (Request Payload) {String} content.cargo.name 货物名字
        @apiParam (Request Payload) {Integer} content.cargo.weight=null 货物总重(单位:克)
        @apiParam (Request Payload) {Integer} content.cargo.price=null 货物总价(单位:人民币 分)。注:price=100 指人民币1元
        @apiParam (Request Payload) {Object} content.sender=注册商户信息 发货信息
        @apiParam (Request Payload) {String} content.sender.name 发货人名字
        @apiParam (Request Payload) {String} content.sender.tel 发货电话
        @apiParam (Request Payload) {String} content.sender.city 发货城市
        @apiParam (Request Payload) {String} content.sender.district 发货行政区
        @apiParam (Request Payload) {String} content.sender.address 发货地址
        @apiParam (Request Payload) {Float} content.sender.lng=null 发货位置经度
        @apiParam (Request Payload) {Float} content.sender.lat=null 发货位置纬度
        @apiParam (Request Payload) {Object} content.receiver 收货信息
        @apiParam (Request Payload) {String} content.receiver.name 收货人名字
        @apiParam (Request Payload) {String} content.receiver.tel收货电话
        @apiParam (Request Payload) {String} content.sender.city 收货城市
        @apiParam (Request Payload) {String} content.sender.district 收货行政区
        @apiParam (Request Payload) {String} content.receiver.address 收货地址
        @apiParam (Request Payload) {Float} content.receiver.lng=null 收货位置经度
        @apiParam (Request Payload) {Float} content.receiver.lat=null 收货位置纬度
        @apiParam (Request Payload) {String} content.expected_fetch_time=null 期望取货时间,北京时间。
        @apiParam (Request Payload) {String} content.expected_finish_time=null 期望送达时间,北京时间。
        @apiParam (Request Payload) {Integer} content.remark=null 配送备注
        @apiSuccessExample 成功返回示例
            HTTP/1.1 201 Created
            {
                "number": "000000089281"
            }
        @apiSuccess (Created 201) {String} number 风先生运单号
        @apiUse ErrorMixin
        """
        pass
class OpenApiOneExpressHandler(object):
    """apiDoc container for the single-order detail endpoint.

    The docstring below is apidoc source and preserved verbatim.
    """
    def get(self):
        """
        @api {get} /open_api/send_order/{order_id} 送货单详情查询
        @apiVersion 0.0.1
        @apiName api_get_order_details
        @apiGroup BulkSend
        @apiUse AuthHeaderMixin
        @apiParamExample 请求示例:
            Request URL: http://api.123feng.com:6666/open_api/send_order/000000054526
            Request Method: GET
        @apiSuccessExample 成功返回示例
            {
                "status": "FINISHED",
                "number": "000000000005",
                "remark": "咖啡别撒了,沙拉盒不要翻。告诉杨小姐:健康养生的风先生沙拉对身体好哦,么么哒",
                "create_time": "2016-01-12 10:11:23",
                "image": "http://7qnajq.com2.z0.glb.qiniucdn.com/56456465a46s456aaa",
                "node": {
                    "name": "D3",
                    "id": "561a987b421aa9cf16f453f6"
                },
                "origin": {
                    "order_id": "lanmao-2121",
                    "create_time": "2016-01-12 10:11:23",
                },
                "cargo":{
                    "name": "一盒沙拉",
                    "weight": 120,
                    "price": 1500
                },
                "sender": {
                    "tel": "17808881022",
                    "name": "测试产品商家001",
                    "city": "杭州市",
                    "district": "滨江区",
                    "address": "江陵路1509号天恒大厦",
                    "lat": 30.219866,
                    "lng": 120.22077,
                },
                "receiver": {
                    "lat": "30.210268",
                    "lng": "120.215111",
                    "tel": "13920201113",
                    "name": "杨某某",
                    "city": "杭州市",
                    "district": "滨江区"
                    "address": "武警医院住院部201室"
                },
                "trace": [
                    {
                        "status": "FINISHED",
                        "loc": {
                            "lat": "30.210268",
                            "lng": "120.215111"
                        },
                        "msg": "已上传凭证, 妥投",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "name": "测试黄忠",
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD"
                        },
                        "actual_time": "2016-01-12 10:11:23",
                        "image": "http://qiniu.com/testhash"
                    },
                    {
                        "status": "WAIT_EVIDENCE",
                        "loc": {
                            "lat": "30.210268",
                            "lng": "120.215111"
                        },
                        "msg": "已送达, 等待上传凭证",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "actual_time": "2016-01-12 10:11:23"
                    },
                    {
                        "status": "ERROR",
                        "type": "收方联系不到",
                        "reason": "电话打不通",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "msg": "异常",
                        "actual_time": "2016-01-12 10:11:23",
                    },
                    {
                        "status": "SENDING",
                        "loc": {
                            "lat": 30.218993,
                            "lng": 120.221169
                        },
                        "msg": "到达中转站, 配送员已取货",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试黄忠"
                        },
                        "actual_time": "2016-01-12 10:11:23"
                    },
                    {
                        "status": "ADOPTED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "msg": "城内司机已揽件",
                        "operator": {
                            "tel": "17705717701",
                            "id": 7792622,
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "name": "测试城内司机"
                        },
                        "actual_time": "2016-01-12 10:11:23"
                    },
                    {
                        "status": "SORTED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "msg": "城际司机已揽件",
                        "operator": {
                            "name": "测试城际司机"
                        },
                        "actual_time": "2016-01-12 10:11:23"
                    },
                    {
                        "status": "CREATED",
                        "loc": {
                            "lat": 30.219866,
                            "lng": 120.22077
                        },
                        "remark": "",
                        "msg": "下单成功",
                        "operator": {
                            "tel": "17808881022",
                            "name": "测试产品商家001",
                            "avatar": "http://7qnajq.com2.z0.glb.qiniucdn.com/DCE5352E18844B9FC2B6412B3DACD0BD",
                            "id": 1471,
                        },
                        "actual_time": "2016-01-12 10:11:23"
                    },
                ]
            }
        @apiUse ErrorMixin
        """
        pass
class OpenApiCallbackHandler(object):
    """apiDoc container documenting the partner-side status callback.

    The docstring below is apidoc source and preserved verbatim.
    """
    def post(self):
        """
        @api {post} <callback> 送货单回调
        @apiVersion 0.0.1
        @apiName api_order_callback
        @apiGroup BulkSend
        @apiDescription
            当运单状态改变时,请求预先设置好的回调地址,将订单状态的改变通知对方。如果来源平台返回失败,暂时不支持重新尝试。
        @apiParam (BODY PARAMETERS) {String} number 风先生运单号
        @apiParam (BODY PARAMETERS) {String} status 风先生运单状态
        @apiParam (BODY PARAMETERS) {String} update_time 运单更新时间,北京时间
        @apiParam (BODY PARAMETERS) {String} origin_order_id 对接平台原始订单号
        @apiParamExample 请求示例:
            Request URL: http://callback.your_company.com:8888/update_order?from=mrwind
            Request Method: POST
            Request Payload:
            {
                "number": "201442301916525112",
                "status": "CREATED/ADOPTED/PICKED_UP/FINISHED/ERROR",
                "msg": "已创建/司机已揽件/配送员已取货/签收/异常"
                "update_time": "2016-01-12 10:11:23",
                "origin_order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
            }
        """
        pass
|
boisde/Greed_Island
|
openapi_doc/lazy_cat_open_api_bulk_send.py
|
Python
|
mit
| 21,871
|
from pygraphviz import AGraph
import itertools
import base64
def get_png_bytes(saf, **options):
    """Render the SAF dependency graph to PNG and return it base64-encoded.

    NOTE(review): **options are forwarded to get_graphviz(), but as written
    get_graphviz() accepts no extra keywords, so any non-empty options raise
    TypeError -- confirm the intended signature of get_graphviz().
    """
    g = get_graphviz(saf, **options)
    return base64.b64encode(g.draw(format='png', prog='dot'))
def get_graphviz(saf, **options):
    """
    Create a pygraphviz graph from the SAF dependencies

    Fixes:
    * accepts **options -- get_png_bytes() forwards its keyword options here,
      which previously raised TypeError; extra options are currently ignored.
    * removes the unused local `connected`.
    * iterates with .items() instead of Python-2-only .iteritems() (works on
      both Python 2 and 3).
    """
    g = AGraph(directed=True, strict=False)
    # Every token id that appears as child or parent of some dependency.
    nodeset = set(itertools.chain.from_iterable((t['child'], t['parent'])
                                                for t in saf.saf['dependencies']))
    for n in sorted(nodeset):
        g.add_node(n, **node_hook(saf, n))
    # create edges
    for triple in saf.saf['dependencies']:
        kargs = triple_hook(saf, triple)
        g.add_edge(triple['child'], triple['parent'], **kargs)
    # some theme options
    for obj, attrs in THEME.items():
        for k, v in attrs.items():
            getattr(g, "%s_attr" % obj)[k] = v
    return g
def node_hook(saf, token_id):
    """Return graphviz node attributes for the given token id.

    The label shows "id: word" on the first line and "lemma / pos1" on the
    second; "\\n" is the graphviz line-break escape (a literal backslash-n),
    not a Python newline.

    Fix: removed the unused local `label` and the dead commented-out
    alternative label code.
    """
    token = saf.get_token(token_id)
    labels = ["%s: %s" % (token['id'], token['word'])]
    labels += ["%s / %s" % (token['lemma'], token['pos1'])]
    return {"label": "\\n".join(labels)}
def triple_hook(saf, triple):
    """Return graphviz edge attributes for a dependency triple."""
    return {'label': triple['relation']}
# Graphviz theme defaults applied to every generated graph in get_graphviz():
# graph laid out bottom-to-top ("BT"), rectangular nodes, small fonts.
THEME = {"graph" : {"rankdir" : "BT",
                    "concentrate" : "false"},
         "node" : {"shape" : "rect",
                   "fontsize" : 10},
         "edge" : {"edgesize" : 10,
                   "fontsize" : 10}}
|
vanatteveldt/saf
|
saf/visualize.py
|
Python
|
mit
| 1,686
|
from fabric.api import *
def create_database():
    """Creates role and database"""
    role = 'fit'
    secret = 'password'
    database = 'fit_content'
    statements = (
        'DROP DATABASE IF EXISTS %s' % database,
        'DROP ROLE IF EXISTS %s' % role,
        'CREATE USER %s WITH PASSWORD \'%s\'' % (role, secret),
        'CREATE DATABASE %s WITH OWNER %s' % (database, role),
        # allow db_user create test db when running python manage.py test polls
        'ALTER USER %s CREATEDB' % role,
    )
    # Run every statement through psql as the postgres superuser.
    for statement in statements:
        sudo('psql -c "%s"' % statement, user='postgres')
|
kapucko/fit2gether
|
fabfile.py
|
Python
|
mit
| 652
|
# Copyright (c) 2014 Michael Strosaker
# MIT License
# http://opensource.org/licenses/MIT
import os, sys
from distutils.core import setup
# Read the long description from README.rst when present; fall back to an
# empty string so the script still runs from a checkout without the file.
try:
    with open('README.rst', 'rt') as readme:
        description = readme.read()
except IOError:
    description = ''

setup(
    # metadata
    name='hmm',
    description='Hidden Markov Models',
    long_description=description,
    license='MIT License',
    version='0.10',
    author='Mike Strosaker',
    maintainer='Mike Strosaker',
    author_email='mstrosaker@gmail.com',
    url='https://github.com/mstrosaker/hmm',
    platforms='Cross Platform',
    classifiers = [
        'Programming Language :: Python :: 2',
        ],
    # All packages and sub-packages must be listed here
    py_modules=[
        'hmm',
    ],
)
|
mstrosaker/hmm
|
setup.py
|
Python
|
mit
| 784
|
# -*- coding: utf-8 -*-
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
import math
def is_prime(n):
    """Return True if n is prime, False otherwise.

    Trial division by odd factors up to floor(sqrt(n)).
    Fixes the original, which returned True for every even n > 2 (the odd-only
    factor loop never tested 2) and for n < 2.  Callers in this file only pass
    odd candidates, so the fix is backward compatible for them.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        # even numbers greater than 2 are composite
        return False
    r = math.floor(math.sqrt(n))  # largest integer r with r*r <= n
    f = 3
    while f <= r:
        if n % f == 0:
            return False
        f += 2
    return True
def calc_prime(limit):
    """Return the `limit`-th prime number (1-indexed): calc_prime(6) == 13.

    The original incremented `candidate` after counting it and returned the
    post-incremented value, so it returned (answer + 2), and returned 3 for
    limit == 1 instead of 2.  This version returns the candidate at the
    moment it is counted.
    """
    if limit == 1:
        return 2
    count = 1  # 2 is the first prime, counted up front
    candidate = 3
    while True:
        if is_prime(candidate):
            count += 1
            if count == limit:
                return candidate
        candidate += 2  # only odd candidates need testing
# Python 2 entry point (print statement): prints the 10,001st prime
# (Project Euler problem 7).
if __name__ == '__main__':
    print calc_prime(10001)
|
songtao-yang/ProjectEuler-Python
|
7.10001st prime/problem_7.py
|
Python
|
mit
| 753
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
    # Koan exercises about classes-as-objects, per-instance ("singleton")
    # attributes/methods, and instance vs. static vs. class methods.
    # NOTE(review): the dir() length answers (25, 22) depend on the Python
    # version the koans were solved against -- confirm before reusing.
    class Dog:
        pass

    def test_objects_are_objects(self):
        fido = self.Dog()
        self.assertEqual(True, isinstance(fido, object))

    def test_classes_are_types(self):
        self.assertEqual(True, self.Dog.__class__ == type)

    def test_classes_are_objects_too(self):
        self.assertEqual(True, issubclass(self.Dog, object))

    def test_objects_have_methods(self):
        fido = self.Dog()
        self.assertEqual(25, len(dir(fido)))

    def test_classes_have_methods(self):
        self.assertEqual(25, len(dir(self.Dog)))

    def test_creating_objects_without_defining_a_class(self):
        singularity = object()
        self.assertEqual(22, len(dir(singularity)))

    def test_defining_attributes_on_individual_objects(self):
        fido = self.Dog()
        fido.legs = 4
        self.assertEqual(4, fido.legs)

    def test_defining_functions_on_individual_objects(self):
        fido = self.Dog()
        fido.wag = lambda : 'fidos wag'
        self.assertEqual('fidos wag', fido.wag())

    def test_other_objects_are_not_affected_by_these_singleton_functions(self):
        fido = self.Dog()
        rover = self.Dog()

        def wag():
            return 'fidos wag'
        # attached to fido only, not to the Dog class
        fido.wag = wag

        with self.assertRaises(AttributeError): rover.wag()

    # ------------------------------------------------------------------

    class Dog2:
        def wag(self):
            return 'instance wag'

        def bark(self):
            return "instance bark"

        def growl(self):
            return "instance growl"

        # The definitions below rebind the same names in the class
        # namespace, replacing the instance methods above.
        @staticmethod
        def bark():
            return "staticmethod bark, arg: None"

        @classmethod
        def growl(cls):
            return "classmethod growl, arg: cls=" + cls.__name__

    def test_since_classes_are_objects_you_can_define_singleton_methods_on_them_too(self):
        self.assertRegex(self.Dog2.growl(), 'Dog2')

    def test_classmethods_are_not_independent_of_instance_methods(self):
        fido = self.Dog2()
        self.assertRegex(fido.growl(), 'Dog2')
        self.assertRegex(self.Dog2.growl(), 'Dog2')

    def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
        self.assertRegex(self.Dog2.bark(), "staticmethod bark, arg: None")

    def test_staticmethods_also_overshadow_instance_methods(self):
        fido = self.Dog2()
        self.assertRegex(fido.bark(), "staticmethod bark, arg: None")

    # ------------------------------------------------------------------

    class Dog3:
        def __init__(self):
            self._name = None

        def get_name_from_instance(self):
            return self._name

        def set_name_from_instance(self, name):
            self._name = name

        @classmethod
        def get_name(cls):
            return cls._name

        @classmethod
        def set_name(cls, name):
            cls._name = name

        # property() here wraps classmethod *objects*, which are not plain
        # callables -- using this property raises TypeError (see koan below).
        name = property(get_name, set_name)
        name_from_instance = property(get_name_from_instance, set_name_from_instance)

    def test_classmethods_can_not_be_used_as_properties(self):
        fido = self.Dog3()
        with self.assertRaises(TypeError): fido.name = "Fido"

    def test_classes_and_instances_do_not_share_instance_attributes(self):
        fido = self.Dog3()
        fido.set_name_from_instance("Fido")
        fido.set_name("Rover")
        self.assertEqual('Fido', fido.get_name_from_instance())
        self.assertEqual('Rover', self.Dog3.get_name())

    def test_classes_and_instances_do_share_class_attributes(self):
        fido = self.Dog3()
        fido.set_name("Fido")
        self.assertEqual('Fido', fido.get_name())
        self.assertEqual('Fido', self.Dog3.get_name())

    # ------------------------------------------------------------------

    class Dog4:
        def a_class_method(cls):
            return 'dogs class method'

        def a_static_method():
            return 'dogs static method'

        # Pre-decorator idiom: wrap the plain functions explicitly.
        a_class_method = classmethod(a_class_method)
        a_static_method = staticmethod(a_static_method)

    def test_you_can_define_class_methods_without_using_a_decorator(self):
        self.assertEqual('dogs class method', self.Dog4.a_class_method())

    def test_you_can_define_static_methods_without_using_a_decorator(self):
        self.assertEqual('dogs static method', self.Dog4.a_static_method())

    # ------------------------------------------------------------------

    def test_heres_an_easy_way_to_explicitly_call_class_methods_from_instance_methods(self):
        fido = self.Dog4()
        self.assertEqual('dogs class method', fido.__class__.a_class_method())
|
kernbeisser/python_koans
|
python3/koans/about_class_attributes.py
|
Python
|
mit
| 4,796
|
# Environment configuration for the Sawyer arm: MuJoCo model path and
# episode length (number of timesteps).
config = {
    'modelfile': 'mujoco_models/sawyer/sawyer.xml',
    'T': 2000,
}
|
febert/RoboGym
|
configs/sawyer.py
|
Python
|
mit
| 68
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 06 06:36:03 2015

@author: lzz

Define a prime() function that finds all primes below an integer n
(exclusive; 1 is not prime) and returns them as an ascending list.
Implement a recursive binary-search function bi_search() that looks up
an arbitrary integer's position (index) in the prime list produced by
prime(), returning that index, or -1 if the number is not present.

Input format:
    The first line is a positive integer n.
    Each following line holds one number to look up.

Output format:
    One line per query with the corresponding index.

Sample input:
10
2
4
6
7

Sample output:
0
-1
-1
3
"""
import math
stopword = ""            # sentinel: a blank input line terminates the query loop
str=""                   # NOTE(review): shadows the builtin `str`; rename if refactoring
N=int(raw_input())       # upper bound for the prime list (Python 2 input)
def is_prime(x):
    """Return True if x is prime, False otherwise.

    Trial division by every integer up to sqrt(x).  Fixes the original,
    which returned True for x < 2 and carried an unreachable `break`
    after `return False`.  Callers in this file only pass x >= 2, so the
    change is backward compatible for them.
    """
    if x < 2:
        return False
    for i in range(2, int(math.sqrt(x) + 1)):
        if x % i == 0:
            return False
    return True
def bi_search(nums, low, high, value):
    """Recursive binary search over the sorted slice nums[low..high].

    Returns the index of `value`, or -1 when it is absent (or the range
    is empty).  Uses floor division `//` so the midpoint stays an int on
    Python 3; the original `/` relied on Python 2 integer division and
    raises TypeError when indexing under Python 3.
    """
    if low <= high:
        mid = (low + high) // 2
        if nums[mid] == value:
            return mid
        elif nums[mid] < value:
            return bi_search(nums, mid + 1, high, value)
        else:
            return bi_search(nums, low, mid - 1, value)
    else:
        return -1
# All primes in [2, N), ascending.
nums=[ x for x in range(2,N) if is_prime(x)==True]
# Read query numbers until a blank line (stopword) ends input, joining
# them with '#' so they can be re-split below (Python 2 raw_input).
for line in iter(raw_input, stopword):
    str += (line + '#')
for line in str.split('#'):
    if line !='':
        # Index of each queried number in the prime list, or -1 if absent.
        print bi_search(nums,0,len(nums)-1,int(line))
|
lzz5235/Code-Segment
|
Python/prime_index.py
|
Python
|
mit
| 1,520
|
from sqlalchemy import Column, String, DateTime, Integer
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class shared by all ORM models in this module.
Base = declarative_base()

# Logical model name -> physical table name.
TABLES = {
    'hash': 'hashes'
}
class ImageHash(Base):
    """Row model for the hashes table: an image hash keyed by its value,
    plus the source filename, creation time, and an active flag."""
    __tablename__ = TABLES['hash']

    image_hash = Column(String(16), primary_key=True)
    image_filename = Column(String(255))
    image_created = Column(DateTime)
    active = Column(Integer)

    def __init__(self, **kwargs):
        # Only attributes already declared on the model may be set.
        for attr, value in kwargs.items():
            if not hasattr(self, attr):
                raise ValueError('Trying to set a non-existent attribute')
            setattr(self, attr, value)

    def __repr__(self):
        return str(self.image_filename) + ': ' + str(self.image_hash)

    def as_dict(self):
        """Return a plain dict of column name -> current value."""
        return dict(
            (col.name, getattr(self, col.name))
            for col in self.__table__.columns
        )
|
tistaharahap/images-dhash
|
models/objects.py
|
Python
|
mit
| 840
|
# coding: utf-8
import pip
def import_or_install(package):
    """Ensure `package` is importable, pip-installing it on demand."""
    try:
        __import__(package)
        return
    except ImportError:
        pass
    # Not importable yet -- install via pip's in-process entry point.
    pip.main(['install', package])
# In[21]:
# Ensure every runtime dependency is importable, installing on demand.
# NOTE(review): dotted names such as "azure.common.credentials" are import
# paths, not PyPI package names -- on ImportError they are passed verbatim
# to `pip install`, which may not resolve them; verify.
import_or_install("json")
import_or_install("azure.common.credentials")
import_or_install("azure.mgmt.resource")
import_or_install("azure.mgmt.storage")
import_or_install("azure.storage")
import_or_install("azure.mgmt.resource.resources.models")
import_or_install("azure.storage.blob")
from azure.common.credentials import UserPassCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage import CloudStorageAccount
# from azure.mgmt.resource.resources.models import ContentSettings
from azure.mgmt.resource.resources.models import ResourceGroup
from azure.mgmt.resource.resources.models import Deployment
from azure.mgmt.resource.resources.models import DeploymentProperties
from azure.mgmt.resource.resources.models import DeploymentMode
from azure.mgmt.resource.resources.models import ParametersLink
from azure.mgmt.resource.resources.models import TemplateLink
# In[22]:
# Random-number helpers used to build the unique storage-account suffix.
import_or_install("random")
import random
from random import randint
def random_with_N_digits(n):
    """Return a uniformly random integer with exactly n decimal digits."""
    lo = 10 ** (n - 1)       # smallest n-digit number
    hi = 10 ** n - 1         # largest n-digit number
    return random.randint(lo, hi)
# In[23]:
# Build a (very likely) unique storage account id: fixed prefix + 12 random digits.
athena_user = "hbs"
azunique = str(random_with_N_digits(12))
azurestoreid = athena_user + azunique
print("Your azurestoreid is " + azurestoreid)
# In[24]:
def get_credentials(config_data):
    """Build Azure UserPassCredentials from the saved config dict
    (expects the 'username' and 'password' keys written by LoginFrame)."""
    return UserPassCredentials(config_data["username"], config_data["password"])
def get_subscription(config_data):
    """Return the Azure subscription id stored in the config dict."""
    subscription = config_data["subscription_id"]
    return subscription
# In[25]:
# GUI dependencies for the one-time credential prompt.
import_or_install("tkinter")
import_or_install("tkinter.messagebox")
from tkinter import *
import tkinter.messagebox as tm
class LoginFrame(Frame):
    """Tk frame that prompts for Azure credentials once.

    On Save, the e-mail / password / subscription-id entries are written
    to az_configStudent.json and the module-level Tk root is destroyed.
    """
    def __init__(self, master):
        super().__init__(master)
        self.label_1 = Label(self, text="Azure Email")
        self.label_2 = Label(self, text="Azure Password")
        self.label_3 = Label(self, text="Subscription Id")
        self.entry_1 = Entry(self)
        self.entry_2 = Entry(self, show="*")  # mask the password field
        self.entry_3 = Entry(self)
        self.label_1.grid(row=0, sticky=E)
        self.label_2.grid(row=1, sticky=E)
        self.label_3.grid(row=2,sticky=E)
        self.entry_1.grid(row=0, column=1)
        self.entry_2.grid(row=1, column=1)
        self.entry_3.grid(row=2,column=1)
        # self.checkbox = Checkbutton(self, text="Save Credentials")
        # self.checkbox.grid(columnspan=2)
        self.logbtn = Button(self, text="Save", command = self._login_btn_clickked)
        self.logbtn.grid(columnspan=2)
        self.pack()

    def _login_btn_clickked(self):
        # Collect the three fields and persist them as JSON.
        # NOTE(review): the password is stored in plain text on disk.
        username = self.entry_1.get()
        password = self.entry_2.get()
        subscription_id = self.entry_3.get()
        data = {"username":username, "password":password, "subscription_id":subscription_id}
        # print(data)
        filename = "az_configStudent.json"
        with open(filename, 'w+') as temp_file:
            json.dump(data,temp_file)
        # Close the prompt; `root` is the module-level Tk instance.
        root.destroy()
# In[26]:
##prompt for credentials
# Show the Tk credential prompt only when no saved config exists yet.
import_or_install("json")
import json
import_or_install("pathlib")
from pathlib import Path
my_file = Path("az_configStudent.json")
if not my_file.is_file():
    root = Tk()
    lf = LoginFrame(root)
    root.mainloop()
with open("az_configStudent.json") as data_file:
    data = json.load(data_file)
# In[27]:
credentials = get_credentials(data)
subscription_id = get_subscription(data)
print("Creds have been delivered from:", credentials.cred_store)
# In[28]:
# Azure Resource Manager client bound to the user's subscription.
client = ResourceManagementClient(
    credentials,
    subscription_id
)
print("The client was set up")
# In[29]:
##create resource group
group_name = 'StudentRG'
resource_group_params = {'location':'eastus'}
client.resource_groups.create_or_update(group_name, resource_group_params)
print ("Created Resource Group:", group_name)
# In[30]:
##create storage account template
import json
# In[33]:
##new azure deployment
deployment_name = 'testStudentVM'
# In[34]:
# Incremental ARM deployment of a storage account from a remote template.
template = TemplateLink(
    uri= 'https://raw.githubusercontent.com/dstolts/Azure_Classroom/Jessica/Dev/templates/CustStorageAcct.json'
)
result = client.deployments.create_or_update(
    group_name,
    deployment_name,
    properties=DeploymentProperties(
        mode=DeploymentMode.incremental,
        template_link = template,
        parameters = {
            "storageAccountType": {
                "value": "Standard_LRS"
            },
            "storageAccountName": {
                "value": azurestoreid
            }
        }
    )
)
print("Created Deployment:", deployment_name)
print(deployment_name + " is being deployed...")
result.wait()
# print("The deployment finished successfully")
# In[35]:
##create student vhd
vhdName = "student.vhd"
##destination container name
destContainerName = "studentimage"
# In[36]:
# Fetch the new storage account's primary access key.
from azure.mgmt.storage import StorageManagementClient
storage_client = StorageManagementClient(credentials, subscription_id)
destStorage_keys = storage_client.storage_accounts.list_keys( group_name, azurestoreid)
destStorage_keys = {v.key_name: v.value for v in destStorage_keys.keys}
destStorage_key = destStorage_keys['key1']
print("The deployment finished successfully")
# NOTE(review): this prints the storage account secret key to stdout.
print("Destination key: " +destStorage_key)
# In[37]:
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.storage.models import StorageAccountCreateParameters
from azure.mgmt.storage.models import StorageAccountCreateParameters, Sku, SkuName, Kind
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
# In[43]:
# Create the target container in storage
destStorageAcct = azurestoreid
block_blob_service = BlockBlobService(account_name=destStorageAcct, account_key=destStorage_key)
# In[46]:
print("Creating the container...")
block_blob_service.create_container(destContainerName)
print("Updating the container access level")
# Container-level public read access.
block_blob_service.set_container_acl(destContainerName, public_access=PublicAccess.Container)
# In[48]:
# Server-side copy of a public source blob into the new container, then
# list and print the resulting blob URLs.
print("starting the Azure Copy...")
blob_url="https://classroomtestimage.blob.core.windows.net/publicimage/testBlob.json"
block_blob_service.copy_blob(destContainerName, "testBlob.json", blob_url)
generator = block_blob_service.list_blobs(destContainerName)
for blob in generator:
    blob_url = block_blob_service.make_blob_url(destContainerName, blob.name)
    print(blob_url)
|
dstolts/Azure_Classroom
|
Python/deployStudentVM.py
|
Python
|
mit
| 6,844
|
# -*- coding: utf-8 -*-
"""Includes all the fields classes from `marshmallow.fields` as well as
fields for serializing JSON API-formatted hyperlinks.
"""
from marshmallow import ValidationError
# Make core fields importable from marshmallow_jsonapi
from marshmallow.fields import * # noqa
from marshmallow.utils import get_value, is_collection
from .utils import resolve_params
class BaseRelationship(Field):
    """Base relationship field.

    This is used by `marshmallow_jsonapi.Schema` to determine which
    fields should be formatted as relationship objects.

    See: http://jsonapi.org/format/#document-resource-object-relationships
    """
    # Marker base class only -- concrete behavior lives in subclasses
    # such as `Relationship` below.
    pass
class Relationship(BaseRelationship):
    """Framework-independent field which serializes to a "relationship object".

    See: http://jsonapi.org/format/#document-resource-object-relationships

    Examples: ::

        author = Relationship(
            related_url='/authors/{author_id}',
            related_url_kwargs={'author_id': '<author.id>'},
        )

        comments = Relationship(
            related_url='/posts/{post_id}/comments/',
            related_url_kwargs={'post_id': '<id>'},
            many=True, include_data=True,
            type_='comments'
        )

    This field is read-only by default.

    :param str related_url: Format string for related resource links.
    :param dict related_url_kwargs: Replacement fields for `related_url`. String arguments
        enclosed in `< >` will be interpreted as attributes to pull from the target object.
    :param str self_url: Format string for self relationship links.
    :param dict self_url_kwargs: Replacement fields for `self_url`. String arguments
        enclosed in `< >` will be interpreted as attributes to pull from the target object.
    :param bool include_data: Whether to include a resource linkage
        (http://jsonapi.org/format/#document-resource-object-linkage) in the serialized result.
    :param bool many: Whether the relationship represents a many-to-one or many-to-many
        relationship. Only affects serialization of the resource linkage.
    :param str type_: The type of resource.
    :param str id_field: Attribute name to pull ids from if a resource linkage is included.
    """
    # Class-level default; may be overridden per instance via the
    # `id_field` constructor argument.
    id_field = 'id'

    def __init__(
        self,
        related_url='', related_url_kwargs=None,
        self_url='', self_url_kwargs=None,
        include_data=False, many=False, type_=None, id_field=None, **kwargs
    ):
        self.related_url = related_url
        self.related_url_kwargs = related_url_kwargs or {}
        self.self_url = self_url
        self.self_url_kwargs = self_url_kwargs or {}
        # A resource linkage must carry a `type`, so `type_` is mandatory
        # whenever `include_data` is requested.
        if include_data and not type_:
            raise ValueError('include_data=True requires the type_ argument.')
        self.many = many
        self.include_data = include_data
        self.type_ = type_
        self.id_field = id_field or self.id_field
        super(Relationship, self).__init__(**kwargs)

    def get_related_url(self, obj):
        # Fill the related-URL template from attributes of `obj`;
        # None when no template was configured.
        if self.related_url:
            kwargs = resolve_params(obj, self.related_url_kwargs)
            return self.related_url.format(**kwargs)
        return None

    def get_self_url(self, obj):
        # Same as get_related_url, for the relationship's self link.
        if self.self_url:
            kwargs = resolve_params(obj, self.self_url_kwargs)
            return self.self_url.format(**kwargs)
        return None

    def add_resource_linkage(self, value):
        """Build the `data` member (resource linkage) for `value`:
        a list of {type, id} objects when `many`, else a single one."""
        def stringify(value):
            # JSON API ids are strings; preserve None as-is.
            if value is not None:
                return str(value)
            return value
        if self.many:
            included_data = [{
                'type': self.type_,
                'id': stringify(get_value(self.id_field, each, each))
            } for each in value]
        else:
            included_data = {
                'type': self.type_,
                'id': stringify(get_value(self.id_field, value, value))
            }
        return included_data

    def extract_value(self, data):
        """Extract the id key and validate the request structure."""
        errors = []
        if 'id' not in data:
            errors.append('Must have an `id` field')
        if 'type' not in data:
            errors.append('Must have a `type` field')
        elif data['type'] != self.type_:
            errors.append('Invalid `type` specified')
        if errors:
            raise ValidationError(errors)
        return data.get('id')

    def deserialize(self, value, attr=None, data=None):
        """Deserialize ``value``.

        :raise ValidationError: If the value is not type `dict`, if the
            value does not contain a `data` key, and if the value is
            required but unspecified.
        """
        if not isinstance(value, dict) or 'data' not in value:
            raise ValidationError('Must include a `data` key')
        # Unwrap the `data` member and let the base class handle the rest.
        return super(Relationship, self).deserialize(value['data'], attr, data)

    def _deserialize(self, value, attr, obj):
        # Cardinality check: `many` expects a list-like linkage,
        # single expects a lone {type, id} object.
        if self.many:
            if not is_collection(value):
                raise ValidationError('Relationship is list-like')
            return [self.extract_value(item) for item in value]

        if is_collection(value):
            raise ValidationError('Relationship is not list-like')
        return self.extract_value(value)

    def _serialize(self, value, attr, obj):
        # Use the parent schema's dict class (may be an ordered dict).
        dict_class = self.parent.dict_class if self.parent else dict
        ret = dict_class()
        self_url = self.get_self_url(obj)
        related_url = self.get_related_url(obj)
        if self_url or related_url:
            ret['links'] = dict_class()
            if self_url:
                ret['links']['self'] = self_url
            if related_url:
                ret['links']['related'] = related_url
        if self.include_data:
            if value is None:
                # Empty linkage: [] for to-many, null for to-one.
                ret['data'] = [] if self.many else None
            else:
                ret['data'] = self.add_resource_linkage(value)
        return ret
|
Tim-Erwin/marshmallow-jsonapi
|
marshmallow_jsonapi/fields.py
|
Python
|
mit
| 5,972
|
# -*- coding: utf-8 -*-
import pytest
from dotenvfile import loads
def test_valid_envfile():
    """A simple two-variable file parses into the expected mapping."""
    content = '\n'.join(['FOO=1', 'BAR=abc'])
    parsed = loads(content)
    assert parsed == {'FOO': '1', 'BAR': 'abc'}
def test_continuing_line():
    """A trailing backslash joins a line with the one that follows it."""
    content = '\n'.join(['FOO=abc\\', 'def', 'BAR=hij'])
    parsed = loads(content)
    assert parsed == {'FOO': 'abcdef', 'BAR': 'hij'}
def test_continuing_line_leading_whitespace():
    """Leading whitespace on the continuation line is stripped when joining."""
    content = '\n'.join(['FOO=abc\\', '  def', 'BAR=hij'])
    parsed = loads(content)
    assert parsed == {'FOO': 'abcdef', 'BAR': 'hij'}
def test_duplicate_variable():
    """Re-defining a variable is rejected with a descriptive error message."""
    content = '\n'.join(['FOO=1', 'FOO=2'])
    with pytest.raises(ValueError) as error:
        print(loads(content))
    assert error.value.args[0] == [
        'Line 2: duplicate environment variable "FOO": already appears on line 1.',
    ]
|
smartmob-project/dotenvfile
|
tests/test_parser.py
|
Python
|
mit
| 951
|
from multiprocessing import Lock
import pytest
from dash import Dash, Input, Output, dcc, html
from dash.testing.wait import until
def test_rdls001_multi_loading_components(dash_duo):
    # Three dcc.Loading wrappers (default / circle / cube) are fed by one
    # callback; holding `lock` keeps the callback open so all spinners
    # are observable simultaneously, both on initial render and after an
    # input edit re-triggers the callback.
    lock = Lock()

    app = Dash(__name__)
    app.layout = html.Div(
        children=[
            html.H3("Edit text input to see loading state"),
            dcc.Input(id="input-3", value="Input triggers the loading states"),
            dcc.Loading(
                className="loading-1",
                children=[html.Div(id="loading-output-1")],
                type="default",
            ),
            html.Div(
                [
                    dcc.Loading(
                        className="loading-2",
                        children=[html.Div([html.Div(id="loading-output-2")])],
                        type="circle",
                    ),
                    dcc.Loading(
                        className="loading-3",
                        children=dcc.Graph(id="graph"),
                        type="cube",
                    ),
                ]
            ),
        ],
    )

    @app.callback(
        [
            Output("graph", "figure"),
            Output("loading-output-1", "children"),
            Output("loading-output-2", "children"),
        ],
        [Input("input-3", "value")],
    )
    def input_triggers_nested(value):
        # Blocks on `lock` so the test controls how long loading lasts.
        with lock:
            return dict(data=[dict(y=[1, 4, 2, 3])]), value, value

    def wait_for_all_spinners():
        # One spinner element per Loading type must be present.
        dash_duo.find_element(".loading-1 .dash-spinner.dash-default-spinner")
        dash_duo.find_element(".loading-2 .dash-spinner.dash-sk-circle")
        dash_duo.find_element(".loading-3 .dash-spinner.dash-cube-container")

    def wait_for_no_spinners():
        dash_duo.wait_for_no_elements(".dash-spinner")

    # Initial render: spinners show while the first callback is blocked,
    # then disappear once the lock is released.
    with lock:
        dash_duo.start_server(app)
        wait_for_all_spinners()
    wait_for_no_spinners()

    # Editing the input re-triggers the callback and the spinners again.
    with lock:
        dash_duo.find_element("#input-3").send_keys("X")
        wait_for_all_spinners()
    wait_for_no_spinners()
def test_rdls002_chained_loading_states(dash_duo):
    # Chained callbacks 1 -> 2 -> (3, 4); each lock gates one stage so the
    # loading state can be observed propagating down the chain one step
    # at a time.
    lock1, lock2, lock34 = Lock(), Lock(), Lock()
    app = Dash(__name__)

    def loading_wrapped_div(_id, color):
        # A colored square wrapped in dcc.Loading; class name == target id.
        return html.Div(
            dcc.Loading(
                html.Div(
                    id=_id,
                    style={"width": 200, "height": 200, "backgroundColor": color},
                ),
                className=_id,
            ),
            style={"display": "inline-block"},
        )

    app.layout = html.Div(
        [
            html.Button(id="button", children="Start", n_clicks=0),
            loading_wrapped_div("output-1", "hotpink"),
            loading_wrapped_div("output-2", "rebeccapurple"),
            loading_wrapped_div("output-3", "green"),
            loading_wrapped_div("output-4", "#FF851B"),
        ]
    )

    @app.callback(Output("output-1", "children"), [Input("button", "n_clicks")])
    def update_output_1(n_clicks):
        with lock1:
            return "Output 1: {}".format(n_clicks)

    @app.callback(Output("output-2", "children"), [Input("output-1", "children")])
    def update_output_2(children):
        with lock2:
            return "Output 2: {}".format(children)

    @app.callback(
        [Output("output-3", "children"), Output("output-4", "children")],
        [Input("output-2", "children")],
    )
    def update_output_34(children):
        with lock34:
            return "Output 3: {}".format(children), "Output 4: {}".format(children)

    dash_duo.start_server(app)

    def find_spinners(*nums):
        # With no args: assert no spinner anywhere; otherwise assert that
        # exactly the given outputs (and no others) are spinning.
        if not nums:
            dash_duo.wait_for_no_elements(".dash-spinner")
            return
        for n in nums:
            dash_duo.find_element(".output-{} .dash-spinner".format(n))
        assert len(dash_duo.find_elements(".dash-spinner")) == len(nums)

    def find_text(spec):
        # Expected text after each stage; template index n-1 matches output n.
        templates = [
            "Output 1: {}",
            "Output 2: Output 1: {}",
            "Output 3: Output 2: Output 1: {}",
            "Output 4: Output 2: Output 1: {}",
        ]
        for n, v in spec.items():
            dash_duo.wait_for_text_to_equal(
                "#output-{}".format(n), templates[n - 1].format(v)
            )

    find_text({1: 0, 2: 0, 3: 0, 4: 0})
    find_spinners()

    btn = dash_duo.find_element("#button")
    # Can't use lock context managers here, because we want to acquire the
    # second lock before releasing the first
    lock1.acquire()
    btn.click()

    find_spinners(1)
    find_text({2: 0, 3: 0, 4: 0})

    lock2.acquire()
    lock1.release()

    find_spinners(2)
    find_text({1: 1, 3: 0, 4: 0})

    lock34.acquire()
    lock2.release()

    find_spinners(3, 4)
    find_text({1: 1, 2: 1})

    lock34.release()

    find_spinners()
    find_text({1: 1, 2: 1, 3: 1, 4: 1})
@pytest.mark.parametrize(
    "kwargs, expected_update_title, clientside_title",
    [
        ({}, "Updating...", False),
        ({"update_title": None}, "Dash", False),
        ({"update_title": ""}, "Dash", False),
        ({"update_title": "Hello World"}, "Hello World", False),
        ({}, "Updating...", True),
        ({"update_title": None}, "Dash", True),
        ({"update_title": ""}, "Dash", True),
        ({"update_title": "Hello World"}, "Hello World", True),
    ],
)
def test_rdls003_update_title(
    dash_duo, kwargs, expected_update_title, clientside_title
):
    # Verifies the document title: the configurable `update_title` while a
    # callback is running, and the app title (or a clientside-set page
    # title) once idle.
    app = Dash("Dash", **kwargs)
    lock = Lock()
    app.layout = html.Div(
        children=[
            html.H3("Press button see document title updating"),
            html.Div(id="output"),
            html.Button("Update", id="button", n_clicks=0),
            html.Button("Update Page", id="page", n_clicks=0),
            html.Div(id="dummy"),
        ]
    )

    if clientside_title:
        app.clientside_callback(
            """
            function(n_clicks) {
                document.title = 'Page ' + n_clicks;
                return 'Page ' + n_clicks;
            }
            """,
            Output("dummy", "children"),
            [Input("page", "n_clicks")],
        )

    @app.callback(Output("output", "children"), [Input("button", "n_clicks")])
    def update(n):
        # Held open by `lock` so the "busy" title can be observed.
        with lock:
            return n

    with lock:
        dash_duo.start_server(app)
        # check for update-title during startup
        # the clientside callback isn't blocking so it may update the title
        if not clientside_title:
            until(lambda: dash_duo.driver.title == expected_update_title, timeout=1)

    # check for original title after loading
    # NOTE(review): by ternary precedence this lambda evaluates to
    # (title == "Page 0") if clientside_title else "Dash" -- i.e. the truthy
    # string "Dash" when clientside_title is False, making the check vacuous
    # in that branch; verify intent.
    until(
        lambda: dash_duo.driver.title == "Page 0" if clientside_title else "Dash",
        timeout=1,
    )

    with lock:
        dash_duo.find_element("#button").click()
        # check for update-title while processing callback
        if clientside_title and not kwargs.get("update_title", True):
            until(lambda: dash_duo.driver.title == "Page 0", timeout=1)
        else:
            until(lambda: dash_duo.driver.title == expected_update_title, timeout=1)

    if clientside_title:
        dash_duo.find_element("#page").click()
        dash_duo.wait_for_text_to_equal("#dummy", "Page 1")
        until(lambda: dash_duo.driver.title == "Page 1", timeout=1)

    # verify that when a separate callback runs, the page title gets restored
    dash_duo.find_element("#button").click()
    dash_duo.wait_for_text_to_equal("#output", "2")
    if clientside_title:
        until(lambda: dash_duo.driver.title == "Page 1", timeout=1)
    else:
        until(lambda: dash_duo.driver.title == "Dash", timeout=1)
@pytest.mark.parametrize(
    "update_title",
    [None, "Custom Update Title"],
)
def test_rdls004_update_title_chained_callbacks(dash_duo, update_title):
    # A clientside document.title change must survive the serverside
    # callback it triggers: after the chain settles, the title stays
    # "Page 1" rather than reverting to the app title.
    initial_title = "Initial Title"
    app = Dash("Dash", title=initial_title, update_title=update_title)
    lock = Lock()
    app.layout = html.Div(
        children=[
            html.Button(id="page-title", n_clicks=0, children="Page Title"),
            html.Div(id="page-output"),
            html.Div(id="final-output"),
        ]
    )
    app.clientside_callback(
        """
        function(n_clicks) {
            if (n_clicks > 0) {
                document.title = 'Page ' + n_clicks;
            }
            return n_clicks;
        }
        """,
        Output("page-output", "children"),
        [Input("page-title", "n_clicks")],
    )

    @app.callback(
        Output("final-output", "children"), [Input("page-output", "children")]
    )
    def update(n):
        # Held open by `lock` so the busy title can be observed mid-chain.
        with lock:
            return n

    # check for original title after loading
    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#final-output", "0")
    until(lambda: dash_duo.driver.title == initial_title, timeout=1)

    with lock:
        dash_duo.find_element("#page-title").click()
        # check for update-title while processing the serverside callback
        if update_title:
            until(lambda: dash_duo.driver.title == update_title, timeout=1)
        else:
            until(lambda: dash_duo.driver.title == "Page 1", timeout=1)

    dash_duo.wait_for_text_to_equal("#final-output", "1")
    until(lambda: dash_duo.driver.title == "Page 1", timeout=1)
|
plotly/dash
|
tests/integration/renderer/test_loading_states.py
|
Python
|
mit
| 9,265
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Deferred import to avoid circular imports between generated models;
    # publishes BankTransferFailure into this module's globals on demand.
    from plaid.model.bank_transfer_failure import BankTransferFailure
    globals()['BankTransferFailure'] = BankTransferFailure
class SandboxBankTransferSimulateRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum restrictions or value validations for this model.
    allowed_values = {
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'bank_transfer_id': (str,),  # noqa: E501
            'event_type': (str,),  # noqa: E501
            'client_id': (str,),  # noqa: E501
            'secret': (str,),  # noqa: E501
            'failure_reason': (BankTransferFailure,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'bank_transfer_id': 'bank_transfer_id',  # noqa: E501
        'event_type': 'event_type',  # noqa: E501
        'client_id': 'client_id',  # noqa: E501
        'secret': 'secret',  # noqa: E501
        'failure_reason': 'failure_reason',  # noqa: E501
    }

    _composed_schemas = {}

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, bank_transfer_id, event_type, *args, **kwargs):  # noqa: E501
        """SandboxBankTransferSimulateRequest - a model defined in OpenAPI

        Args:
            bank_transfer_id (str): Plaid’s unique identifier for a bank transfer.
            event_type (str): The asynchronous event to be simulated. May be: `posted`, `failed`, or `reversed`. An error will be returned if the event type is incompatible with the current transfer status. Compatible status --> event type transitions include: `pending` --> `failed` `pending` --> `posted` `posted` --> `reversed`

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body.. [optional]  # noqa: E501
            secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body.. [optional]  # noqa: E501
            failure_reason (BankTransferFailure): [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remainder as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.bank_transfer_id = bank_transfer_id
        self.event_type = event_type
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
plaid/plaid-python
|
plaid/model/sandbox_bank_transfer_simulate_request.py
|
Python
|
mit
| 8,024
|
import base64
import json
from kounta.objects import Company
try:
import urllib.request as urllib2
except ImportError:
import urllib2
class BasicClient:
    """
    BasicClient makes sure the same URL requested will not make another external
    request by caching the results of that URL for the lifetime of the client.

    This is particularly useful when doing lots of calls on the same or similar
    data. However, this may cause an issue when you update data through the API
    and get the old cached data returned the next time that endpoint is
    requested. So you can erase all cache with the reset_cache() method.
    """
    def __init__(self, client_id, client_secret):
        """
        :type client_id: str
        :type client_secret: str
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self._cache = URLCache()
    def _fetch_url(self, url):
        """
        Perform the actual HTTP request against the Kounta API.

        This is an internal method; if you need to download an arbitrary
        endpoint, see get_url().

        :param url: endpoint path, e.g. '/v1/companies/me.json'
        :type url: str
        :rtype: bytes
        """
        credentials = self.client_id + ':' + self.client_secret
        # base64.b64encode() requires bytes on Python 3 (passing a str raises
        # TypeError) and returns bytes, so encode before and decode after to
        # build a str header value that works on both Python 2 and Python 3.
        encoded = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
        headers = {
            "Authorization": "Basic " + encoded
        }
        request = urllib2.Request('https://api.kounta.com' + url,
                                  headers=headers)
        return urllib2.urlopen(request).read()
    def get_url(self, url):
        """
        Get a URL (API endpoint). This makes use of URL caching (see class
        description).

        :type url: str
        :rtype: dict
        """
        # URLCache returns None for never-fetched URLs, so this fetches and
        # parses each endpoint at most once per client lifetime.
        if self._cache[url] is None:
            self._cache[url] = json.loads(self._fetch_url(url))
        return self._cache[url]
    @property
    def company(self):
        """
        Fetch the company. This is the starting point for all API requests. The
        Company object will expose more methods to fetch further endpoints.

        :rtype: Company
        """
        return Company(self.get_url('/v1/companies/me.json'), self, None)
    def reset_cache(self):
        """
        Drop all cached responses by replacing the cache wholesale.

        This is a crude way of handling the dropping of all cache.
        In the future there should be a way of selectively deleting cache.
        """
        self._cache = URLCache()
class URLCache:
    """Minimal dict-backed cache; reads of unknown keys yield None."""
    def __init__(self):
        # Public backing store mapping URL -> parsed response.
        self.cache = {}
    def __getitem__(self, key):
        # Missing entries read as None instead of raising KeyError, which is
        # what callers use to detect "not fetched yet".
        return self.cache[key] if key in self.cache else None
    def __setitem__(self, key, value):
        self.cache[key] = value
|
elliotchance/kounta-python
|
kounta/client.py
|
Python
|
mit
| 2,510
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
from scrapy.contrib import exporter
class JsonItemExporter(exporter.JsonItemExporter):
    """JSON item exporter with ``ensure_ascii`` disabled, so non-ASCII
    characters are written literally rather than as ``\\uXXXX`` escapes."""
    def __init__(self, file, **kwargs):
        self._configure(kwargs)
        self.file = file
        self.first_item = True
        self.encoder = exporter.ScrapyJSONEncoder(ensure_ascii=False, **kwargs)
    def export_item(self, item):
        # Emit a separator before every item except the very first.
        if not self.first_item:
            self.file.write(',\n')
        self.first_item = False
        # Coerce unicode keys to str to avoid decoding errors on write.
        itemdict = {
            self._to_str_if_unicode(field): value
            for field, value in self._get_serialized_fields(item)
        }
        self.file.write(self.encoder.encode(itemdict))
class JavascriptItemExporter(JsonItemExporter):
    """Emit items as a JavaScript file for use by test-viewer.html."""
    def start_exporting(self):
        # Prefix the output with an assignment so the file is loadable as a
        # script ("data = [...]") instead of being plain JSON.
        self.file.write("data = [\n")
|
D6C92FE5/oucfeed.crawler
|
oucfeed/crawler/exporters.py
|
Python
|
mit
| 1,012
|