| repo_name | ref | path | copies | content |
|---|---|---|---|---|
ryanneufeld/OctoPrint
|
refs/heads/master
|
tests/filemanager/test_localstorage.py
|
6
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import unittest
import os
import mock
from ddt import ddt, unpack, data
import octoprint.filemanager.storage
class FileWrapper(object):
def __init__(self, filename):
self.path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "_files", filename)
import hashlib
blocksize = 65536
hash = hashlib.sha1()
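# read in 64 KiB blocks so large fixture files never have to fit in memory at once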
with open(self.path, "rb") as f:
buffer = f.read(blocksize)
while len(buffer) > 0:
hash.update(buffer)
buffer = f.read(blocksize)
self.hash = hash.hexdigest()
def save(self, destination):
import shutil
shutil.copy(self.path, destination)
FILE_BP_CASE_STL = FileWrapper("bp_case.stl")
FILE_BP_CASE_GCODE = FileWrapper("bp_case.gcode")
FILE_CRAZYRADIO_STL = FileWrapper("crazyradio.stl")
@ddt
class LocalStorageTest(unittest.TestCase):
def setUp(self):
import tempfile
self.basefolder = tempfile.mkdtemp()
self.storage = octoprint.filemanager.storage.LocalFileStorage(self.basefolder)
# mock file manager module
self.filemanager_patcher = mock.patch("octoprint.filemanager")
self.filemanager = self.filemanager_patcher.start()
self.filemanager.valid_file_type.return_value = True
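# stand-in for octoprint.filemanager.get_file_type: maps a filename extension to its (type, subtype) list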
def get_file_type(name):
if name.lower().endswith(".stl"):
return ["model", "stl"]
elif name.lower().endswith(".gco") or name.lower().endswith(".gcode") or name.lower().endswith(".g"):
return ["machinecode", "gcode"]
else:
return None
self.filemanager.get_file_type.side_effect = get_file_type
def tearDown(self):
import shutil
shutil.rmtree(self.basefolder)
self.filemanager_patcher.stop()
def test_add_file(self):
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
def test_add_file_overwrite(self):
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
try:
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, overwrite=False)
except:
pass
self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, overwrite=True)
def test_add_file_with_web(self):
import time
href = "http://www.example.com"
retrieved = time.time()
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL, links=[("web", dict(href=href, retrieved=retrieved))])
stl_metadata = self.storage.get_metadata(stl_name)
self.assertIsNotNone(stl_metadata)
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertTrue("web", link["rel"])
self.assertTrue("href" in link)
self.assertEquals(href, link["href"])
self.assertTrue("retrieved" in link)
self.assertEquals(retrieved, link["retrieved"])
def test_add_file_with_association(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=stl_name))])
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_remove_file(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=stl_name))])
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertIsNotNone(stl_metadata)
self.assertIsNotNone(gcode_metadata)
self.storage.remove_file(stl_name)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, stl_name)))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertIsNone(stl_metadata)
self.assertIsNotNone(gcode_metadata)
self.assertEquals(0, len(gcode_metadata["links"]))
def test_add_folder(self):
self._add_folder("test", "test")
def test_add_subfolder(self):
folder_name = self._add_folder("folder with some spaces", "folder_with_some_spaces")
subfolder_name = self._add_folder((folder_name, "subfolder"), folder_name + "/subfolder")
stl_name = self._add_file((subfolder_name, "bp_case.stl"), subfolder_name + "/bp_case.stl", FILE_BP_CASE_STL)
self.assertTrue(os.path.exists(os.path.join(self.basefolder, folder_name)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, subfolder_name)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, stl_name)))
def test_remove_folder(self):
content_folder = self._add_folder("content", "content")
other_stl_name = self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
empty_folder = self._add_folder("empty", "empty")
try:
self.storage.remove_folder(content_folder, recursive=False)
except:
self.assertTrue(os.path.exists(os.path.join(self.basefolder, content_folder)))
self.assertTrue(os.path.isdir(os.path.join(self.basefolder, content_folder)))
self.assertTrue(os.path.exists(os.path.join(self.basefolder, other_stl_name)))
self.assertIsNotNone(self.storage.get_metadata(other_stl_name))
self.storage.remove_folder(content_folder, recursive=True)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, content_folder)))
self.assertFalse(os.path.isdir(os.path.join(self.basefolder, content_folder)))
self.storage.remove_folder(empty_folder, recursive=False)
self.assertFalse(os.path.exists(os.path.join(self.basefolder, empty_folder)))
self.assertFalse(os.path.isdir(os.path.join(self.basefolder, empty_folder)))
def test_remove_folder_with_metadata(self):
content_folder = self._add_folder("content", "content")
other_stl_name = self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
self.storage.remove_file(other_stl_name)
self.storage.remove_folder(content_folder, recursive=False)
def test_list(self):
bp_case_stl = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE, links=[("model", dict(name=bp_case_stl))])
content_folder = self._add_folder("content", "content")
self._add_file((content_folder, "crazyradio.stl"), content_folder + "/crazyradio.stl", FILE_CRAZYRADIO_STL)
self._add_folder("empty", "empty")
file_list = self.storage.list_files()
self.assertEquals(4, len(file_list))
self.assertTrue("bp_case.stl" in file_list)
self.assertTrue("bp_case.gcode" in file_list)
self.assertTrue("content" in file_list)
self.assertTrue("empty" in file_list)
self.assertEquals("model", file_list["bp_case.stl"]["type"])
self.assertEquals(FILE_BP_CASE_STL.hash, file_list["bp_case.stl"]["hash"])
self.assertEquals("machinecode", file_list["bp_case.gcode"]["type"])
self.assertEquals(FILE_BP_CASE_GCODE.hash, file_list["bp_case.gcode"]["hash"])
self.assertEquals("folder", file_list[content_folder]["type"])
self.assertEquals(1, len(file_list[content_folder]["children"]))
self.assertTrue("crazyradio.stl" in file_list["content"]["children"])
self.assertEquals("model", file_list["content"]["children"]["crazyradio.stl"]["type"])
self.assertEquals(FILE_CRAZYRADIO_STL.hash, file_list["content"]["children"]["crazyradio.stl"]["hash"])
self.assertEquals("folder", file_list["empty"]["type"])
self.assertEquals(0, len(file_list["empty"]["children"]))
def test_add_link_model(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(gcode_name, "model", dict(name=stl_name))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_add_link_machinecode(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(stl_name, "machinecode", dict(name=gcode_name))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
# forward link
self.assertEquals(1, len(gcode_metadata["links"]))
link = gcode_metadata["links"][0]
self.assertEquals("model", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(stl_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_STL.hash, link["hash"])
# reverse link
self.assertEquals(1, len(stl_metadata["links"]))
link = stl_metadata["links"][0]
self.assertEquals("machinecode", link["rel"])
self.assertTrue("name" in link)
self.assertEquals(gcode_name, link["name"])
self.assertTrue("hash" in link)
self.assertEquals(FILE_BP_CASE_GCODE.hash, link["hash"])
def test_remove_link(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
self.storage.add_link(stl_name, "web", dict(href="http://www.example.com"))
self.storage.add_link(stl_name, "web", dict(href="http://www.example2.com"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(2, len(stl_metadata["links"]))
self.storage.remove_link(stl_name, "web", dict(href="http://www.example.com"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(1, len(stl_metadata["links"]))
self.storage.remove_link(stl_name, "web", dict(href="wrong_href"))
stl_metadata = self.storage.get_metadata(stl_name)
self.assertEquals(1, len(stl_metadata["links"]))
def test_remove_link_bidirectional(self):
stl_name = self._add_file("bp_case.stl", "bp_case.stl", FILE_BP_CASE_STL)
gcode_name = self._add_file("bp_case.gcode", "bp_case.gcode", FILE_BP_CASE_GCODE)
self.storage.add_link(stl_name, "machinecode", dict(name=gcode_name))
self.storage.add_link(stl_name, "web", dict(href="http://www.example.com"))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertEquals(1, len(gcode_metadata["links"]))
self.assertEquals(2, len(stl_metadata["links"]))
self.storage.remove_link(gcode_name, "model", dict(name=stl_name, hash=FILE_BP_CASE_STL.hash))
stl_metadata = self.storage.get_metadata(stl_name)
gcode_metadata = self.storage.get_metadata(gcode_name)
self.assertEquals(0, len(gcode_metadata["links"]))
self.assertEquals(1, len(stl_metadata["links"]))
@data(
("some_file.gco", "some_file.gco"),
("some_file with (parentheses) and ümläuts and digits 123.gco", "some_file_with_(parentheses)_and_umlauts_and_digits_123.gco"),
("pengüino pequeño.stl", "penguino_pequeno.stl")
)
@unpack
def test_sanitize_name(self, input, expected):
actual = self.storage.sanitize_name(input)
self.assertEquals(expected, actual)
@data(
"some/folder/still/left.gco",
"also\\no\\backslashes.gco"
)
def test_sanitize_name_invalid(self, input):
try:
self.storage.sanitize_name(input)
self.fail("expected a ValueError")
except ValueError as e:
self.assertEquals("name must not contain / or \\", e.message)
@data(
("folder/with/subfolder", "/folder/with/subfolder"),
("folder/with/subfolder/../other/folder", "/folder/with/other/folder"),
("/folder/with/leading/slash", "/folder/with/leading/slash"),
("folder/with/leading/dot", "/folder/with/leading/dot")
)
@unpack
def test_sanitize_path(self, input, expected):
actual = self.storage.sanitize_path(input)
self.assertTrue(actual.startswith(self.basefolder))
self.assertEquals(expected, actual[len(self.basefolder):].replace(os.path.sep, "/"))
@data(
"../../folder/out/of/the/basefolder",
"some/folder/../../../and/then/back"
)
def test_sanitize_path_invalid(self, input):
try:
self.storage.sanitize_path(input)
self.fail("expected a ValueError")
except ValueError as e:
self.assertTrue(e.message.startswith("path not contained in base folder: "))
@data(
("some/folder/and/some file.gco", "/some/folder/and", "some_file.gco"),
(("some", "folder", "and", "some file.gco"), "/some/folder/and", "some_file.gco"),
("some file.gco", "/", "some_file.gco"),
(("some file.gco",), "/", "some_file.gco"),
("", "/", ""),
("some/folder/with/trailing/slash/", "/some/folder/with/trailing/slash", ""),
(("some", "folder", ""), "/some/folder", "")
)
@unpack
def test_sanitize(self, input, expected_path, expected_name):
actual = self.storage.sanitize(input)
self.assertTrue(isinstance(actual, tuple))
self.assertEquals(2, len(actual))
actual_path, actual_name = actual
self.assertTrue(actual_path.startswith(self.basefolder))
actual_path = actual_path[len(self.basefolder):].replace(os.path.sep, "/")
if not actual_path.startswith("/"):
# if the actual path originally was just the base folder, we just stripped
# away everything, so let's add a / again so the behaviour matches the
# other preprocessing of our test data here
actual_path = "/" + actual_path
self.assertEquals(expected_path, actual_path)
self.assertEquals(expected_name, actual_name)
def _add_file(self, path, expected_path, file_object, links=None, overwrite=False):
sanitized_path = self.storage.add_file(path, file_object, links=links, allow_overwrite=overwrite)
split_path = sanitized_path.split("/")
if len(split_path) == 1:
file_path = os.path.join(self.basefolder, split_path[0])
folder_path = self.basefolder
else:
file_path = os.path.join(self.basefolder, os.path.join(*split_path))
folder_path = os.path.join(self.basefolder, os.path.join(*split_path[:-1]))
self.assertEquals(expected_path, sanitized_path)
self.assertTrue(os.path.exists(file_path))
self.assertTrue(os.path.exists(os.path.join(folder_path, ".metadata.yaml")))
metadata = self.storage.get_metadata(sanitized_path)
self.assertIsNotNone(metadata)
# assert hash
self.assertTrue("hash" in metadata)
self.assertEquals(file_object.hash, metadata["hash"])
# assert presence of links if supplied
if links:
self.assertTrue("links" in metadata)
return sanitized_path
def _add_folder(self, path, expected_path):
sanitized_path = self.storage.add_folder(path)
self.assertEquals(expected_path, sanitized_path)
self.assertTrue(os.path.exists(os.path.join(self.basefolder, os.path.join(*sanitized_path.split("/")))))
self.assertTrue(os.path.isdir(os.path.join(self.basefolder, os.path.join(*sanitized_path.split("/")))))
return sanitized_path
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/python-ldap-2.3.13/Demo/simplebrowse.py
|
40
|
#! python
#
# simple LDAP server browsing example
#
import ldap
import string
from traceback import print_exc
url = "ldap://ldap.openldap.org/"
dn = "dc=openldap,dc=org"
print "Connecting to", url
l = ldap.initialize(url)
l.bind_s("", "", ldap.AUTH_SIMPLE);
lastdn = dn
dnlist = None
while 1:
#-- read a command
try:
cmd = raw_input(dn + "> ")
except EOFError:
print
break
try:
if cmd == "?":
print "cd <dn> - change DN to <dn>"
print "cd <n> - change DN to number <n> of last 'ls'"
print "cd - - change to previous DN"
print "cd .. - change to one-level higher DN"
print "cd - change to root DN"
print "ls - list children of crrent DN"
print ". - show attributes of current DN"
print "/<expr> - list descendents matching filter <expr>"
print "? - show this help"
elif cmd == "ls":
print "Children of", `dn`, ":"
dnlist = []
#
# List the children at one level down from the current dn
# We use the filter 'objectclass=*' to match everything.
# We're not interested in attributes at this stage, so
# we specify [] as the list of attribute names to retrieve.
#
for name,attrs in l.search_s(dn, ldap.SCOPE_ONELEVEL,
"objectclass=*", []):
#-- shorten resulting dns for output brevity
if name.startswith(dn+", "):
shortname = "+ "+name[len(dn)+2:]
elif name.endswith(", "+dn):
shortname = name[:-len(dn)-2]+" +"
else:
shortname = name
print " %3d. %s" % (len(dnlist), shortname)
dnlist.append(name)
elif cmd == "cd":
dn = ""
dnlist = None
elif cmd.startswith("cd "):
arg = cmd[3:]
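#-- 'cd -' swaps back to the previous DN, 'cd ..' strips the leftmost RDN,
#-- a number jumps to an entry from the last 'ls', anything else is a literal DN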
if arg == '-':
lastdn,dn = dn,lastdn
elif arg == '..':
dn = string.join(ldap.explode_dn(dn)[1:], ",")
dn = string.strip(dn)
else:
try:
i = int(arg)
except:
godn = arg
else:
if dnlist is None:
print "do an ls first"
else:
godn = dnlist[i]
lastdn = dn
dn = godn
elif cmd == ".":
#
# Retrieve all the attributes for the current dn.
# We construct a search using SCOPE_BASE (ie just the
# given DN) and again filter with "objectclass=*".
# No attributes are listed, so the default is for
# the client to receive all attributes on the DN.
#
print "Attributes of", `dn`, ":"
for name,attrs in l.search_s(dn, ldap.SCOPE_BASE,
"objectclass=*"):
print " %-24s" % name
for k,vals in attrs.items():
for v in vals:
if len(v) > 200:
v = `v[:200]` + \
("... (%d bytes)" % len(v))
else:
v = `v`
print " %-12s: %s" % (k, v)
elif cmd.startswith("/"):
#
# Search descendant objects to match a given filter.
# We use SCOPE_SUBTREE to indicate descendents, and
# again specify an empty attribute list to indicate
# that we're not interested in them.
#
expr = cmd[1:]
print "Descendents matching filter", `expr`, ":"
for name,attrs in l.search_s(dn, ldap.SCOPE_SUBTREE,
expr, []):
print " %24s", name
else:
print "unknown command - try '?' for help"
except:
print_exc()
|
almeidapaulopt/erpnext
|
refs/heads/develop
|
erpnext/accounts/general_ledger.py
|
2
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.doctype.budget.budget import validate_expense_against_budget
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes', from_repost=False):
if gl_map:
if not cancel:
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding, from_repost)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
def merge_similar_entries(gl_map):
merged_gl_map = []
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
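# filter() returns a lazy iterator on Python 3, so materialise the result before it is indexed or measured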
merged_gl_map = list(merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map):
for e in gl_map:
if e.account == gle.account \
and cstr(e.get('party_type'))==cstr(gle.get('party_type')) \
and cstr(e.get('party'))==cstr(gle.get('party')) \
and cstr(e.get('against_voucher'))==cstr(gle.get('against_voucher')) \
and cstr(e.get('against_voucher_type')) == cstr(gle.get('against_voucher_type')) \
and cstr(e.get('cost_center')) == cstr(gle.get('cost_center')) \
and cstr(e.get('project')) == cstr(gle.get('project')):
return e
def save_entries(gl_map, adv_adj, update_outstanding, from_repost=False):
if not from_repost:
validate_account_for_perpetual_inventory(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding, from_repost)
# check against budget
if not from_repost:
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding, from_repost=False):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.flags.from_repost = from_repost
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding, from_repost)
gle.submit()
def validate_account_for_perpetual_inventory(gl_map):
if cint(erpnext.is_perpetual_inventory_enabled(gl_map[0].company)) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Stock' and is_group=0""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.db.get_value("Company", gl_map[0].company, "default_currency", cache=True))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
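# manually keyed vouchers (Journal/Payment Entry) must balance to within 5 units of the
# smallest decimal; other voucher types tolerate up to 0.5 of rounding drift before failing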
if gl_map[0]["voucher_type"] in ("Journal Entry", "Payment Entry"):
allowance = 5.0 / (10**precision)
else:
allowance = .5
if abs(debit_credit_diff) >= allowance:
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff)
def make_round_off_gle(gl_map, debit_credit_diff):
round_off_account, round_off_cost_center = get_round_off_account_and_cost_center(gl_map[0].company)
round_off_gle = frappe._dict()
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit_in_account_currency": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit_in_account_currency": debit_credit_diff if debit_credit_diff > 0 else 0,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
gl_map.append(round_off_gle)
def get_round_off_account_and_cost_center(company):
round_off_account, round_off_cost_center = frappe.db.get_value("Company", company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
return round_off_account, round_off_cost_center
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""
select account, posting_date, party_type, party, cost_center, fiscal_year,voucher_type,
voucher_no, against_voucher_type, against_voucher, cost_center, company
from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
if not adv_adj:
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes' and not adv_adj:
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
|
CuonDeveloper/cuon
|
refs/heads/master
|
cuon_client/cuon_newclient/bin/cuon/User/user.py
|
1
|
# -*- coding: utf-8 -*-
##Copyright (C) [2003-2005] [Juergen Hamel, D-32584 Loehne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os.path
import cuon.TypeDefs
#from cuon.Windows.windows import windows
from cuon.Databases.dumps import dumps
from gtk import TRUE, FALSE
from cuon.TypeDefs.defaultValues import defaultValues
class User(defaultValues):
"""
@author: Jürgen Hamel
@organization: Cyrus-Computer GmbH, D-32584 Löhne
@copyright: by Jürgen Hamel
@license: GPL ( GNU GENERAL PUBLIC LICENSE )
@contact: jh@cyrus.de
"""
def __init__(self):
"""
Variables:
1. self.userName: Name of the User
"""
defaultValues.__init__(self)
# self.openDB()
# self.td = self.loadObject('td')
# self.closeDB()
# self.rpc = cuon.XMLRPC.xmlrpc.myXmlRpc()
self.userName = 'EMPTY'
self.dicTest = {}
self.sessionID = 0
#self.openDB()
#self.td = self.loadObject('td')
#self.closeDB()
self.Database = 'cuon'
# setting for locales
self.userLocales ='de'
self.userEncoding = 'utf-8'
self.Encode = True
self.userPdfEncoding = 'latin-1'
self.userType = 'cuon'
self.userDateFormatString = "%d.%m.%Y"
self.userDateTimeFormatString = "%d.%m.%Y %H:%M"
self.userDateTimeFormatEncoding = "%Y.%m.%d %H:%M:%S"
self.userTimeFormatString = "%H:%M"
self.sDebug = 'NO'
self.serverAddress = None
self.userSQLDateFormat = 'DD.MM.YYYY'
self.userSQLTimeFormat = 'HH24:MI'
self.userSQLDateTimeFormat = 'DD.MM.YYYY HH24:MI'
self.prefPath = {}
self.prefApps = {}
self.serverSqlDateFormat = '%Y-%m-%d'
self.client = 0
self.prefPath['tmp'] = os.path.normpath(self.td.cuon_path) + '/'
self.prefPath['StandardInvoice1'] = os.path.normpath(self.td.cuon_path + '/' + 'Invoice' )
self.prefPath['StandardSupply1'] = os.path.normpath(self.td.cuon_path + '/' + 'Delivery' )
self.prefPath['StandardPickup1'] = os.path.normpath(self.td.cuon_path + '/' + 'Pickup' )
self.prefPath['AddressLists'] = os.path.normpath(self.td.cuon_path + '/' + 'address' )
self.prefPath['ArticleLists'] = os.path.normpath(self.td.cuon_path + '/' + 'article' )
self.prefPath['StandardCAB1'] = os.path.normpath(self.td.cuon_path + '/' + 'address' )
self.prefPath['ReportStandardInvoice1'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportStandardSupply1'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportStandardPickup1'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportAddressLists'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportArticleLists'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportStockGoodsLists'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefPath['ReportStandardFinancesCAB'] = os.path.normpath(self.td.cuon_path + '/' + 'Reports' )
self.prefColor = {'FG':0, 'BG':0}
self.prefDMS = {}
#Scanner prefs
self.prefDMS['scan_device'] = 'plustek:libusb:002:002'
self.prefDMS['scan_r'] = {'x':1024.0, 'y':768.0}
self.prefDMS['scan_mode'] = 'color'
self.prefDMS['scan_contrast'] = 0.0
self.prefDMS['scan_brightness'] = 0.0
self.prefDMS['scan_white_level'] = 0.0
self.prefDMS['scan_depth'] = 24
self.prefDMS['scan_resolution'] = 300
# Executables
self.prefDMS['exe'] = {}
self.prefDMS['exe']['writer'] = '/usr/bin/oowriter2'
self.prefDMS['exe']['calc'] = '/usr/bin/oocalc2'
self.prefDMS['exe']['draw'] = '/usr/bin/oodraw2'
self.prefDMS['exe']['impress'] = '/usr/bin/ooimpress2'
self.prefDMS['exe']['image'] = '/usr/bin/gimp'
self.prefDMS['exe']['music'] = '/usr/bin/xmms'
self.prefDMS['exe']['ogg'] = '/usr/bin/xmms'
self.prefDMS['exe']['wav'] = '/usr/bin/xmms'
self.prefDMS['exe']['pdf'] = '/usr/bin/evince'
self.prefDMS['exe']['tex'] = '/usr/bin/xemacs'
self.prefDMS['exe']['ltx'] = '/usr/bin/xemacs'
self.prefDMS['exe']['txt'] = '/usr/bin/gedit'
self.prefDMS['exe']['flowchart'] = '/usr/bin/dia'
self.prefDMS['exe']['googleearth'] = 'googleearth'
self.prefDMS['exe']['internet'] = '/usr/bin/firefox'
self.prefDMS['exe']['html'] = '/usr/bin/firefox'
self.prefApps['PDF']=self.prefDMS['exe']['pdf']
self.prefDMS['fileformat'] = {}
self.setFileFormats()
self.dicUser = {}
self.sqlDicUser = {}
self.dicUserKeys = {}
# setting for files and path
self.pathAddressPhoneListing1 = os.path.abspath('.')
print self.pathAddressPhoneListing1
# setting keys for eachWindow
#normal
self.setDicUserKeys('edit','e')
self.setDicUserKeys('delete','d')
self.setDicUserKeys('new','n')
self.setDicUserKeys('print','p')
self.setDicUserKeys('save','s')
#Address
self.setDicUserKeys('address_edit','e')
self.setDicUserKeys('address_delete','d')
self.setDicUserKeys('address_new','n')
self.setDicUserKeys('address_save','s')
self.setDicUserKeys('address_print','p')
self.setDicUserKeys('address_partner_edit','e')
self.setDicUserKeys('address_partner_delete','d')
self.setDicUserKeys('address_partner_new','n')
self.setDicUserKeys('address_partner_print','p')
self.setDicUserKeys('address_partner_save','s')
# articles
self.setDicUserKeys('articles_edit','e')
self.setDicUserKeys('articles_delete','d')
self.setDicUserKeys('articles_new','n')
self.setDicUserKeys('articles_print','p')
self.setDicUserKeys('articles_purchase_edit','e')
self.setDicUserKeys('articles_purchase_delete','d')
self.setDicUserKeys('articles_purchase_new','n')
self.setDicUserKeys('articles_purchase_print','p')
# staff
self.setDicUserKeys('staff_edit','e')
self.setDicUserKeys('staff_delete','d')
self.setDicUserKeys('staff_new','n')
self.setDicUserKeys('staff_print','p')
self.setDicUserKeys('staff_fee_edit','e')
self.setDicUserKeys('staff_fee_delete','d')
self.setDicUserKeys('staff_fee_new','n')
self.setDicUserKeys('staff_fee_print','p')
# project
self.setDicUserKeys('project_edit','e')
self.setDicUserKeys('project_delete','d')
self.setDicUserKeys('project_new','n')
self.setDicUserKeys('project_print','p')
# botany
self.setDicUserKeys('botany_edit','e')
self.setDicUserKeys('botany_delete','d')
self.setDicUserKeys('botany_new','n')
self.setDicUserKeys('botany_print','p')
# hibernation
self.setDicUserKeys('hibernation_edit','e')
self.setDicUserKeys('hibernation_delete','d')
self.setDicUserKeys('hibernation_new','n')
self.setDicUserKeys('hibernation_save','s')
self.setDicUserKeys('hibernation_print','p')
# hibernation_plant
self.setDicUserKeys('hibernation_plant_edit','f')
self.setDicUserKeys('hibernation_plant_delete','g')
self.setDicUserKeys('hibernation_plant_new','h')
self.setDicUserKeys('hibernation_plant_save','a')
self.setDicUserKeys('hibernation_plant_print','t')
self.refreshDicUser()
def setFileFormats(self):
# File-format
self.prefDMS['fileformat'] = {}
self.prefDMS['fileformat']['scanImage'] = {'format':'Image Scanner', 'suffix':['NONE'], 'executable': 'INTERN'}
self.prefDMS['fileformat']['LINK'] = {'format':'LINK', 'suffix':['NONE'], 'executable': 'INTERN'}
self.prefDMS['fileformat']['oow'] = {'format':'Open Office Writer', 'suffix':['sxw', 'sdw','odt','ott'], 'executable': self.prefDMS['exe']['writer'] }
self.prefDMS['fileformat']['ooc'] = {'format':'Open Office Calc', 'suffix':['sxc','sdc','ods','ots'], 'executable': self.prefDMS['exe']['calc']}
self.prefDMS['fileformat']['ood'] = {'format':'Open Office Draw', 'suffix':['sxd','odg','otg'], 'executable': self.prefDMS['exe']['draw']}
self.prefDMS['fileformat']['ooi'] = {'format':'Open Office Impress', 'suffix':['sti','sxi','odp','otp'], 'executable': self.prefDMS['exe']['impress']}
self.prefDMS['fileformat']['gimp'] = {'format':'Gimp', 'suffix':['xcf','jpg','gif','png'], 'executable': self.prefDMS['exe']['image']}
self.prefDMS['fileformat']['mp3'] = {'format':'MP3', 'suffix':['mp3'], 'executable': self.prefDMS['exe']['music']}
self.prefDMS['fileformat']['ogg'] = {'format':'OGG', 'suffix':['ogg'], 'executable': self.prefDMS['exe']['ogg']}
self.prefDMS['fileformat']['wav'] = {'format':'WAV', 'suffix':['wav'], 'executable': self.prefDMS['exe']['wav']}
self.prefDMS['fileformat']['txt'] = {'format':'Text', 'suffix':['txt'], 'executable': self.prefDMS['exe']['txt']}
self.prefDMS['fileformat']['tex'] = {'format':'TEX', 'suffix':['tex',], 'executable': self.prefDMS['exe']['tex']}
self.prefDMS['fileformat']['latex'] = {'format':'LATEX', 'suffix':['ltx',], 'executable': self.prefDMS['exe']['ltx']}
self.prefDMS['fileformat']['pdf'] = {'format':'Adobe PDF', 'suffix':['pdf',], 'executable': self.prefDMS['exe']['pdf']}
self.prefDMS['fileformat']['dia'] = {'format':'DIA', 'suffix':['dia'], 'executable': self.prefDMS['exe']['flowchart']}
self.prefDMS['fileformat']['googleearth'] = {'format':'KMZ', 'suffix':['kmz','kml','eta'], 'executable': self.prefDMS['exe']['googleearth']}
self.prefDMS['fileformat']['html'] = {'format':'HTML', 'suffix':['html','htm'], 'executable': self.prefDMS['exe']['html']}
def refreshDicUser(self):
'''
set self.dicuser to actual values
'''
self.dicUser['Locales'] = self.userLocales
self.dicUser['Database'] = self.Database
self.dicUser['Encoding'] = self.userEncoding
self.dicUser['Encode'] = self.Encode
self.dicUser['DateTimeformatString'] = self.userDateTimeFormatString
self.dicUser['DateformatString'] = self.userDateFormatString
self.dicUser['DateTimeformatEncoding'] = self.userDateTimeFormatEncoding
#self.dicUser['serverAddress'] = self.serverAddress
self.dicUser['SQLDateFormat'] = self.userSQLDateFormat
self.dicUser['SQLTimeFormat'] = self.userSQLTimeFormat
self.dicUser['SQLDateTimeFormat'] = self.userSQLDateTimeFormat
self.dicUser['Name'] = self.userName
#self.dicUser['Password'] = self.userPassword
self.dicUser['Debug'] = self.sDebug
self.dicUser['prefPath'] = self.prefPath
self.dicUser['SessionID'] = self.getSessionID()
self.dicUser['userType'] = self.userType
self.dicUser['prefColor'] = self.prefColor
self.dicUser['prefDMS'] = self.prefDMS
self.dicUser['prefApps'] = self.prefApps
self.dicUser['client'] = self.client
self.refreshSqlDicUser()
def refreshSqlDicUser(self):
self.sqlDicUser['Name'] = self.userName
self.sqlDicUser['SessionID'] = self.getSessionID()
self.sqlDicUser['userType'] = self.userType
self.sqlDicUser['client'] = self.client
self.sqlDicUser['Locales'] = self.userLocales
self.sqlDicUser['Database'] = self.Database
self.sqlDicUser['Encoding'] = self.userEncoding
self.sqlDicUser['Encode'] = self.Encode
self.sqlDicUser['DateTimeformatString'] = self.userDateTimeFormatString
self.sqlDicUser['DateformatString'] = self.userDateFormatString
self.sqlDicUser['DateTimeformatEncoding'] = self.userDateTimeFormatEncoding
self.sqlDicUser['SQLDateFormat'] = self.userSQLDateFormat
self.sqlDicUser['SQLTimeFormat'] = self.userSQLTimeFormat
self.sqlDicUser['SQLDateTimeFormat'] = self.userSQLDateTimeFormat
def getUser(self, result):
try:
self.prefPath['StandardInvoice1'] = result['path_to_docs_invoices']
self.prefPath['StandardSupply1'] = result['path_to_docs_supply']
self.prefPath['StandardPickup1'] = result['path_to_docs_pickup']
self.prefPath['AddressLists'] = result['path_to_docs_address_lists']
self.prefPath['ReportStandardInvoice1'] = result['path_to_report_invoices']
self.prefPath['ReportStandardSupply1'] = result['path_to_report_supply']
self.prefPath['ReportStandardPickup1'] = result['path_to_report_pickup']
self.prefPath['ReportAddressLists'] = result['path_to_report_address_lists']
self.prefDMS['scan_device'] = result['scanner_device']
self.prefDMS['scan_r'] = {'x':result['scanner_brx'], 'y':result['scanner_bry']}
self.prefDMS['scan_mode'] = result['scanner_mode']
self.prefDMS['scan_contrast'] = result['scanner_contrast']
self.prefDMS['scan_brightness'] = result['scanner_brightness']
self.prefDMS['scan_white_level'] = result['scanner_white_level']
self.prefDMS['scan_depth'] = result['scanner_depth']
self.prefDMS['scan_resolution'] = result['scanner_resolution']
# Executables
self.prefDMS['exe']['writer'] = result['exe_oowriter']
self.prefDMS['exe']['calc'] = result['exe_oocalc']
self.prefDMS['exe']['draw'] = result['exe_oodraw']
self.prefDMS['exe']['impress'] = result['exe_ooimpress']
self.prefDMS['exe']['image'] = result['exe_image']
self.prefDMS['exe']['music'] = result['exe_music']
self.prefDMS['exe']['ogg'] = result['exe_ogg']
self.prefDMS['exe']['wav'] = result['exe_wav']
self.prefDMS['exe']['pdf'] = result['exe_pdf']
self.prefDMS['exe']['tex'] = result['exe_tex']
self.prefDMS['exe']['ltx'] = result['exe_ltx']
self.prefDMS['exe']['txt'] = result['exe_txt']
self.prefDMS['exe']['flowchart'] = result['exe_flowchart']
self.prefDMS['exe']['googleearth'] = result['exe_googleearth']
self.prefDMS['exe']['internet'] = result['exe_internet']
self.prefDMS['exe']['html'] = result['exe_html']
self.prefApps['PDF'] = self.prefDMS['exe']['pdf']
print "prefApps['PDF'] 0=", self.prefDMS['exe']['pdf']
print "prefApps['PDF'] 1= ", prefApps['PDF']
except Exception, param:
print Exception
print param
self.setFileFormats()
self.refreshDicUser()
return self
def getDicUser(self):
'''
@return: Dictionary with user-infos
'''
return self.dicUser
def getSqlDicUser(self):
return self.sqlDicUser
def getDicUserKeys(self):
return self.dicUserKeys
def setDicUserKeys(self, dKey, sKey):
self.dicUserKeys[dKey] = sKey
def setUserName(self, s):
"""@param s: Name of the User """
self.userName = s
self.refreshDicUser()
def getUserName(self):
"""@return: Name of the user"""
return self.userName
def setSessionID(self, sid):
'''
set the sessionID
@param sid: session-id
'''
self.sessionID = sid
self.refreshDicUser()
def getSessionID(self):
return self.sessionID
def setDebug(self, sDebug='NO'):
self.sDebug = sDebug
print 'sDebug(User) = ' + sDebug
self.refreshDicUser()
def getDebug(self):
if self.sDebug == 'YES':
return TRUE
else:
return FALSE
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/celery/tests/utils/test_mail.py
|
8
|
from __future__ import absolute_import
from celery.utils.mail import Message, Mailer, SSLError
from celery.tests.case import Case, Mock, patch
msg = Message(to='george@vandelay.com', sender='elaine@pendant.com',
subject="What's up with Jerry?", body='???!')
class test_Message(Case):
def test_repr(self):
self.assertTrue(repr(msg))
def test_str(self):
self.assertTrue(str(msg))
class test_Mailer(Case):
def test_send_wrapper(self):
mailer = Mailer()
mailer._send = Mock()
mailer.send(msg)
mailer._send.assert_called_with(msg)
@patch('smtplib.SMTP_SSL', create=True)
def test_send_ssl_tls(self, SMTP_SSL):
mailer = Mailer(use_ssl=True, use_tls=True)
client = SMTP_SSL.return_value = Mock()
mailer._send(msg)
self.assertTrue(client.starttls.called)
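# the client must EHLO again after STARTTLS, hence two ehlo calls are expected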
self.assertEqual(client.ehlo.call_count, 2)
client.quit.assert_called_with()
client.sendmail.assert_called_with(msg.sender, msg.to, str(msg))
mailer = Mailer(use_ssl=True, use_tls=True, user='foo',
password='bar')
mailer._send(msg)
client.login.assert_called_with('foo', 'bar')
@patch('smtplib.SMTP')
def test_send(self, SMTP):
client = SMTP.return_value = Mock()
mailer = Mailer(use_ssl=False, use_tls=False)
mailer._send(msg)
client.sendmail.assert_called_with(msg.sender, msg.to, str(msg))
client.quit.side_effect = SSLError()
mailer._send(msg)
client.close.assert_called_with()
|
tomsitter/cloudhealth
|
refs/heads/master
|
profile/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
ChristineLaMuse/mozillians
|
refs/heads/master
|
vendor-local/lib/python/celery/signals.py
|
12
|
# -*- coding: utf-8 -*-
"""
celery.signals
~~~~~~~~~~~~~~
See :ref:`signals`.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .utils.dispatch import Signal
task_sent = Signal(providing_args=["task_id", "task",
"args", "kwargs",
"eta", "taskset"])
task_prerun = Signal(providing_args=["task_id", "task",
"args", "kwargs"])
task_postrun = Signal(providing_args=["task_id", "task",
"args", "kwargs", "retval"])
task_failure = Signal(providing_args=["task_id", "exception",
"args", "kwargs", "traceback",
"einfo"])
celeryd_init = Signal(providing_args=["instance"])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])
setup_logging = Signal(providing_args=["loglevel", "logfile",
"format", "colorize"])
after_setup_logger = Signal(providing_args=["logger", "loglevel", "logfile",
"format", "colorize"])
after_setup_task_logger = Signal(providing_args=["logger", "loglevel",
"logfile", "format",
"colorize"])
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=["target", "args", "kwargs"])
|
krux/duplicity-pkg
|
refs/heads/master
|
duplicity/backends/giobackend.py
|
3
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2009 Michael Terry <mike@mterry.name>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import types
import subprocess
import atexit
import signal
from gi.repository import Gio #@UnresolvedImport
from gi.repository import GLib #@UnresolvedImport
import duplicity.backend
from duplicity.backend import retry
from duplicity import log
from duplicity import util
from duplicity.errors import * #@UnusedWildImport
def ensure_dbus():
# GIO requires a dbus session bus which can start the gvfs daemons
# when required. So we make sure that such a bus exists and that our
# environment points to it.
if 'DBUS_SESSION_BUS_ADDRESS' not in os.environ:
output = subprocess.Popen(['dbus-launch'], stdout=subprocess.PIPE).communicate()[0]
lines = output.split('\n')
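# dbus-launch prints NAME=value lines, including DBUS_SESSION_BUS_ADDRESS and DBUS_SESSION_BUS_PID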
for line in lines:
parts = line.split('=', 1)
if len(parts) == 2:
if parts[0] == 'DBUS_SESSION_BUS_PID': # cleanup at end
atexit.register(os.kill, int(parts[1]), signal.SIGTERM)
os.environ[parts[0]] = parts[1]
class DupMountOperation(Gio.MountOperation):
"""A simple MountOperation that grabs the password from the environment
or the user.
"""
def __init__(self, backend):
Gio.MountOperation.__init__(self)
self.backend = backend
self.connect('ask-password', self.ask_password_cb)
self.connect('ask-question', self.ask_question_cb)
def ask_password_cb(self, *args, **kwargs):
self.set_password(self.backend.get_password())
self.reply(Gio.MountOperationResult.HANDLED)
def ask_question_cb(self, *args, **kwargs):
# Obviously just always answering with the first choice is a naive
# approach. But there's no easy way to allow for answering questions
# in duplicity's typical run-from-cron mode with environment variables.
# And only a couple gvfs backends ask questions: 'sftp' does about
# new hosts and 'afc' does if the device is locked. 0 should be a
# safe choice.
self.set_choice(0)
self.reply(Gio.MountOperationResult.HANDLED)
class GIOBackend(duplicity.backend.Backend):
"""Use this backend when saving to a GIO URL.
This is a bit of a meta-backend, in that it can handle multiple schemas.
URLs look like schema://user@server/path.
"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
ensure_dbus()
self.remote_file = Gio.File.new_for_uri(parsed_url.url_string)
# Now we make sure the location is mounted
op = DupMountOperation(self)
loop = GLib.MainLoop()
self.remote_file.mount_enclosing_volume(Gio.MountMountFlags.NONE,
op, None, self.done_with_mount,
loop)
loop.run() # halt program until we're done mounting
# Now make the directory if it doesn't exist
try:
self.remote_file.make_directory_with_parents(None)
except GLib.GError, e:
if e.code != Gio.IOErrorEnum.EXISTS:
raise
def done_with_mount(self, fileobj, result, loop):
try:
fileobj.mount_enclosing_volume_finish(result)
except GLib.GError, e:
# check for NOT_SUPPORTED because some schemas (e.g. file://) validly don't
if e.code != Gio.IOErrorEnum.ALREADY_MOUNTED and e.code != Gio.IOErrorEnum.NOT_SUPPORTED:
log.FatalError(_("Connection failed, please check your password: %s")
% str(e), log.ErrorCode.connection_failed)
loop.quit()
def handle_error(self, raise_error, e, op, file1=None, file2=None):
if raise_error:
raise e
code = log.ErrorCode.backend_error
if isinstance(e, GLib.GError):
if e.code == Gio.IOErrorEnum.PERMISSION_DENIED:
code = log.ErrorCode.backend_permission_denied
elif e.code == Gio.IOErrorEnum.NOT_FOUND:
code = log.ErrorCode.backend_not_found
elif e.code == Gio.IOErrorEnum.NO_SPACE:
code = log.ErrorCode.backend_no_space
extra = ' '.join([util.escape(x) for x in [file1, file2] if x])
extra = ' '.join([op, extra])
log.FatalError(str(e), code, extra)
def copy_progress(self, *args, **kwargs):
pass
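# duplicity's retry decorator re-invokes the wrapped method when it raises, giving transient GIO errors another attempt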
@retry
def copy_file(self, op, source, target, raise_errors=False):
log.Info(_("Writing %s") % target.get_parse_name())
try:
source.copy(target,
Gio.FileCopyFlags.OVERWRITE | Gio.FileCopyFlags.NOFOLLOW_SYMLINKS,
None, self.copy_progress, None)
except Exception, e:
self.handle_error(raise_errors, e, op, source.get_parse_name(),
target.get_parse_name())
def put(self, source_path, remote_filename = None):
"""Copy file to remote"""
if not remote_filename:
remote_filename = source_path.get_filename()
source_file = Gio.File.new_for_path(source_path.name)
target_file = self.remote_file.get_child(remote_filename)
self.copy_file('put', source_file, target_file)
def get(self, filename, local_path):
"""Get file and put in local_path (Path object)"""
source_file = self.remote_file.get_child(filename)
target_file = Gio.File.new_for_path(local_path.name)
self.copy_file('get', source_file, target_file)
local_path.setdata()
@retry
def list(self, raise_errors=False):
"""List files in that directory"""
files = []
try:
enum = self.remote_file.enumerate_children(Gio.FILE_ATTRIBUTE_STANDARD_NAME,
Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS,
None)
info = enum.next_file(None)
while info:
files.append(info.get_name())
info = enum.next_file(None)
except Exception, e:
self.handle_error(raise_errors, e, 'list',
self.remote_file.get_parse_name())
return files
@retry
def delete(self, filename_list, raise_errors=False):
"""Delete all files in filename list"""
assert type(filename_list) is not types.StringType
for filename in filename_list:
target_file = self.remote_file.get_child(filename)
try:
target_file.delete(None)
except Exception, e:
if isinstance(e, GLib.GError):
if e.code == Gio.IOErrorEnum.NOT_FOUND:
continue
self.handle_error(raise_errors, e, 'delete',
target_file.get_parse_name())
return
@retry
def _query_file_info(self, filename, raise_errors=False):
"""Query attributes on filename"""
target_file = self.remote_file.get_child(filename)
attrs = Gio.FILE_ATTRIBUTE_STANDARD_SIZE
try:
info = target_file.query_info(attrs, Gio.FileQueryInfoFlags.NONE,
None)
return {'size': info.get_size()}
except Exception, e:
if isinstance(e, GLib.GError):
if e.code == Gio.IOErrorEnum.NOT_FOUND:
return {'size': -1} # early exit, no need to retry
if raise_errors:
raise e
else:
return {'size': None}
|
hainn8x/gnuradio
|
refs/heads/master
|
gnuradio-runtime/python/gnuradio/gr/gateway.py
|
24
|
#
# Copyright 2011-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import runtime_swig as gr
from runtime_swig import io_signature, io_signaturev
from runtime_swig import block_gw_message_type
from runtime_swig import block_gateway
import numpy
########################################################################
# Magic to turn pointers into numpy arrays
# http://docs.scipy.org/doc/numpy/reference/arrays.interface.html
########################################################################
def pointer_to_ndarray(addr, dtype, nitems):
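# expose the raw memory at addr as a writable numpy array without copying, via the __array_interface__ protocol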
class array_like:
__array_interface__ = {
'data' : (int(addr), False),
'typestr' : dtype.base.str,
'descr' : dtype.base.descr,
'shape' : (nitems,) + dtype.shape,
'strides' : None,
'version' : 3
}
return numpy.asarray(array_like()).view(dtype.base)
########################################################################
# Handler that does callbacks from C++
########################################################################
class gateway_handler(gr.feval_ll):
#dont put a constructor, it wont work
def init(self, callback):
self._callback = callback
def eval(self, arg):
try: self._callback()
except Exception as ex:
print("handler caught exception: %s"%ex)
import traceback; traceback.print_exc()
raise ex
return 0
########################################################################
# Handler that does callbacks from C++
########################################################################
class msg_handler(gr.feval_p):
#dont put a constructor, it wont work
def init(self, callback):
self._callback = callback
def eval(self, arg):
try: self._callback(arg)
except Exception as ex:
print("handler caught exception: %s"%ex)
import traceback; traceback.print_exc()
raise ex
return 0
########################################################################
# The guts that make this into a gr block
########################################################################
class gateway_block(object):
def __init__(self, name, in_sig, out_sig, work_type, factor):
#ensure that the sigs are iterable dtypes
def sig_to_dtype_sig(sig):
if sig is None: sig = ()
return map(numpy.dtype, sig)
self.__in_sig = sig_to_dtype_sig(in_sig)
self.__out_sig = sig_to_dtype_sig(out_sig)
#cache the ranges to iterate when dispatching work
self.__in_indexes = range(len(self.__in_sig))
self.__out_indexes = range(len(self.__out_sig))
#convert the signatures into gr.io_signatures
def sig_to_gr_io_sigv(sig):
if not len(sig): return io_signature(0, 0, 0)
return io_signaturev(len(sig), len(sig), [s.itemsize for s in sig])
gr_in_sig = sig_to_gr_io_sigv(self.__in_sig)
gr_out_sig = sig_to_gr_io_sigv(self.__out_sig)
#create internal gateway block
self.__handler = gateway_handler()
self.__handler.init(self.__gr_block_handle)
self.__gateway = block_gateway(
self.__handler, name, gr_in_sig, gr_out_sig, work_type, factor)
self.__message = self.__gateway.block_message()
#dict to keep references to all message handlers
self.__msg_handlers = {}
#register block functions
prefix = 'block__'
for attr in [x for x in dir(self.__gateway) if x.startswith(prefix)]:
setattr(self, attr.replace(prefix, ''), getattr(self.__gateway, attr))
self.pop_msg_queue = lambda: gr.block_gw_pop_msg_queue_safe(self.__gateway)
def to_basic_block(self):
"""
Makes this block connectable by hier/top block python
"""
return self.__gateway.to_basic_block()
def __gr_block_handle(self):
"""
Dispatch tasks according to the action type specified in the message.
"""
if self.__message.action == gr.block_gw_message_type.ACTION_GENERAL_WORK:
self.__message.general_work_args_return_value = self.general_work(
input_items=[pointer_to_ndarray(
self.__message.general_work_args_input_items[i],
self.__in_sig[i],
self.__message.general_work_args_ninput_items[i]
) for i in self.__in_indexes],
output_items=[pointer_to_ndarray(
self.__message.general_work_args_output_items[i],
self.__out_sig[i],
self.__message.general_work_args_noutput_items
) for i in self.__out_indexes],
)
elif self.__message.action == gr.block_gw_message_type.ACTION_WORK:
self.__message.work_args_return_value = self.work(
input_items=[pointer_to_ndarray(
self.__message.work_args_input_items[i],
self.__in_sig[i],
self.__message.work_args_ninput_items
) for i in self.__in_indexes],
output_items=[pointer_to_ndarray(
self.__message.work_args_output_items[i],
self.__out_sig[i],
self.__message.work_args_noutput_items
) for i in self.__out_indexes],
)
elif self.__message.action == gr.block_gw_message_type.ACTION_FORECAST:
self.forecast(
noutput_items=self.__message.forecast_args_noutput_items,
ninput_items_required=self.__message.forecast_args_ninput_items_required,
)
elif self.__message.action == gr.block_gw_message_type.ACTION_START:
self.__message.start_args_return_value = self.start()
elif self.__message.action == gr.block_gw_message_type.ACTION_STOP:
self.__message.stop_args_return_value = self.stop()
def forecast(self, noutput_items, ninput_items_required):
"""
forecast is only called from a general block
this is the default implementation
"""
for i in range(len(ninput_items_required)):
ninput_items_required[i] = noutput_items + self.history() - 1
return
def general_work(self, *args, **kwargs):
"""general work to be overloaded in a derived class"""
raise NotImplementedError("general work not implemented")
def work(self, *args, **kwargs):
"""work to be overloaded in a derived class"""
raise NotImplementedError("work not implemented")
def start(self): return True
def stop(self): return True
def set_msg_handler(self, which_port, handler_func):
handler = msg_handler()
handler.init(handler_func)
self.__gateway.set_msg_handler_feval(which_port, handler)
# Save handler object in class so it's not garbage collected
self.__msg_handlers[which_port] = handler
########################################################################
# Wrappers for the user to inherit from
########################################################################
class basic_block(gateway_block):
def __init__(self, name, in_sig, out_sig):
gateway_block.__init__(self,
name=name,
in_sig=in_sig,
out_sig=out_sig,
work_type=gr.GR_BLOCK_GW_WORK_GENERAL,
factor=1, #not relevant factor
)
class sync_block(gateway_block):
def __init__(self, name, in_sig, out_sig):
gateway_block.__init__(self,
name=name,
in_sig=in_sig,
out_sig=out_sig,
work_type=gr.GR_BLOCK_GW_WORK_SYNC,
factor=1,
)
class decim_block(gateway_block):
def __init__(self, name, in_sig, out_sig, decim):
gateway_block.__init__(self,
name=name,
in_sig=in_sig,
out_sig=out_sig,
work_type=gr.GR_BLOCK_GW_WORK_DECIM,
factor=decim,
)
class interp_block(gateway_block):
def __init__(self, name, in_sig, out_sig, interp):
gateway_block.__init__(self,
name=name,
in_sig=in_sig,
out_sig=out_sig,
work_type=gr.GR_BLOCK_GW_WORK_INTERP,
factor=interp,
)
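# ----------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the upstream file):
# a minimal pass-through block built on the sync_block wrapper above.
# The class name and the float32 signatures are assumptions chosen purely
# for demonstration.
# ----------------------------------------------------------------------
import numpy

class example_copy_block(sync_block):
    """Copy every input item straight to the corresponding output."""
    def __init__(self):
        sync_block.__init__(self,
            name="example_copy_block",
            in_sig=[numpy.float32],
            out_sig=[numpy.float32],
        )

    def work(self, input_items, output_items):
        # one input stream, one output stream; return the item count produced
        output_items[0][:] = input_items[0]
        return len(output_items[0])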
|
mytliulei/Scapy
|
refs/heads/master
|
scapy/contrib/ppi_cace.py
|
19
|
## This file is (hopefully) part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## <jellch@harris.com>
## This program is published under a GPLv2 license
# scapy.contrib.description = PPI CACE
# scapy.contrib.status = loads
"""
CACE PPI types
"""
import logging,struct
from scapy.config import conf
from scapy.packet import *
from scapy.fields import *
from scapy.layers.l2 import Ether
from scapy.layers.dot11 import Dot11
from scapy.contrib.ppi import *
PPI_DOT11COMMON = 2
PPI_DOT11NMAC = 3
PPI_DOT11NMACPHY = 4
PPI_SPECTRUMMAP = 5
PPI_PROCESSINFO = 6
PPI_CAPTUREINFO = 7
PPI_AGGREGATION = 8
PPI_DOT3 = 9
# PPI 802.11 Common Field Header Fields
class dBmByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "b")
    def i2repr(self, pkt, val):
        if val is not None:
            val = "%4d dBm" % val
        return val
class PPITSFTField(LELongField):
    # The TSF timer is expressed in microseconds unless the packet's
    # 'TSFT_ms' flag (bit 0x02 of Pkt_Flags) is set, in which case it is
    # expressed in milliseconds.
    def i2h(self, pkt, val):
        flags = 0
        if pkt:
            flags = pkt.getfieldval("Pkt_Flags") or 0
        if flags & 0x02:
            scale = 1e-3
        else:
            scale = 1e-6
        return scale * float(val)
    def h2i(self, pkt, val):
        scale = 1e6
        if pkt:
            flags = pkt.getfieldval("Pkt_Flags")
            if flags and (flags & 0x02):
                scale = 1e3
        return int((scale * val) + 0.5)
_PPIDot11CommonChFlags = ['','','','','Turbo','CCK','OFDM','2GHz','5GHz',
                          'PassiveOnly','Dynamic CCK-OFDM','GFSK']
_PPIDot11CommonPktFlags = ['FCS','TSFT_ms','FCS_Invalid','PHY_Error']
# PPI 802.11 Common Field Header
class Dot11Common(Packet):
name = "PPI 802.11-Common"
fields_desc = [ LEShortField('pfh_type',PPI_DOT11COMMON),
LEShortField('pfh_length', 20),
PPITSFTField('TSF_Timer', 0),
FlagsField('Pkt_Flags',0, -16, _PPIDot11CommonPktFlags),
LEShortField('Rate',0),
LEShortField('Ch_Freq',0),
FlagsField('Ch_Flags', 0, -16, _PPIDot11CommonChFlags),
ByteField('FHSS_Hop',0),
ByteField('FHSS_Pat',0),
dBmByteField('Antsignal',-128),
dBmByteField('Antnoise',-128)]
def extract_padding(self, p):
return "",p
#Hopefully other CACE defined types will be added here.
#Add the dot11common layer to the PPI array
addPPIType(PPI_DOT11COMMON, Dot11Common)
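# Hedged usage sketch (illustrative only, not part of the upstream file):
# building a PPI 802.11-Common field header by hand and round-tripping it
# through dissection. The field values are arbitrary assumptions.
#
#   common = Dot11Common(Rate=11, Ch_Freq=2412, Antsignal=-60, Antnoise=-95)
#   raw = str(common)           # serialize (Python 2 scapy)
#   Dot11Common(raw).show()     # re-dissect and display the fields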
|
Solthis/Fugen-2.0
|
refs/heads/master
|
template_processor/array_template_processor.py
|
1
|
# coding: utf-8
# Copyright 2017 Solthis.
#
# This file is part of Fugen 2.0.
#
# Fugen 2.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fugen 2.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from template_processor.base_template_processor import BaseTemplateProcessor
class ArrayTemplateProcessor(BaseTemplateProcessor):
def __init__(self, array, fuchia_database):
super(ArrayTemplateProcessor, self).__init__(fuchia_database)
self.array = array
def get_row_number(self):
if isinstance(self.array, np.ndarray):
return self.array.shape[0]
return len(self.array)
    def get_column_number(self):
        if isinstance(self.array, np.ndarray):
            return self.array.shape[1]
        if len(self.array) == 0:
            return 0
        # for a nested sequence, the column count is the length of a row,
        # not the number of rows
        return len(self.array[0])
def get_cell_content(self, i, j):
if isinstance(self.array, np.ndarray):
return self.array[i, j]
return self.array[i][j]
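# Hedged usage sketch (illustrative only): walking the cells of a plain
# nested list through the processor API above. Passing None in place of a
# fuchia_database is an assumption made purely for demonstration.
#
#   proc = ArrayTemplateProcessor([[1, 2, 3], [4, 5, 6]], None)
#   for i in range(proc.get_row_number()):          # 2 rows
#       for j in range(proc.get_column_number()):   # 3 columns
#           print(proc.get_cell_content(i, j))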
|
idahu29/payment-redis
|
refs/heads/master
|
test.py
|
12
|
import pytest
from basedir import basedir
import os
import shutil
import sys
def main():
argv = []
argv.extend(sys.argv[1:])
pytest.main(argv)
try:
os.remove(os.path.join(basedir, '.coverage'))
except OSError:
pass
try:
shutil.rmtree(os.path.join(basedir, '.cache'))
except OSError:
pass
try:
shutil.rmtree(os.path.join(basedir, 'tests/.cache'))
except OSError:
pass
if __name__ == '__main__':
main()
|
riftstation/riftstation
|
refs/heads/master
|
tools/travis/template_dm_generator.py
|
132
|
#!/usr/bin/env python
import os
import os.path
import sys
folders = ["_maps/RandomRuins", "_maps/RandomZLevels", "_maps/shuttles",
"_maps/templates"]
generated = "_maps/templates.dm"
template_filenames = []
def find_dm(path):
L = []
for dirpath, dirnames, filenames in os.walk(path):
for name in filenames:
if name.endswith(".dmm"):
s = os.path.join(dirpath, name)
s = s.replace("_maps/","")
L.append(s)
return L
for folder in folders:
template_filenames.extend(find_dm(folder))
with open(generated, 'w') as f:
for template in template_filenames:
f.write('''#include "{}"\n'''.format(template))
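# For reference, the generated _maps/templates.dm is simply one #include per
# discovered .dmm file, e.g. (hypothetical paths):
#
#   #include "RandomRuins/ruin_one.dmm"
#   #include "shuttles/emergency.dmm"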
|
odootr/odoo
|
refs/heads/8.0
|
addons/hr_timesheet_sheet/report/__init__.py
|
342
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
consulo/consulo-python
|
refs/heads/master
|
plugin/src/test/resources/refactoring/changeSignature/classMethod.after.py
|
73
|
class A:
def fo<caret>o(self, a, b):
pass
class B(A):
def foo(self, a, b):
pass
class С(A):
def foo(self, a, b):
pass
a = A()
a.foo(1, 2)
b = B()
b.foo(2, 2)
|
snnn/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py
|
39
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for linear regression example under TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
def setUp(self):
super(LinearRegressionTest, self).setUp()
self._tmp_logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
burn_in_dataset = dataset.take(10)
model = linear_regression.LinearModel()
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Perform burn-in.
linear_regression.fit(model, burn_in_dataset, optimizer)
start_time = time.time()
for _ in range(num_epochs):
linear_regression.fit(model, dataset, optimizer)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="eager_train_%s" %
("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
yize/grunt-tps
|
refs/heads/master
|
tasks/lib/python/Lib/python2.7/email/test/test_email.py
|
54
|
# Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import os
import sys
import time
import base64
import difflib
import unittest
import warnings
import textwrap
from cStringIO import StringIO
import email
from email.Charset import Charset
from email.Header import Header, decode_header, make_header
from email.Parser import Parser, HeaderParser
from email.Generator import Generator, DecodedGenerator
from email.Message import Message
from email.MIMEAudio import MIMEAudio
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
from email.MIMEBase import MIMEBase
from email.MIMEMessage import MIMEMessage
from email.MIMEMultipart import MIMEMultipart
from email import Utils
from email import Errors
from email import Encoders
from email import Iterators
from email import base64MIME
from email import quopriMIME
from test.test_support import findfile, run_unittest
from email.test import __file__ as landmark
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
def openfile(filename, mode='r'):
path = os.path.join(os.path.dirname(landmark), 'data', filename)
return open(path, mode)
# Base test class
class TestEmailBase(unittest.TestCase):
def ndiffAssertEqual(self, first, second):
"""Like assertEqual except use ndiff for readable output."""
if first != second:
sfirst = str(first)
ssecond = str(second)
diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
fp = StringIO()
print >> fp, NL, NL.join(diff)
raise self.failureException, fp.getvalue()
def _msgobj(self, filename):
fp = openfile(findfile(filename))
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(Errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_make_boundary(self):
msg = MIMEMultipart('form-data')
# Note that when the boundary gets created is an implementation
# detail and might change.
self.assertEqual(msg.items()[0][1], 'multipart/form-data')
# Trigger creation of boundary
msg.as_string()
self.assertEqual(msg.items()[0][1][:33],
'multipart/form-data; boundary="==')
# XXX: there ought to be tests of the uniqueness of the boundary, too.
def test_message_rfc822_only(self):
# Issue 7970: message/rfc822 not in multipart parsed by
# HeaderParser caused an exception when flattened.
fp = openfile(findfile('msg_46.txt'))
msgdata = fp.read()
parser = email.Parser.HeaderParser()
msg = parser.parsestr(msgdata)
out = StringIO()
gen = email.Generator.Generator(out, True, 0)
gen.flatten(msg, False)
self.assertEqual(out.getvalue(), msgdata)
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), 'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), 'foo')
def test_decode_bogus_uu_payload_quietly(self):
msg = Message()
msg.set_payload('begin 664 foo.txt\n%<W1F=0000H \n \nend\n')
msg['Content-Transfer-Encoding'] = 'x-uuencode'
old_stderr = sys.stderr
try:
sys.stderr = sfp = StringIO()
# We don't care about the payload
msg.get_payload(decode=True)
finally:
sys.stderr = old_stderr
self.assertEqual(sfp.getvalue(), '')
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
fp = openfile('msg_17.txt')
try:
text = fp.read()
finally:
fp.close()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertTrue('from' in msg)
self.assertTrue('From' in msg)
self.assertTrue('FROM' in msg)
self.assertTrue('to' in msg)
self.assertTrue('To' in msg)
self.assertTrue('TO' in msg)
def test_as_string(self):
eq = self.assertEqual
msg = self._msgobj('msg_01.txt')
fp = openfile('msg_01.txt')
try:
# BAW 30-Mar-2009 Evil be here. So, the generator is broken with
# respect to long line breaking. It's also not idempotent when a
# header from a parsed message is continued with tabs rather than
# spaces. Before we fixed bug 1974 it was reversedly broken,
# i.e. headers that were continued with spaces got continued with
# tabs. For Python 2.x there's really no good fix and in Python
# 3.x all this stuff is re-written to be right(er). Chris Withers
# convinced me that using space as the default continuation
# character is less bad for more applications.
text = fp.read().replace('\t', ' ')
finally:
fp.close()
eq(text, msg.as_string())
fullrepr = str(msg)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
eq(text, NL.join(lines[1:]))
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
def test_get_param_with_quotes(self):
msg = email.message_from_string(
'Content-Type: foo; bar*0="baz\\"foobar"; bar*1="\\"baz"')
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
msg = email.message_from_string(
"Content-Type: foo; bar*0=\"baz\\\"foobar\"; bar*1=\"\\\"baz\"")
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
def test_has_key(self):
msg = email.message_from_string('Header: exists')
self.assertTrue(msg.has_key('header'))
self.assertTrue(msg.has_key('Header'))
self.assertTrue(msg.has_key('HEADER'))
self.assertFalse(msg.has_key('headeri'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True), x)
def test_get_content_charset(self):
msg = Message()
msg.set_charset('us-ascii')
self.assertEqual('us-ascii', msg.get_content_charset())
msg.set_charset(u'us-ascii')
self.assertEqual('us-ascii', msg.get_content_charset())
# Issue 5871: reject an attempt to embed a header inside a header value
# (header injection attack).
    def test_embedded_header_via_Header_rejected(self):
msg = Message()
msg['Dummy'] = Header('dummy\nX-Injected-Header: test')
self.assertRaises(Errors.HeaderParseError, msg.as_string)
    def test_embedded_header_via_string_rejected(self):
msg = Message()
msg['Dummy'] = 'dummy\nX-Injected-Header: test'
self.assertRaises(Errors.HeaderParseError, msg.as_string)
# Test the email.Encoders module
class TestEncoders(unittest.TestCase):
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
# 7bit data and the default us-ascii _charset
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], '8bit')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
def test_encode7or8bit(self):
# Make sure a charset whose input character set is 8bit but
# whose output character set is 7bit gets a transfer-encoding
# of 7bit.
eq = self.assertEqual
msg = email.MIMEText.MIMEText('\xca\xb8', _charset='euc-jp')
eq(msg['content-transfer-encoding'], '7bit')
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr)
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=
""")
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_splitter(self):
eq = self.ndiffAssertEqual
msg = MIMEText('')
# It'd be great if we could use add_header() here, but that doesn't
# guarantee an order of the parameters.
msg['X-Foobar-Spoink-Defrobnit'] = (
'wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join(['<%d@dom.ain>' % i for i in range(10)])
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")
def test_no_split_long_header(self):
eq = self.ndiffAssertEqual
hstr = 'References: ' + 'x' * 80
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
def test_splitting_multiple_long_lines(self):
eq = self.ndiffAssertEqual
hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
eq = self.ndiffAssertEqual
hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
h = Header(hstr, maxlinelen=78, header_name='Received',
continuation_ws='\t')
eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
\thelo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
eq = self.ndiffAssertEqual
msg = Message()
h = Header('Britische Regierung gibt', 'iso-8859-1',
header_name='Subject')
h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
msg['Subject'] = h
eq(msg.as_string(), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
=?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>'
eq(msg.as_string(), """\
Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address@example.com>
""")
def test_long_to_header(self):
eq = self.ndiffAssertEqual
to = '"Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,"Someone Test #B" <someone@umich.edu>, "Someone Test #C" <someone@eecs.umich.edu>, "Someone Test #D" <someone@eecs.umich.edu>'
msg = Message()
msg['To'] = to
eq(msg.as_string(0), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>, <someone@eecs.umich.edu>,
"Someone Test #B" <someone@umich.edu>,
"Someone Test #C" <someone@eecs.umich.edu>,
"Someone Test #D" <someone@eecs.umich.edu>
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
eq = self.ndiffAssertEqual
fn = 'X-Very-Very-Very-Long-Header-Name'
gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
h = Header(gs, 'iso-8859-1', header_name=fn)
# BAW: this seems broken because the first line is too long
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_?=
=?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
=?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
=?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
msg = Message()
msg['Received-1'] = Header(h, continuation_ws='\t')
msg['Received-2'] = h
self.assertEqual(msg.as_string(), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
\tWed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
h = '<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
msg = Message()
msg['Received'] = Header(h, header_name='Received',
continuation_ws='\t')
msg['Received'] = h
self.ndiffAssertEqual(msg.as_string(), """\
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
""")
def test_long_unbreakable_lines_with_continuation(self):
eq = self.ndiffAssertEqual
msg = Message()
t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
msg['Face-1'] = t
msg['Face-2'] = Header(t, header_name='Face-2')
eq(msg.as_string(), """\
Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
Wed, 16 Oct 2002 07:41:11 -0700'''
msg = email.message_from_string(m)
eq(msg.as_string(), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
eq = self.ndiffAssertEqual
h = """\
List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>"""
msg = Message()
msg['List'] = h
msg['List'] = Header(h, header_name='List')
eq(msg.as_string(), """\
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
""")
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
self.msg = Message()
self.msg['From'] = 'aaa@bbb.org'
self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangled_from(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
>From the desk of A.A.A.:
Blah blah blah
""")
def test_dont_mangle_from(self):
s = StringIO()
g = Generator(s, mangle_from_=False)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangle_from_in_preamble_and_epilog(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
msg = email.message_from_string(textwrap.dedent("""\
From: foo@bar.com
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=XXX
From somewhere unknown
--XXX
Content-Type: text/plain
foo
--XXX--
From somewhere unknowable
"""))
g.flatten(msg)
self.assertEqual(len([1 for x in s.getvalue().split('\n')
if x.startswith('>From ')]), 2)
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
def setUp(self):
# Make sure we pick up the audiotest.au that lives in email/test/data.
# In Python, there's an audiotest.au living in Lib/test but that isn't
# included in some binary distros that don't include the test
# package. The trailing empty string on the .join() is significant
# since findfile() will do a dirname().
datadir = os.path.join(os.path.dirname(landmark), 'data', '')
fp = open(findfile('audiotest.au', datadir), 'rb')
try:
self._audiodata = fp.read()
finally:
fp.close()
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
self.assertEqual(base64.decodestring(payload), self._audiodata)
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.assertTrue
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
'attachment; filename="audiotest.au"')
eq(self._au.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'audiotest.au')])
eq(self._au.get_param('filename', header='content-disposition'),
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
unless(self._au.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._au.get_param('foobar', missing) is missing)
unless(self._au.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
self._imgdata = fp.read()
finally:
fp.close()
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
self.assertEqual(base64.decodestring(payload), self._imgdata)
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.assertTrue
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
'attachment; filename="dingusfish.gif"')
eq(self._im.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'dingusfish.gif')])
eq(self._im.get_param('filename', header='content-disposition'),
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
unless(self._im.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._im.get_param('foobar', missing) is missing)
unless(self._im.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
def setUp(self):
self._msg = MIMEText('hello there')
def test_types(self):
eq = self.assertEqual
unless = self.assertTrue
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
unless(self._msg.get_param('foobar', missing) is missing)
unless(self._msg.get_param('charset', missing, header='foobar')
is missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
self.assertTrue(not self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_7bit_unicode_input(self):
eq = self.assertEqual
msg = MIMEText(u'hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_7bit_unicode_input_no_charset(self):
eq = self.assertEqual
msg = MIMEText(u'hello there')
eq(msg.get_charset(), 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
self.assertTrue('hello there' in msg.as_string())
def test_8bit_unicode_input(self):
teststr = u'\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
eq = self.assertEqual
msg = MIMEText(teststr, _charset='utf-8')
eq(msg.get_charset().output_charset, 'utf-8')
eq(msg['content-type'], 'text/plain; charset="utf-8"')
eq(msg.get_payload(decode=True), teststr.encode('utf-8'))
def test_8bit_unicode_input_no_charset(self):
teststr = u'\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
self.assertRaises(UnicodeEncodeError, MIMEText, teststr)
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
data = fp.read()
finally:
fp.close()
container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
image = MIMEImage(data, name='dingusfish.gif')
image.add_header('content-disposition', 'attachment',
filename='dingusfish.gif')
intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
container.attach(intro)
container.attach(image)
container['From'] = 'Barry <barry@digicool.com>'
container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
container['Subject'] = 'Here is your dingus fish'
now = 987809702.54848599
timetuple = time.localtime(now)
if timetuple[-1] == 0:
tzsecs = time.timezone
else:
tzsecs = time.altzone
if tzsecs > 0:
sign = '-'
else:
sign = '+'
tzoffset = ' %s%04d' % (sign, tzsecs // 36)
container['Date'] = time.strftime(
'%a, %d %b %Y %H:%M:%S',
time.localtime(now)) + tzoffset
self._msg = container
self._im = image
self._txt = intro
def test_hierarchy(self):
# convenience
eq = self.assertEqual
unless = self.assertTrue
raises = self.assertRaises
# tests
m = self._msg
unless(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
unless(m0 is self._txt)
unless(m1 is self._im)
eq(m.get_payload(), [m0, m1])
unless(not m0.is_multipart())
unless(not m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
"""
msg = Parser().parsestr(text)
self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
outer.epilogue = ''
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
msg = MIMEText('hello world')
outer.attach(msg)
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = '\n'
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
eq = self.assertEqual
msg = self._msgobj('msg_36.txt')
eq(len(msg.get_payload()), 2)
msg1 = msg.get_payload(1)
eq(msg1.get_content_type(), 'multipart/alternative')
eq(len(msg1.get_payload()), 2)
for subpart in msg1.get_payload():
eq(subpart.get_content_type(), 'message/external-body')
eq(len(subpart.get_payload()), 1)
subsubpart = subpart.get_payload(0)
eq(subsubpart.get_content_type(), 'text/plain')
def test_double_boundary(self):
# msg_37.txt is a multipart that contains two dash-boundary's in a
# row. Our interpretation of RFC 2046 calls for ignoring the second
# and subsequent boundaries.
msg = self._msgobj('msg_37.txt')
self.assertEqual(len(msg.get_payload()), 3)
def test_nested_inner_contains_outer_boundary(self):
eq = self.ndiffAssertEqual
# msg_38.txt has an inner part that contains outer boundaries. My
# interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
# these are illegal and should be interpreted as unterminated inner
# parts.
msg = self._msgobj('msg_38.txt')
sfp = StringIO()
Iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
eq = self.ndiffAssertEqual
        # msg_39.txt is similarly evil in that it's got inner parts that use
        # the same boundary as outer parts.  Again, I believe the way this is
        # parsed is closest to the spirit of RFC 2046.
msg = self._msgobj('msg_39.txt')
sfp = StringIO()
Iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
msg = self._msgobj('msg_40.txt')
        self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"

----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit

<html></html>

----961284236552522269--
''')
def test_boundary_with_leading_space(self):
eq = self.assertEqual
        msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"

-- XXXX
Content-Type: text/plain


-- XXXX
Content-Type: text/plain

-- XXXX--
''')
self.assertTrue(msg.is_multipart())
eq(msg.get_boundary(), ' XXXX')
eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
        m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0

--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64

YXNkZg==
--===============0012394164==--""")
self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
unless = self.assertTrue
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
unless(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
unless(isinstance(inner.defects[0],
Errors.StartBoundaryNotFoundDefect))
def test_multipart_no_boundary(self):
unless = self.assertTrue
msg = self._msgobj('msg_25.txt')
unless(isinstance(msg.get_payload(), str))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], Errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
Errors.MultipartInvariantViolationDefect))
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
        # RFC 2045, section 5.2 says an invalid Content-Type yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
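    # Minimal sketch of the fallback rule exercised above: any Content-Type
    # value that cannot be parsed collapses to the RFC 2045 default of
    # text/plain, so downstream code can always rely on maintype/subtype.
    def _example_content_type_fallback(self):
        msg = Message()
        msg['Content-Type'] = 'complete gibberish'
        return msg.get_content_type()   # 'text/plain'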
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
        eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain

message 1

--BOUNDARY
Content-Type: text/plain

message 2

--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting

counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
unless = self.assertTrue
msg = self._msgobj('msg_41.txt')
unless(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], Errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
Errors.MultipartInvariantViolationDefect))
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.assertTrue(isinstance(bad.defects[0],
Errors.StartBoundaryNotFoundDefect))
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nLine 2\nLine 3'
msg = email.message_from_string(m)
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
self.assertTrue(isinstance(msg.defects[0],
Errors.FirstHeaderLineIsContinuationDefect))
eq(msg.defects[0].line, ' Line 1\n')
# Test RFC 2047 header encoding and decoding
class TestRFC2047(unittest.TestCase):
def test_rfc2047_multiline(self):
eq = self.assertEqual
        s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
 foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
dh = decode_header(s)
eq(dh, [
('Re:', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
('baz foo bar', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
        eq(str(make_header(dh)),
           """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
 =?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
def test_whitespace_eater_unicode(self):
eq = self.assertEqual
s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
dh = decode_header(s)
eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <pirard@dom.ain>', None)])
hu = unicode(make_header(dh)).encode('latin-1')
eq(hu, 'Andr\xe9 Pirard <pirard@dom.ain>')
def test_whitespace_eater_unicode_2(self):
eq = self.assertEqual
s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
dh = decode_header(s)
eq(dh, [('The', None), ('quick brown fox', 'iso-8859-1'),
('jumped over the', None), ('lazy dog', 'iso-8859-1')])
hu = make_header(dh).__unicode__()
eq(hu, u'The quick brown fox jumped over the lazy dog')
def test_rfc2047_without_whitespace(self):
s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
dh = decode_header(s)
self.assertEqual(dh, [(s, None)])
def test_rfc2047_with_whitespace(self):
s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
dh = decode_header(s)
self.assertEqual(dh, [('Sm', None), ('\xf6', 'iso-8859-1'),
('rg', None), ('\xe5', 'iso-8859-1'),
('sbord', None)])
def test_rfc2047_B_bad_padding(self):
s = '=?iso-8859-1?B?%s?='
data = [ # only test complete bytes
('dm==', 'v'), ('dm=', 'v'), ('dm', 'v'),
('dmk=', 'vi'), ('dmk', 'vi')
]
for q, a in data:
dh = decode_header(s % q)
self.assertEqual(dh, [(a, 'iso-8859-1')])
def test_rfc2047_Q_invalid_digits(self):
# issue 10004.
s = '=?iso-8659-1?Q?andr=e9=zz?='
self.assertEqual(decode_header(s),
[(b'andr\xe9=zz', 'iso-8659-1')])
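    # Illustrative sketch (not part of the original suite): decode_header()
    # splits an RFC 2047 header into (string, charset) chunks and
    # make_header() reassembles them -- the round trip the tests above lean on.
    def _example_decode_header_roundtrip(self):
        s = '=?iso-8859-1?q?hello?= world'
        chunks = decode_header(s)   # [('hello', 'iso-8859-1'), ('world', None)]
        return str(make_header(chunks))   # '=?iso-8859-1?q?hello?= world'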
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
def setUp(self):
fp = openfile('msg_11.txt')
try:
self._text = fp.read()
finally:
fp.close()
def test_type_error(self):
self.assertRaises(TypeError, MIMEMessage, 'a plain string')
def test_valid_argument(self):
eq = self.assertEqual
unless = self.assertTrue
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subpart = payload[0]
unless(subpart is m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
eq = self.assertEqual
msg1 = Message()
msg1['Subject'] = 'subpart 1'
msg2 = Message()
msg2['Subject'] = 'subpart 2'
r = MIMEMessage(msg1)
self.assertRaises(Errors.MultipartConversionError, r.attach, msg2)
def test_generate(self):
# First craft the message to be encapsulated
m = Message()
m['Subject'] = 'An enclosed message'
m.set_payload('Here is the body of the message.\n')
r = MIMEMessage(m)
r['Subject'] = 'The enclosing message'
s = StringIO()
g = Generator(s)
g.flatten(r)
        self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message

Subject: An enclosed message

Here is the body of the message.
""")
def test_parse_message_rfc822(self):
eq = self.assertEqual
unless = self.assertTrue
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
submsg = payload[0]
self.assertTrue(isinstance(submsg, Message))
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
unless = self.assertTrue
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
unless(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
eq(subpart.get_content_type(), 'text/plain')
        eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:

  Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
  Date: Sun, 23 Sep 2001 20:10:55 -0700
  From: "Ian T. Henry" <henryi@oxy.edu>
  To: SoCal Raves <scr@socal-raves.org>
  Subject: [scr] yeah for Ians!!

Your message cannot be delivered to the following recipients:

  Recipient address: jangel1@cougar.noc.ucla.edu
  Reason: recipient reached disk quota

""")
# Subpart 2 contains the machine parsable DSN information. It
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
unless(isinstance(dsn1, Message))
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
unless(isinstance(dsn2, Message))
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subsubpart = payload[0]
unless(isinstance(subsubpart, Message))
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
def test_epilogue(self):
eq = self.ndiffAssertEqual
fp = openfile('msg_21.txt')
try:
text = fp.read()
finally:
fp.close()
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = 'End of MIME message\n'
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), text)
def test_no_nl_preamble(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = ''
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"

MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

Two
--BOUNDARY--
""")
def test_default_type(self):
eq = self.assertEqual
fp = openfile('msg_30.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# Set up container
container = MIMEMultipart('digest', 'BOUNDARY')
container.epilogue = ''
# Set up subparts
subpart1a = MIMEText('message 1\n')
subpart2a = MIMEText('message 2\n')
subpart1 = MIMEMessage(subpart1a)
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0

--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0

Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

message 1

--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0

Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

message 2

--BOUNDARY--
''')
del subpart1['content-type']
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0

--BOUNDARY

Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

message 1

--BOUNDARY

Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

message 2

--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
eq = self.assertEqual
text1 = MIMEText('')
text2 = MIMEText('')
msg = MIMEMultipart(_subparts=(text1, text2))
eq(len(msg.get_payload()), 2)
eq(msg.get_payload(0), text1)
eq(msg.get_payload(1), text2)
def test_default_multipart_constructor(self):
msg = MIMEMultipart()
self.assertTrue(msg.is_multipart())
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical.  Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
data = fp.read()
finally:
fp.close()
msg = email.message_from_string(data)
return msg, data
def _idempotent(self, msg, text):
eq = self.ndiffAssertEqual
s = StringIO()
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
eq(text, s.getvalue())
def test_parse_text_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_01.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
eq(msg.get_param('charset'), 'us-ascii')
eq(msg.preamble, None)
eq(msg.epilogue, None)
self._idempotent(msg, text)
def test_parse_untyped_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_03.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
def test_simple_multipart(self):
msg, text = self._msgobj('msg_04.txt')
self._idempotent(msg, text)
def test_MIME_digest(self):
msg, text = self._msgobj('msg_02.txt')
self._idempotent(msg, text)
def test_long_header(self):
msg, text = self._msgobj('msg_27.txt')
self._idempotent(msg, text)
def test_MIME_digest_with_part_headers(self):
msg, text = self._msgobj('msg_28.txt')
self._idempotent(msg, text)
def test_mixed_with_image(self):
msg, text = self._msgobj('msg_06.txt')
self._idempotent(msg, text)
def test_multipart_report(self):
msg, text = self._msgobj('msg_05.txt')
self._idempotent(msg, text)
def test_dsn(self):
msg, text = self._msgobj('msg_16.txt')
self._idempotent(msg, text)
def test_preamble_epilogue(self):
msg, text = self._msgobj('msg_21.txt')
self._idempotent(msg, text)
def test_multipart_one_part(self):
msg, text = self._msgobj('msg_23.txt')
self._idempotent(msg, text)
def test_multipart_no_parts(self):
msg, text = self._msgobj('msg_24.txt')
self._idempotent(msg, text)
def test_no_start_boundary(self):
msg, text = self._msgobj('msg_31.txt')
self._idempotent(msg, text)
def test_rfc2231_charset(self):
msg, text = self._msgobj('msg_32.txt')
self._idempotent(msg, text)
def test_more_rfc2231_parameters(self):
msg, text = self._msgobj('msg_33.txt')
self._idempotent(msg, text)
def test_text_plain_in_a_multipart_digest(self):
msg, text = self._msgobj('msg_34.txt')
self._idempotent(msg, text)
def test_nested_multipart_mixeds(self):
msg, text = self._msgobj('msg_12a.txt')
self._idempotent(msg, text)
def test_message_external_body_idempotent(self):
msg, text = self._msgobj('msg_36.txt')
self._idempotent(msg, text)
def test_content_type(self):
eq = self.assertEqual
unless = self.assertTrue
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
params[pk] = pv
eq(params['report-type'], 'delivery-status')
eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
eq(msg.preamble, 'This is a MIME-encapsulated message.\n')
eq(msg.epilogue, '\n')
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda\n')
msg2 = msg.get_payload(1)
eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
self.assertTrue(isinstance(msg3, Message))
payload = msg3.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg4 = payload[0]
unless(isinstance(msg4, Message))
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEqual
unless = self.assertTrue
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg1 = payload[0]
self.assertTrue(isinstance(msg1, Message))
eq(msg1.get_content_type(), 'text/plain')
self.assertTrue(isinstance(msg1.get_payload(), str))
eq(msg1.get_payload(), '\n')
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
finally:
fp.close()
def test_message_from_string_with_class(self):
unless = self.assertTrue
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test_message_from_file_with_class(self):
unless = self.assertTrue
# Create a subclass
class MyMessage(Message):
pass
fp = openfile('msg_01.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test__all__(self):
module = __import__('email')
all = module.__all__
all.sort()
self.assertEqual(all, [
# Old names
'Charset', 'Encoders', 'Errors', 'Generator',
'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
'MIMENonMultipart', 'MIMEText', 'Message',
'Parser', 'Utils', 'base64MIME',
# new names
'base64mime', 'charset', 'encoders', 'errors', 'generator',
'header', 'iterators', 'message', 'message_from_file',
'message_from_string', 'mime', 'parser',
'quopriMIME', 'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(Utils.parsedate(Utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
Utils.parsedate(Utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
Utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
Utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
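    # Sketch of the round trip relied on above: formatdate() renders an
    # RFC 2822 date string and parsedate() maps it back to a time tuple;
    # only whole seconds survive the trip.
    def _example_date_roundtrip(self):
        now = time.time()
        stamp = Utils.formatdate(now)   # 'Fri, 04 May 2001 18:05:44 -0000' style
        return Utils.parsedate(stamp)[:6] == time.gmtime(now)[:6]   # True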
def test_parsedate_none(self):
self.assertEqual(Utils.parsedate(''), None)
def test_parsedate_compact(self):
# The FWS after the comma is optional
self.assertEqual(Utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
Utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(Utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(Utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = Utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = Utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_mktime_tz(self):
self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 0)), 0)
self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 1234)), -1234)
def test_parsedate_y2k(self):
"""Test for parsing a date with a two-digit year.
Parsing a date with a two-digit year should return the correct
four-digit year. RFC822 allows two-digit years, but RFC2822 (which
obsoletes RFC822) requires four-digit years.
"""
self.assertEqual(Utils.parsedate_tz('25 Feb 03 13:47:26 -0800'),
Utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'))
self.assertEqual(Utils.parsedate_tz('25 Feb 71 13:47:26 -0800'),
Utils.parsedate_tz('25 Feb 1971 13:47:26 -0800'))
def test_parseaddr_empty(self):
self.assertEqual(Utils.parseaddr('<>'), ('', ''))
self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
Utils.formataddr(('A Silly Person', 'person@dom.ain')),
'A Silly Person <person@dom.ain>')
def test_escape_dump(self):
self.assertEqual(
Utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
r'"A \(Very\) Silly Person" <person@dom.ain>')
a = r'A \(Special\) Person'
b = 'person@dom.ain'
self.assertEqual(Utils.parseaddr(Utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
            Utils.formataddr((r'Arthur \Backslash\ Foobar', 'person@dom.ain')),
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
a = r'Arthur \Backslash\ Foobar'
b = 'person@dom.ain'
self.assertEqual(Utils.parseaddr(Utils.formataddr((a, b))), (a, b))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(Utils.parseaddr(x), (a, b))
self.assertEqual(Utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(Utils.formataddr((a, b)), y)
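    # Sketch: formataddr() quotes the display name whenever it contains
    # specials such as '.', ',' or ';', which is what the tests above verify
    # from both directions.
    def _example_formataddr_quoting(self):
        return Utils.formataddr(('Doe, John', 'jxd@example.com'))
        # -> '"Doe, John" <jxd@example.com>'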
def test_parseaddr_preserves_quoted_pairs_in_addresses(self):
# issue 10005. Note that in the third test the second pair of
# backslashes is not actually a quoted pair because it is not inside a
# comment or quoted string: the address being parsed has a quoted
# string containing a quoted backslash, followed by 'example' and two
# backslashes, followed by another quoted string containing a space and
# the word 'example'. parseaddr copies those two backslashes
        # literally.  Per RFC 5322 this is not technically correct since a
        # backslash may not appear in an address outside of a quoted string.
        # It is probably a sensible Postel interpretation, though.
eq = self.assertEqual
eq(Utils.parseaddr('""example" example"@example.com'),
('', '""example" example"@example.com'))
eq(Utils.parseaddr('"\\"example\\" example"@example.com'),
('', '"\\"example\\" example"@example.com'))
eq(Utils.parseaddr('"\\\\"example\\\\" example"@example.com'),
('', '"\\\\"example\\\\" example"@example.com'))
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <foo@example.com>"""
self.assertEqual(Utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))
def test_quote_dump(self):
self.assertEqual(
Utils.formataddr(('A Silly; Person', 'person@dom.ain')),
r'"A Silly; Person" <person@dom.ain>')
def test_fix_eols(self):
eq = self.assertEqual
eq(Utils.fix_eols('hello'), 'hello')
eq(Utils.fix_eols('hello\n'), 'hello\r\n')
eq(Utils.fix_eols('hello\r'), 'hello\r\n')
eq(Utils.fix_eols('hello\r\n'), 'hello\r\n')
eq(Utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(Utils.getaddresses(['aperson@dom.ain (Al Person)',
'Bud Person <bperson@dom.ain>']),
[('Al Person', 'aperson@dom.ain'),
('Bud Person', 'bperson@dom.ain')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(Utils.getaddresses(['foo: ;']), [('', '')])
eq(Utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(Utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
[('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = Utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
eq(addrs[0][1], 'foo@bar.com')
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), 'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), Encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
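    # Sketch: a Charset instance decides the Content-Transfer-Encoding for
    # body text -- us-ascii passes straight through while utf-8 is mapped to
    # base64, which is why set_payload() above produced a base64 body.
    def _example_charset_body_encoding(self):
        return (Charset('us-ascii').get_body_encoding(),   # Encoders.encode_7or8bit
                Charset('utf-8').get_body_encoding())      # 'base64'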
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
eq = self.ndiffAssertEqual
# The Parser interface provides chunks of data to FeedParser in 8192
        # byte gulps.  SF bug #1076485 found one of those chunks inside
        # a message/delivery-status header block, which triggered an
# unreadline() of NeedMoreData.
msg = self._msgobj('msg_43.txt')
sfp = StringIO()
Iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
# Test the iterator/generators
class TestIterators(TestEmailBase):
def test_body_line_iterator(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# First a simple non-multipart message
msg = self._msgobj('msg_01.txt')
it = Iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 6)
neq(EMPTYSTRING.join(lines), msg.get_payload())
# Now a more complicated multipart
msg = self._msgobj('msg_02.txt')
it = Iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 43)
fp = openfile('msg_19.txt')
try:
neq(EMPTYSTRING.join(lines), fp.read())
finally:
fp.close()
def test_typed_subpart_iterator(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
it = Iterators.typed_subpart_iterator(msg, 'text')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 2)
eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")
def test_typed_subpart_iterator_default_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_03.txt')
it = Iterators.typed_subpart_iterator(msg, 'text', 'plain')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 1)
eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")
def test_pushCR_LF(self):
'''FeedParser BufferedSubFile.push() assumed it received complete
line endings. A CR ending one push() followed by a LF starting
the next push() added an empty line.
'''
imt = [
("a\r \n", 2),
("b", 0),
("c\n", 1),
("", 0),
("d\r\n", 1),
("e\r", 0),
("\nf", 1),
("\r\n", 1),
]
from email.feedparser import BufferedSubFile, NeedMoreData
bsf = BufferedSubFile()
om = []
nt = 0
for il, n in imt:
bsf.push(il)
nt += n
n1 = 0
while True:
ol = bsf.readline()
if ol == NeedMoreData:
break
om.append(ol)
n1 += 1
self.assertTrue(n == n1)
self.assertTrue(len(om) == nt)
self.assertTrue(''.join([il for il, n in imt]) == ''.join(om))
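    # Sketch of the BufferedSubFile protocol exercised above: push() accepts
    # arbitrary chunks, while readline() hands back complete lines and
    # returns the NeedMoreData sentinel whenever the buffer ends in a
    # partial line.
    def _example_bsf_protocol(self):
        from email.feedparser import BufferedSubFile, NeedMoreData
        bsf = BufferedSubFile()
        bsf.push('one\ntw')
        first = bsf.readline()     # 'one\n'
        partial = bsf.readline()   # NeedMoreData -- 'tw' is incomplete
        bsf.push('o\n')
        second = bsf.readline()    # 'two\n'
        return first, partial, second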
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
fp = openfile('msg_02.txt')
try:
msg = HeaderParser().parse(fp)
finally:
fp.close()
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertTrue(isinstance(msg.get_payload(), str))
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam

Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20

Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
fp = openfile('msg_26.txt', mode='rb')
try:
msg = Parser().parse(fp)
finally:
fp.close()
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
# A bug report by Andrew McNamara
lines = ['From: Andrew Person <aperson@dom.ain',
'Subject: Test',
'Date: Tue, 20 Aug 2002 16:43:45 +1000']
msg = email.message_from_string(NL.join(lines))
self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_strip_line_feed_and_carriage_return_in_headers(self):
eq = self.assertEqual
# For [ 1002475 ] email message parser doesn't handle \r\n correctly
value1 = 'text'
value2 = 'more text'
m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
value1, value2)
msg = email.message_from_string(m)
eq(msg.get('Header'), value1)
eq(msg.get('Next-Header'), value2)
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 3)
keys = msg.keys()
keys.sort()
eq(keys, ['!"#QUX;~', '>From', 'From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
def test_CRLFLF_at_end_of_part(self):
# issue 5610: feedparser should not eat two chars from body part ending
# with "\r\n\n".
m = (
"From: foo@bar.com\n"
"To: baz\n"
"Mime-Version: 1.0\n"
"Content-Type: multipart/mixed; boundary=BOUNDARY\n"
"\n"
"--BOUNDARY\n"
"Content-Type: text/plain\n"
"\n"
"body ending with CRLF newline\r\n"
"\n"
"--BOUNDARY--\n"
)
msg = email.message_from_string(m)
self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n'))
class TestBase64(unittest.TestCase):
def test_len(self):
eq = self.assertEqual
eq(base64MIME.base64_len('hello'),
len(base64MIME.encode('hello', eol='')))
for size in range(15):
if size == 0 : bsize = 0
elif size <= 3 : bsize = 4
elif size <= 6 : bsize = 8
elif size <= 9 : bsize = 12
elif size <= 12: bsize = 16
else : bsize = 20
eq(base64MIME.base64_len('x'*size), bsize)
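    # Sketch of the arithmetic behind the bsize table above: base64 emits
    # four output characters for every three input bytes, rounded up.
    def _example_base64_len(self, nbytes):
        return ((nbytes + 2) // 3) * 4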
def test_decode(self):
eq = self.assertEqual
eq(base64MIME.decode(''), '')
eq(base64MIME.decode('aGVsbG8='), 'hello')
eq(base64MIME.decode('aGVsbG8=', 'X'), 'hello')
eq(base64MIME.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(base64MIME.encode(''), '')
eq(base64MIME.encode('hello'), 'aGVsbG8=\n')
# Test the binary flag
eq(base64MIME.encode('hello\n'), 'aGVsbG8K\n')
eq(base64MIME.encode('hello\n', 0), 'aGVsbG8NCg==\n')
# Test the maxlinelen arg
eq(base64MIME.encode('xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
# Test the eol argument
eq(base64MIME.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")
def test_header_encode(self):
eq = self.assertEqual
he = base64MIME.header_encode
eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True),
'=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
# Test the maxlinelen argument
        eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
 =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
 =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
 =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
 =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
 =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
        # Test the eol argument
        eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
 =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
 =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
 =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
 =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
 =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
class TestQuopri(unittest.TestCase):
def setUp(self):
self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
[chr(x) for x in range(ord('A'), ord('Z')+1)] + \
[chr(x) for x in range(ord('0'), ord('9')+1)] + \
['!', '*', '+', '-', '/', ' ']
self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
self.blit.remove('=')
self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
def test_header_quopri_check(self):
for c in self.hlit:
self.assertFalse(quopriMIME.header_quopri_check(c))
for c in self.hnon:
self.assertTrue(quopriMIME.header_quopri_check(c))
def test_body_quopri_check(self):
for c in self.blit:
self.assertFalse(quopriMIME.body_quopri_check(c))
for c in self.bnon:
self.assertTrue(quopriMIME.body_quopri_check(c))
def test_header_quopri_len(self):
eq = self.assertEqual
hql = quopriMIME.header_quopri_len
enc = quopriMIME.header_encode
for s in ('hello', 'h@e@l@l@o@'):
# Empty charset and no line-endings. 7 == RFC chrome
eq(hql(s), len(enc(s, charset='', eol=''))-7)
for c in self.hlit:
eq(hql(c), 1)
for c in self.hnon:
eq(hql(c), 3)
def test_body_quopri_len(self):
eq = self.assertEqual
bql = quopriMIME.body_quopri_len
for c in self.blit:
eq(bql(c), 1)
for c in self.bnon:
eq(bql(c), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quopriMIME.unquote(quopriMIME.quote(c)), c)
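    # Sketch: quote() maps a character to its uppercase '=XX' hex form and
    # unquote() reverses it, hence the exact round trip over all 256 byte
    # values above.
    def _example_qp_quote_roundtrip(self):
        return quopriMIME.quote('='), quopriMIME.unquote('=3D')   # ('=3D', '=')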
def test_header_encode(self):
eq = self.assertEqual
he = quopriMIME.header_encode
eq(he('hello'), '=?iso-8859-1?q?hello?=')
eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
# Test a non-ASCII character
eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
# Test the maxlinelen argument
        eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
 =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
 =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
 =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
 =?iso-8859-1?q?x_xxxx_xxxx_?=""")
        # Test the eol argument
        eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
 =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
 =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
 =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
 =?iso-8859-1?q?x_xxxx_xxxx_?=""")
def test_decode(self):
eq = self.assertEqual
eq(quopriMIME.decode(''), '')
eq(quopriMIME.decode('hello'), 'hello')
eq(quopriMIME.decode('hello', 'X'), 'hello')
eq(quopriMIME.decode('hello\nworld', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(quopriMIME.encode(''), '')
eq(quopriMIME.encode('hello'), 'hello')
# Test the binary flag
eq(quopriMIME.encode('hello\r\nworld'), 'hello\nworld')
eq(quopriMIME.encode('hello\r\nworld', 0), 'hello\nworld')
# Test the maxlinelen arg
eq(quopriMIME.encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
# Test the eol argument
eq(quopriMIME.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
        eq(quopriMIME.encode("""\
one line

two line"""), """\
one line

two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
def tearDown(self):
from email import Charset as CharsetModule
try:
del CharsetModule.CHARSETS['fake']
except KeyError:
pass
def test_idempotent(self):
eq = self.assertEqual
# Make sure us-ascii = no Unicode conversion
c = Charset('us-ascii')
s = 'Hello World!'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
# test 8-bit idempotency with us-ascii
s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
def test_body_encode(self):
eq = self.assertEqual
# Try a charset with QP body encoding
c = Charset('iso-8859-1')
eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
# Try a charset with Base64 body encoding
c = Charset('utf-8')
eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
# Try a charset with None body encoding
c = Charset('us-ascii')
eq('hello world', c.body_encode('hello world'))
# Try the convert argument, where input codec != output codec
c = Charset('euc-jp')
# With apologies to Tokio Kikuchi ;)
try:
eq('\x1b$B5FCO;~IW\x1b(B',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
except LookupError:
# We probably don't have the Japanese codecs installed
pass
# Testing SF bug #625509, which we have to fake, since there are no
# built-in encodings where the header encoding is QP but the body
# encoding is not.
from email import Charset as CharsetModule
CharsetModule.add_charset('fake', CharsetModule.QP, None)
c = Charset('fake')
eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
def test_unicode_charset_name(self):
charset = Charset(u'us-ascii')
self.assertEqual(str(charset), 'us-ascii')
self.assertRaises(Errors.CharsetError, Charset, 'asc\xffii')
def test_codecs_aliases_accepted(self):
charset = Charset('utf8')
self.assertEqual(str(charset), 'utf-8')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append(' Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append('Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
self.assertTrue(len(l) <= 76)
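    # Sketch: Header.encode() folds long values at maxlinelen, splitting on
    # the given splitchars; continuation lines begin with a single space,
    # which is why the test above splits the encoded value on '\n '.
    def _example_header_folding(self):
        h = Header('word ' * 30, maxlinelen=40)
        folded = h.encode(splitchars=' ')
        return all(len(line) <= 40 for line in folded.split('\n '))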
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode()
        eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
 =?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
 =?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
 =?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
 =?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
 =?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
 =?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
 =?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
 =?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
 =?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
 =?utf-8?b?44CC?=""")
eq(decode_header(enc),
[(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
(utf8_head, "utf-8")])
ustr = unicode(h)
eq(ustr.encode('utf-8'),
'Die Mieter treten hier ein werden mit einem Foerderband '
'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, enc)
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, '=?iso-8859-1?q?foo?=')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
h = Header(hstr)
        eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
 character boundary to test the non-default behavior''')
        h = Header(hstr, header_name='Subject')
        eq(h.encode(), '''\
A very long line that must get split to something other than at the
 76th character boundary to test the non-default behavior''')
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, '=?iso-8859-1?q?hello?=')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header(u'p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
eq(str(Header(x, errors='replace')), x)
h.append(x, errors='replace')
eq(str(h), x)
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_eater(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
raises(Errors.HeaderParseError, decode_header, s)
# Issue 1078919
def test_ascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename='bud.gif')
self.assertEqual('attachment; filename="bud.gif"',
msg['Content-Disposition'])
def test_nonascii_add_header_via_triple(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename=('iso-8859-1', '', 'Fu\xdfballer.ppt'))
self.assertEqual(
'attachment; filename*="iso-8859-1\'\'Fu%DFballer.ppt"',
msg['Content-Disposition'])
def test_encode_unaliased_charset(self):
# Issue 1379416: when the charset has no output conversion,
# output was accidentally getting coerced to unicode.
res = Header('abc','iso-8859-2').encode()
self.assertEqual(res, '=?iso-8859-2?q?abc?=')
self.assertIsInstance(res, str)
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
        self.ndiffAssertEqual(msg.as_string(), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
	title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"

Hi,

Do you like this message?

-Me
""")
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
        eq(msg.as_string(), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
	title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"

Hi,

Do you like this message?

-Me
""")
def test_rfc2231_get_content_charset(self):
eq = self.assertEqual
msg = self._msgobj('msg_32.txt')
eq(msg.get_content_charset(), 'us-ascii')
def test_rfc2231_no_language_or_charset(self):
m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
def test_rfc2231_no_language_or_charset_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_partly_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_boundary(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
# This is a nonsensical charset value, but tests the code anyway
m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_content_charset(),
'this is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
u'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, None)
eq(language, None)
eq(s, "Frank's Document")
def test_rfc2231_single_tick_in_filename(self):
m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "Frank's Document")
def test_rfc2231_tick_attack_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, "Frank's Document")
def test_rfc2231_tick_attack(self):
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
def test_rfc2231_no_extended_values(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
msg = email.message_from_string(m)
eq(msg.get_param('name'), "Frank's Document")
def test_rfc2231_encoded_then_unencoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def test_rfc2231_unencoded_then_encoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
# Tests to ensure that signed parts of an email are completely preserved, as
# required by RFC1847 section 2.1. Note that these are incomplete, because the
# email package does not currently always preserve the body. See issue 1670765.
class TestSigned(TestEmailBase):
def _msg_and_obj(self, filename):
fp = openfile(findfile(filename))
try:
original = fp.read()
msg = email.message_from_string(original)
finally:
fp.close()
return original, msg
def _signed_parts_eq(self, original, result):
# Extract the first mime part of each message
import re
repart = re.compile(r'^--([^\n]+)\n(.*?)\n--\1$', re.S | re.M)
inpart = repart.search(original).group(2)
outpart = repart.search(result).group(2)
self.assertEqual(outpart, inpart)
def test_long_headers_as_string(self):
original, msg = self._msg_and_obj('msg_45.txt')
result = msg.as_string()
self._signed_parts_eq(original, result)
def test_long_headers_flatten(self):
original, msg = self._msg_and_obj('msg_45.txt')
fp = StringIO()
Generator(fp).flatten(msg)
result = fp.getvalue()
self._signed_parts_eq(original, result)
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
moto-timo/ironpython3
|
refs/heads/master
|
Tests/interop/net/field/__init__.py
|
3
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
* Where the field is defined
- value type (enum), reference type,
- generic value/reference type (bound with value/reference type)
* Field type
- value type: built-in number types, enum, user defined struct
- reference type, interface?
- Nullable<T>
- array of something
  - generic type parameter and its constructed type
* Field modifier
- const (literal),
- readonly
- static / instance
* set/get via Type|object (dot) (Static|Instance) Field
* set the value to something of a different type, or to none
  - conversion succeeds, or fails
* repeat the above from the derived class or its instance
* (python) __set__/__get__/__delete__/__str__/__repr__
* (python) Type.__dict__['Field'], and possible __set__/__get__, GetValue/SetValue
* Use field as by-ref arguments
* Other operations against field
- Augment: +=, <<=
- Continuous dot operator
- Call operator: ()
'''
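# A minimal sketch (in comments, since this __init__ only documents the test
# matrix) of the access patterns listed above. ``SomeType``, ``StaticField``
# and ``InstanceField`` are hypothetical .NET names:
#
#     SomeType.StaticField = 1                  # set a static field via the type
#     o = SomeType()
#     o.InstanceField += 2                      # augmented assignment on an instance
#     descriptor = SomeType.__dict__['InstanceField']
#     descriptor.SetValue(o, 3)                 # reflected set via the descriptor
#     print(descriptor.GetValue(o))             # reflected get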
|
pinterest/kingpin
|
refs/heads/master
|
kingpin/tests/manageddata/test_managed_hashmap.py
|
1
|
#!/usr/bin/python
#
# Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from unittest import TestCase
from kingpin.manageddata.managed_datastructures import ManagedHashMap
import mock_zk_config_manager
TEST_ZK_HOSTS = ['observerzookeeper010:2181']
TEST_AWS_KEYFILE = "test_keyfile"
TEST_S3_BUCKET = "test_bucket"
class ConfigBasedManagedHashMapTestCase(TestCase):
def setUp(self):
self.mock_zk_config_manager = mock.patch(
"kingpin.manageddata.managed_datastructures.ZKConfigManager",
mock_zk_config_manager.MockZkConfigManager)
self.mock_zk_config_manager.start()
self.managed_map = ManagedHashMap(
'test_domain', 'test_key', 'test_name', 'test_description',
TEST_ZK_HOSTS, TEST_AWS_KEYFILE, TEST_S3_BUCKET)
def tearDown(self):
self.mock_zk_config_manager.stop()
    # Test map operations: set, set_many, get, remove, etc.
def test_map_operations(self):
self.assertEqual([], self.managed_map.get_keys())
self.assertEqual(1, self.managed_map.set('key_foo', 'val_bar'))
self.assertEqual('val_bar', self.managed_map.get('key_foo'))
self.assertEqual(None, self.managed_map.get('key_foo2'))
many_items = {'key_foo2': 'val_bar2', 'key_foo3': 'val_bar3'}
self.assertEqual(2, self.managed_map.set_many(many_items))
self.assertEqual('val_bar2', self.managed_map.get('key_foo2'))
self.assertEqual(0, self.managed_map.set_many(many_items))
many_items['key_foo3'] = 'val_bar3_3'
self.assertEqual(1, self.managed_map.set_many(many_items))
get_many = self.managed_map.get_many(['key_foo2', 'key_foo3'])
self.assertEqual(2, len(get_many))
self.assertEqual('val_bar2', get_many['key_foo2'])
self.assertEqual('val_bar3_3', get_many['key_foo3'])
self.assertEqual(True, self.managed_map.contains('key_foo3'))
self.assertEqual(False, self.managed_map.contains('key_foo5'))
self.assertEqual(True, isinstance(self.managed_map.get_all(), dict))
self.assertEqual(3, len(self.managed_map.get_all()))
self.assertEqual(0, self.managed_map.remove('unknown_key'))
self.assertEqual(1, self.managed_map.remove('key_foo'))
self.assertEqual(2, len(self.managed_map.get_keys()))
self.managed_map.delete()
self.assertEqual([], self.managed_map.get_keys())
# Check set_map().
many_items2 = {'key_foo12': 'val_bar12', 'key_foo13': 'val_bar13'}
self.assertEqual(2, self.managed_map.set_map(many_items2))
self.assertEqual(many_items2, self.managed_map.get_all())
    # Test map operations when key/value types are specified.
def test_type_enforcement(self):
mm = ManagedHashMap(
'test_domain', 'test_key1', 'test_name', 'test_description',
TEST_ZK_HOSTS, TEST_AWS_KEYFILE, TEST_S3_BUCKET,
key_type=str, value_type=int)
self.assertTrue(mm.key_type is str)
self.assertTrue(mm.value_type is int)
mm.set_map({})
self.assertEqual([], mm.get_keys())
self.assertEqual(1, mm.set("foo", "123"))
self.assertEqual(1, mm.set("bar", 456))
self.assertEqual(1, mm.set(789, 789))
self.assertEqual(set(["foo", "bar", "789"]), set(mm.get_keys()))
self.assertEqual(123, mm.get("foo"))
self.assertEqual(456, mm.get("bar"))
self.assertEqual(789, mm.get("789"))
# operations with invalid values
self.assertRaises(ValueError, mm.set, "abc", "xyz")
many_items = {"aaa": "111", "bbb": "bla"}
self.assertRaises(ValueError, mm.set_many, many_items)
self.assertRaises(ValueError, mm.set_map, many_items)
self.assertEqual(set(["foo", "bar", "789"]), set(mm.get_keys())) # invalid operations do not change data
many_items = {"aaa": "111", "bbb": 222} # valid new dict
mm.set_map(many_items)
self.assertEqual(set(["aaa", "bbb"]), set(mm.get_keys()))
# test remove
mm = ManagedHashMap(
'test_domain', 'test_key2', 'test_name', 'test_description',
TEST_ZK_HOSTS, TEST_AWS_KEYFILE, TEST_S3_BUCKET,
key_type=int, value_type=str)
mm.set_map({111: "aaa", 222: "bbb", "333": "ccc"})
self.assertEqual(set([111, 222, 333]), set(mm.get_keys()))
self.assertRaises(ValueError, mm.remove, "xxx")
self.assertEqual(set([111, 222, 333]), set(mm.get_keys()))
        self.assertEqual(1, mm.remove("111"))  # a string key is coerced to the declared key type
self.assertEqual(set([222, 333]), set(mm.get_keys()))
|
ahmed-mahran/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/auth/tests/custom_user.py
|
185
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager,
AbstractBaseUser,
AbstractUser,
UserManager,
PermissionsMixin,
Group,
Permission,
)
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
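# A minimal usage sketch of the manager above (the addresses and dates are
# hypothetical, not part of the test fixtures):
#
#     from datetime import date
#     user = CustomUser.custom_objects.create_user(
#         'alice@example.com', date(1990, 1, 1), password='pwd')
#     admin = CustomUser.custom_objects.create_superuser(
#         'admin@example.com', 'pwd', date(1980, 1, 1))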
# At this point, temporarily remove the groups and user_permissions M2M
# fields from the AbstractUser class, so they don't clash with the
# related_name sets.
old_au_local_m2m = AbstractUser._meta.local_many_to_many
old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
# The CustomPermissionsUser uses email as the identifier, but uses the normal
# Django permissions model. This allows us to check that the PermissionsMixin
# includes everything that is needed to interact with the ModelBackend.
class CustomPermissionsUserManager(CustomUserManager):
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_superuser = True
u.save(using=self._db)
return u
class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
date_of_birth = models.DateField()
custom_objects = CustomPermissionsUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
class IsActiveTestUser1(AbstractBaseUser):
"""
This test user class and derivatives test the default is_active behavior
"""
username = models.CharField(max_length=30, unique=True)
custom_objects = BaseUserManager()
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
# the is_active attr is provided by AbstractBaseUser
class CustomUserNonUniqueUsername(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30)
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
# Undo swap hack
AbstractUser._meta.local_many_to_many = old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = old_pm_local_m2m
|
samuto/Honeybee
|
refs/heads/master
|
src/Honeybee_Annual Daylight Simulation.py
|
1
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Analysis Recipe for Annual Daylighting Simulation
-
Provided by Honeybee 0.0.57
Args:
north_: Input a vector to be used as a true North direction for the sun path or a number between 0 and 360 that represents the degrees off from the y-axis to make North. The default North direction is set to the Y-axis (0 degrees).
_epwWeatherFile: epw weather file address on your system
_testPoints: Test points
ptsVectors_: Point vectors
_radParameters_: Radiance parameters
_DSParameters_: Daysim parameters
Returns:
analysisRecipe: Recipe for annual climate based daylighting simulation
"""
ghenv.Component.Name = "Honeybee_Annual Daylight Simulation"
ghenv.Component.NickName = 'annualDaylightSimulation'
ghenv.Component.Message = 'VER 0.0.57\nJUL_06_2015'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "03 | Daylight | Recipes"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import scriptcontext as sc
import Rhino as rc
import Grasshopper.Kernel as gh
import os
import math
def isAllNone(dataList):
for item in dataList:
if item!=None: return False
return True
def main():
north_ = 0
# check for Honeybee
if not sc.sticky.has_key('honeybee_release') or not sc.sticky.has_key('ladybug_release'):
msg = "You should first let Honeybee and Ladybug to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, msg)
return
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return
lb_preparation = sc.sticky["ladybug_Preparation"]()
if north_!=None:
northAngle, northVector = lb_preparation.angle2north(north_)
else:
northAngle = 0
DLAnalysisRecipe = sc.sticky["honeybee_DLAnalysisRecipe"]
analysisRecipe = DLAnalysisRecipe(2, _epwWeatherFile, _testPoints, ptsVectors_,
_radParameters_, _DSParameters_, testMesh_, math.degrees(northAngle), ghenv.Component)
if (_testPoints.DataCount==0 or isAllNone(_testPoints.AllData())) \
and not (_DSParameters_ and _DSParameters_.runAnnualGlare \
and _DSParameters_.onlyAnnualGlare):
analysisRecipe = None
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "testPoints are missing!")
# generate the vectors if the vectors are not there
elif len(analysisRecipe.vectors)==0:
analysisRecipe.vectors = []
for ptListCount, ptList in enumerate(analysisRecipe.testPts):
analysisRecipe.vectors.append([])
for pt in ptList:
analysisRecipe.vectors[ptListCount].append(rc.Geometry.Vector3d.ZAxis)
# add a single test point if it is only glare analysis so Daysim won't crash
if (_DSParameters_ and _DSParameters_.runAnnualGlare \
and _DSParameters_.onlyAnnualGlare):
analysisRecipe.testPts = [[rc.Geometry.Point3d.Origin]]
analysisRecipe.vectors = [[rc.Geometry.Vector3d.ZAxis]]
if not os.path.isfile(_epwWeatherFile):
analysisRecipe = None
print "Can't find the weather file at: " + _epwWeatherFile
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Can't find the weather file at: " + _epwWeatherFile)
return analysisRecipe
if _epwWeatherFile and _testPoints:
_testPoints.SimplifyPaths()
ptsVectors_.SimplifyPaths()
analysisRecipe = main()
|
githubmlai/numpy
|
refs/heads/master
|
numpy/f2py/crackfortran.py
|
53
|
#!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
     'pymethoddef':<multi-line block>
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which the current variable depends
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
        D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import pprint
import os
import copy
import platform
from . import __version__
from .auxfuncs import *
f2py_version = __version__.version
# Global flags:
strictf77=1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform='fix' # 'fix','free'
quiet=0 # Be verbose if 0 (Obsolete: not used any more)
verbose=1 # Be quiet if 0, extra verbose if > 1.
tabchar=4*' '
pyffilename=''
f77modulename=''
skipemptyends=0 # for old F77 programs without 'program' statement
ignorecontains=1
dolowercase=1
debug=[]
# Global variables
groupcounter=0
grouplist={groupcounter:[]}
neededmodule=-1
expectbegin=1
skipblocksuntil=-1
usermodules=[]
f90modulevars={}
gotnextfile=1
filepositiontext=''
currentfilename=''
skipfunctions=[]
skipfuncs=[]
onlyfuncs=[]
include_paths=[]
previous_context = None
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin, \
skipblocksuntil, usermodules, f90modulevars, gotnextfile, \
filepositiontext, currentfilename, skipfunctions, skipfuncs, \
onlyfuncs, include_paths, previous_context, \
strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename, \
f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4*' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter:[]}
neededmodule =-1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
###### Some helper functions
def show(o,f=0):pprint.pprint(o)
errmess=sys.stderr.write
def outmess(line,flag=1):
global filepositiontext
if not verbose: return
if not quiet:
if flag:sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE=50
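# Fortran default implicit typing: names starting with i-n are INTEGER,
# everything else (here including '$' and '_') defaults to REAL.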
defaultimplicitrules={}
for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'}
for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'}
del c
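# Fortran identifiers that collide with C keywords or f2py helper names are
# renamed to '<name>_bn'; invbadnames keeps the inverse map to undo it.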
badnames={}
invbadnames={}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n]=n+'_bn'
invbadnames[n+'_bn']=n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names): return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\
%(name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names): return [undo_rmbadname1(_m) for _m in names]
def getextension(name):
i=name.rfind('.')
if i==-1: return ''
if '\\' in name[i:]: return ''
if '/' in name[i:]: return ''
return name[i+1:]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file, 'r')
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n>0 and line:
if line[0]!='!' and line.strip():
n -= 1
if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&':
result = 1
break
line = f.readline()
f.close()
return result
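# For example (assuming typical inputs): a first line of '! -*- f90 -*-'
# marks the file free-format immediately, while a trailing '&' continuation
# or a free-form statement start within the first 15 code lines also flips
# the result to free format.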
####### Read fortran (77,90) code
def readfortrancode(ffile,dowithline=show,istop=1):
"""
Read fortran codes from files and
1) Get rid of comments, line continuations, and empty lines; lower cases.
2) Call dowithline(line) on every line.
3) Recursively call itself when statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase, include_paths
if not istop:
saveglobals=gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile==[]: return
localdolowercase = dolowercase
cont=0
finalline=''
ll=''
commentline=re.compile(r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1=re.compile(r'(?P<line>.*)&\s*\Z')
cont2=re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop: dowithline('', -1)
ll, l1='', ''
spacedigits=[' '] + [str(_m) for _m in range(10)]
filepositiontext=''
fin=fileinput.FileInput(ffile)
while True:
l=fin.readline()
if not l: break
if fin.isfirstline():
filepositiontext=''
currentfilename=fin.filename()
gotnextfile=1
l1=l
strictf77=0
sourcecodeform='fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77=1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform='free'
if strictf77: beginpattern=beginpattern77
else: beginpattern=beginpattern90
outmess('\tReading file %s (format:%s%s)\n'\
%(repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l=l.expandtabs().replace('\xa0', ' ')
while not l=='': # Get rid of newline characters
if l[-1] not in "\n\r\f": break
l=l[:-1]
if not strictf77:
r=commentline.match(l)
if r:
l=r.group('line')+' ' # Strip comments starting with `!'
rl=r.group('rest')
if rl[:4].lower()=='f2py': # f2py directive
l = l + 4*' '
r=commentline.match(rl[4:])
if r: l=l+r.group('line')
else: l = l + rl[4:]
if l.strip()=='': # Skip empty line
cont=0
continue
if sourcecodeform=='fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower()=='f2py': # f2py directive
l=' '+l[5:]
else: # Skip comment line
cont=0
continue
elif strictf77:
if len(l)>72: l=l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
# Continuation of a previous line
ll=ll+l[6:]
finalline=''
origfinalline=''
else:
if not strictf77:
# F90 continuation
r=cont1.match(l)
if r: l=r.group('line') # Continuation follows ..
if cont:
ll=ll+cont2.match(l).group('line')
finalline=''
origfinalline=''
else:
l=' '+l[5:] # clean up line beginning from possible digits.
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline=ll
ll=l
cont=(r is not None)
else:
l=' '+l[5:] # clean up line beginning from possible digits.
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline =ll
ll=l
elif sourcecodeform=='free':
if not cont and ext=='.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess('Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r=cont1.match(l)
if r: l=r.group('line') # Continuation follows ..
if cont:
ll=ll+cont2.match(l).group('line')
finalline=''
origfinalline=''
else:
if localdolowercase: finalline=ll.lower()
else: finalline=ll
origfinalline =ll
ll=l
cont=(r is not None)
else:
raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform))
filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1)
m=includeline.match(origfinalline)
if m:
fn=m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1=ll
if localdolowercase:
finalline=ll.lower()
else: finalline=ll
origfinalline = ll
filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1)
m=includeline.match(origfinalline)
if m:
fn=m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext=''
fin.close()
if istop: dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase=saveglobals
########### Crack line
beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
r'\s*(?P<this>(\b(%s)\b))'+ \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern=re.compile(beforethisafter%('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit=re.compile(beforethisafter%('', fortrantypes+'|static|automatic|undefined', fortrantypes+'|static|automatic|undefined', '.*'), re.I)
#
functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77=r'program|block\s*data'
beginpattern77=re.compile(beforethisafter%('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90=re.compile(beforethisafter%('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern=re.compile(beforethisafter%('', groupends, groupends, '[\w\s]*'), re.I), 'end'
#endifs='end\s*(if|do|where|select|while|forall)'
endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern=re.compile(beforethisafter%('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
#
implicitpattern=re.compile(beforethisafter%('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern=re.compile(beforethisafter%('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern=re.compile(beforethisafter%('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern=re.compile(beforethisafter%('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern=re.compile(beforethisafter%('', 'required', 'required', '.*'), re.I), 'required'
publicpattern=re.compile(beforethisafter%('', 'public', 'public', '.*'), re.I), 'public'
privatepattern=re.compile(beforethisafter%('', 'private', 'private', '.*'), re.I), 'private'
intrinsicpattern=re.compile(beforethisafter%('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern=re.compile(beforethisafter%('', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
parameterpattern=re.compile(beforethisafter%('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
datapattern=re.compile(beforethisafter%('', 'data', 'data', '.*'), re.I), 'data'
callpattern=re.compile(beforethisafter%('', 'call', 'call', '.*'), re.I), 'call'
entrypattern=re.compile(beforethisafter%('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern=re.compile(beforethisafter%('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern=re.compile(beforethisafter%('', 'common', 'common', '.*'), re.I), 'common'
usepattern=re.compile(beforethisafter%('', 'use', 'use', '.*'), re.I), 'use'
containspattern=re.compile(beforethisafter%('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern=re.compile(beforethisafter%('', 'format', 'format', '.*'), re.I), 'format'
## Non-fortran and f2py-specific statements
f2pyenhancementspattern=re.compile(beforethisafter%('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I|re.S), 'f2pyenhancements'
multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
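# For example (a sketch): _simplifyargs('a,b(1),c') -> 'a,b_1_,c'; outer
# commas are kept while parentheses and inner commas become underscores.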
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line,reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
    reset=1 --- final check if a mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist, gotnextfile,\
filepositiontext, currentfilename, neededmodule, expectbegin, skipblocksuntil,\
skipemptyends, previous_context
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
assert reset==0, repr(reset) # XXX: non-zero reset values need testing
crackline(l, reset)
return
if reset<0:
groupcounter=0
groupname={groupcounter:''}
groupcache={groupcounter:{}}
grouplist={groupcounter:[]}
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['block']=''
groupcache[groupcounter]['name']=''
neededmodule=-1
skipblocksuntil=-1
return
if reset>0:
fl=0
if f77modulename and neededmodule==groupcounter: fl=2
while groupcounter>fl:
outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter), repr(groupname)))
outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1
if f77modulename and neededmodule==groupcounter:
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end interface
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end module
neededmodule=-1
return
if line=='': return
flag=0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
                intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag=flag+1
if not m:
re_1 = crackline_re_1
if 0<=skipblocksuntil<=groupcounter:return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name=invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line='callfun %s(%s) result (%s)'%(name, a, m2.group('result'))
else: line='callfun %s(%s)'%(name, a)
m = callfunpattern[0].match(line)
if not m:
outmess('crackline: could not resolve function call for line=%s.\n'%repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n'%(groupcounter))
return
elif pat[1]=='end':
if 0<=skipblocksuntil<groupcounter:
groupcounter=groupcounter-1
if skipblocksuntil<=groupcounter: return
if groupcounter<=0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.' \
% (groupcounter))
m1 = beginpattern[0].match((line))
if (m1) and (not m1.group('this')==groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' % \
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil==groupcounter:
skipblocksuntil=-1
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1
if not skipemptyends:
expectbegin=1
elif pat[1] == 'begin':
if 0<=skipblocksuntil<=groupcounter:
groupcounter=groupcounter+1
return
gotnextfile=0
analyzeline(m, pat[1], line)
expectbegin=0
elif pat[1]=='endif':
pass
elif pat[1]=='contains':
if ignorecontains: return
if 0<=skipblocksuntil<=groupcounter: return
skipblocksuntil=groupcounter
else:
if 0<=skipblocksuntil<=groupcounter:return
analyzeline(m, pat[1], line)
def markouterparen(line):
l='';f=0
for c in line:
if c=='(':
f=f+1
if f==1: l=l+'@(@'; continue
elif c==')':
f=f-1
if f==0: l=l+'@)@'; continue
l=l+c
return l
def markoutercomma(line,comma=','):
l='';f=0
cc=''
for c in line:
if (not cc or cc==')') and c=='(':
f=f+1
cc = ')'
elif not cc and c=='\'' and (not l or l[-1]!='\\'):
f=f+1
cc = '\''
elif c==cc:
f=f-1
if f==0:
cc=''
elif c==comma and f==0:
l=l+'@'+comma+'@'
continue
l=l+c
assert not f, repr((f, line, l, cc))
return l
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
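# Examples (sketch): markouterparen('foo(a,(b,c)),bar') gives
# 'foo@(@a,(b,c)@)@,bar' and markoutercomma('a,(b,c),d') gives
# 'a@,@(b,c)@,@d' -- only depth-zero delimiters are marked, so callers can
# split on '@,@' or strip '@(@'/'@)@' with unmarkouterparen().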
def appenddecl(decl,decl2,force=1):
if not decl: decl={}
if not decl2: return decl
if decl is decl2: return decl
for k in list(decl2.keys()):
if k=='typespec':
if force or k not in decl:
decl[k]=decl2[k]
elif k=='attrspec':
for l in decl2[k]:
decl=setattrspec(decl, l, force)
elif k=='kindselector':
decl=setkindselector(decl, decl2[k], force)
elif k=='charselector':
decl=setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k]=decl2[k]
elif k=='note':
pass
elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
errmess('appenddecl: "%s" not implemented.\n'%k)
else:
raise Exception('appenddecl: Unknown variable definition key:' + \
str(k))
return decl
selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
line = markouterparen(line)
m1=nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1=callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
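# Example (sketch): _resolvenameargspattern('foo(a, b) result (r)') returns
# ('foo', 'a, b', 'r', None); a plain 'foo(a, b)' leaves the result slot None.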
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext,\
currentfilename, f77modulename, neededinterface, neededmodule, expectbegin,\
gotnextfile, previous_context
block=m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter<1:
newname=os.path.basename(currentfilename).split('.')[0]
outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname)
gotnextfile=0
groupcounter=groupcounter+1
groupname[groupcounter]='program'
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['block']='program'
groupcache[groupcounter]['name']=newname
groupcache[groupcounter]['from']='fromsky'
expectbegin=0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I): block='block data'
if re.match(r'python\s*module', block, re.I): block='python module'
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is None:
if block=='block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
else: args=[]
if '' in args:
while '' in args:
args.remove('')
outmess('analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule=0
needinterface=0
if case in ['call', 'callfun']:
needinterface=1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name']==name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block={'call':'subroutine','callfun':'function'}[case]
if f77modulename and neededmodule==-1 and groupcounter<=1:
neededmodule=groupcounter+2
needmodule=1
if block != 'interface':
needinterface=1
# Create new block(s)
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
if needmodule:
if verbose>1:
outmess('analyzeline: Creating module block %s\n'%repr(f77modulename), 0)
groupname[groupcounter]='module'
groupcache[groupcounter]['block']='python module'
groupcache[groupcounter]['name']=f77modulename
groupcache[groupcounter]['from']=''
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
if needinterface:
if verbose>1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter), 0)
groupname[groupcounter]='interface'
groupcache[groupcounter]['block']='interface'
groupcache[groupcounter]['name']='unknown_interface'
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name'])
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcounter=groupcounter+1
groupcache[groupcounter]={}
grouplist[groupcounter]=[]
groupname[groupcounter]=block
groupcache[groupcounter]['block']=block
if not name: name='unknown_'+block
groupcache[groupcounter]['prefix']=m.group('before')
groupcache[groupcounter]['name']=rmbadname1(name)
groupcache[groupcounter]['result']=result
if groupcounter==1:
groupcache[groupcounter]['from']=currentfilename
else:
if f77modulename and groupcounter==3:
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], currentfilename)
else:
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args']=args
groupcache[groupcounter]['body']=[]
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['interfaced']=[]
groupcache[groupcounter]['vars']={}
groupcache[groupcounter]['entry']={}
# end of creation
if block=='type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter-2]['externals']:
groupcache[groupcounter-2]['externals'].append(name)
groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars'])
#try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']]
#except: pass
try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except: pass
if block in ['function', 'subroutine']: # set global attributes
try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter-2]['vars'][''])
except: pass
if case=='callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name==result:
groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
#if groupcounter>1: # name is interfaced
try: groupcache[groupcounter-2]['interfaced'].append(name)
except: pass
if block=='function':
t=typespattern[0].match(m.group('before')+' '+name)
if t:
typespec, selector, attr, edecl=cracktypespec0(t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end routine
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter=groupcounter-1 # end interface
elif case=='entry':
name, args, result, bind=_resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
else: args=[]
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case=='type':
typespec, selector, attr, edecl=cracktypespec0(block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
    elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl=groupcache[groupcounter]['vars']
ll=m.group('after').strip()
i=ll.find('::')
if i<0 and case=='intent':
i=markouterparen(ll).find('@)@')-2
ll=ll[:i+1]+'::'+ll[i+1:]
i=ll.find('::')
if ll[i:]=='::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n'%(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i<0:i=0;pl=''
else: pl=ll[:i].strip();ll=ll[i+2:]
ch = markoutercomma(pl).split('@,@')
if len(ch)>1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1=namepattern.match(e)
if not m1:
if case in ['public', 'private']: k=''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case, repr(e)))
continue
else:
k=rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k]={}
if case=='dimension':
ap=case+m1.group('after')
if case=='intent':
ap=m.group('this')+pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter>1:
if '__user__' not in groupcache[groupcounter-2]['name']:
outmess('analyzeline: missing __user__ module (could be nothing)\n')
if k!=groupcache[groupcounter]['name']: # fixes ticket 1693
outmess('analyzeline: appending intent(callback) %s'\
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess('analyzeline: intent(callback) %s is ignored' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'\
' in argument list' % (k))
            if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap=case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec']=[ap]
if case=='external':
if groupcache[groupcounter]['block']=='program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
#outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`))
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals']=[]
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars']=edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case=='parameter':
edecl=groupcache[groupcounter]['vars']
ll=m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr=[x.strip() for x in e.split('=')]
except:
outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e, ll));continue
params = get_parameters(edecl)
k=rmbadname1(k)
if k not in edecl:
edecl[k]={}
if '=' in edecl[k] and (not edecl[k]['=']==initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec')=='real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(\
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec')=='complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else: edecl[k]['attrspec']=['parameter']
last_name = k
groupcache[groupcounter]['vars']=edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case=='implicit':
if m.group('after').strip().lower()=='none':
groupcache[groupcounter]['implicit']=None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl=groupcache[groupcounter]['implicit']
else: impl={}
if impl is None:
outmess('analyzeline: Overwriting earlier "implicit none" statement.\n')
impl={}
for e in markoutercomma(m.group('after')).split('@,@'):
decl={}
m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue
m2=typespattern4implicit.match(m1.group('this'))
if not m2:
outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue
typespec, selector, attr, edecl=cracktypespec0(m2.group('this'), m2.group('after'))
kindselect, charselect, typename=cracktypespec(typespec, selector)
decl['typespec']=typespec
decl['kindselector']=kindselect
decl['charselector']=charselect
decl['typename']=typename
for k in list(decl.keys()):
if not decl[k]: del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try: begc, endc=[x.strip() for x in r.split('-')]
except:
outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue
else: begc=endc=r.strip()
if not len(begc)==len(endc)==1:
outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue
for o in range(ord(begc), ord(endc)+1):
impl[chr(o)]=decl
groupcache[groupcounter]['implicit']=impl
elif case=='data':
ll=[]
dl='';il='';f=0;fc=1;inp=0
for c in m.group('after'):
if not inp:
if c=="'": fc=not fc
if c=='/' and fc: f=f+1;continue
if c=='(': inp = inp + 1
elif c==')': inp = inp - 1
if f==0: dl=dl+c
elif f==1: il=il+c
elif f==2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl=c;il='';f=0
if f==2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars={}
if 'vars' in groupcache[groupcounter]:
vars=groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l=[x.strip() for x in l]
if l[0][0]==',':l[0]=l[0][1:]
if l[0][0]=='(':
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0])
continue
#if '(' in l[0]:
# #outmess('analyzeline: ignoring this data statement.\n')
# continue
i=0;j=0;llen=len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0]=='(':
outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for wrapping.
continue
fc=0
while (i<llen) and (fc or not l[1][i]==','):
if l[1][i]=="'": fc=not fc
i=i+1
i=i+1
#v,l[1][j:i-1]=name,initvalue
if v not in vars:
vars[v]={}
if '=' in vars[v] and not vars[v]['=']==l[1][j:i-1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v, vars[v]['='], l[1][j:i-1]))
vars[v]['=']=l[1][j:i-1]
j=i
last_name = v
groupcache[groupcounter]['vars']=vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case=='common':
line=m.group('after').strip()
if not line[0]=='/':line='//'+line
cl=[]
f=0;bn='';ol=''
for c in line:
if c=='/':f=f+1;continue
if f>=3:
bn = bn.strip()
if not bn: bn='_BLNK_'
cl.append([bn, ol])
f=f-2;bn='';ol=''
if f%2: bn=bn+c
else: ol=ol+c
bn = bn.strip()
if not bn: bn='_BLNK_'
cl.append([bn, ol])
commonkey={}
if 'common' in groupcache[groupcounter]:
commonkey=groupcache[groupcounter]['common']
for c in cl:
if c[0] in commonkey:
outmess('analyzeline: previously defined common block encountered. Skipping.\n')
continue
commonkey[c[0]]=[]
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i: commonkey[c[0]].append(i)
groupcache[groupcounter]['common']=commonkey
previous_context = ('common', bn, groupcounter)
elif case=='use':
m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm=m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use']={}
name=m1.group('name')
groupcache[groupcounter]['use'][name]={}
isonly=0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly=1
groupcache[groupcounter]['use'][name]['only']=isonly
ll=[x.strip() for x in mm['list'].split(',')]
rl={}
for l in ll:
if '=' in l:
m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
if m2: rl[m2.group('local').strip()]=m2.group('use').strip()
else:
outmess('analyzeline: No local=>use pattern found in %s\n'%repr(l))
else:
rl[l]=l
groupcache[groupcounter]['use'][name]['map']=rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this')=='usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case=='multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
#gc = previous_context[2]
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose>1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector=None
attr=None
if re.match(r'double\s*complex', typespec, re.I): typespec='double complex'
elif re.match(r'double\s*precision', typespec, re.I): typespec='double precision'
else: typespec=typespec.strip().lower()
m1=selectpattern.match(markouterparen(ll))
if not m1:
outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d=m1.groupdict()
for k in list(d.keys()): d[k]=unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector=d['this']
ll=d['after']
i=ll.find('::')
if i>=0:
attr=ll[:i].strip()
ll=ll[i+2:]
return typespec, selector, attr, ll
#####
namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
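# The patterns above drive entity-declaration parsing: namepattern splits off the
# leading identifier, kindselector matches "(kind=<k>)" or "*<k>" after a numeric
# type, charselector matches "(<lenkind>)" or "*<len>" after "character",
# lenkindpattern splits a character selector into its len= and kind= parts, and
# lenarraypattern picks the entity tail apart into array dims, "*<len>" and an
# "=<init>" or "/<init>/" initializer (it runs on markouterparen-marked input).
# Illustrative example (not from the source): for the entity "x(10)*8 = 3",
# lenarraypattern yields array="10", len="8", init="3".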
def removespaces(expr):
expr=expr.strip()
if len(expr)<=1: return expr
expr2=expr[0]
for i in range(1, len(expr)-1):
if expr[i]==' ' and \
((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue
expr2=expr2+expr[i]
expr2=expr2+expr[-1]
return expr2
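# markinnerspaces replaces blanks inside quoted strings with '@_@' so that later
# space-based splitting leaves string literals intact; updatevars restores the
# blanks afterwards. Illustrative: markinnerspaces("a 'b c'") -> "a 'b@_@c'".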
def markinnerspaces(line):
l='';f=0
cc='\''
cc1='"'
cb=''
for c in line:
if cb=='\\' and c in ['\\', '\'', '"']:
l=l+c
cb=c
continue
if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c]
if c==cc: f = 1 - f  # toggle: entering/leaving a quoted string
elif c==' ' and f==1: l=l+'@_@'; continue
l=l+c;cb=c
return l
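# updatevars merges one type-declaration statement into the current group's
# 'vars' dictionary: for every entity in entitydecl it records the typespec,
# kind/char selectors, attributes, dimensions and init expression, warning
# (rather than failing) on conflicting redeclarations. It returns the last
# entity name seen, which analyzeline uses as context for multiline blocks.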
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename=cracktypespec(typespec, selector)
if attrspec:
attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1=[]
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1: el1.append(e1.replace('@_@', ' '))
for e in el1:
m=namepattern.match(e)
if not m:
outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(repr(e)))
continue
ename=rmbadname1(m.group('name'))
edecl={}
if ename in groupcache[groupcounter]['vars']:
edecl=groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec']=typespec
elif typespec and (not typespec==edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector']=copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k]))
else: edecl['kindselector'][k]=copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector']=charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
%(ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k]))
else: edecl['charselector'][k]=copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename']=typename
elif typename and (not edecl['typename']==typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec']=copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec']=copy.copy(typespec)
edecl['kindselector']=copy.copy(kindselect)
edecl['charselector']=copy.copy(charselect)
edecl['typename']=typename
edecl['attrspec']=copy.copy(attrspec)
if m.group('after'):
m1=lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1=m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
for k in list(d1.keys()):
if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
else: del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len']=='':
d1['len']=d1['array']
del d1['array']
else:
d1['array']=d1['array']+','+d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)'%d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec']=[dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9]=='dimension' and dm1!=dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector']={}
edecl['kindselector']['*']=d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector']={}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*']=d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['=']==d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['='], d1['init']))
else:
edecl['=']=d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename]=edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
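# cracktypespec splits a type selector into kind/char dictionaries.
# Illustrative: cracktypespec('real', '(kind=8)') -> ({'kind': '8'}, None, None)
# and cracktypespec('character', '*10') -> (None, {'*': '10'}, None).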
def cracktypespec(typespec, selector):
kindselect=None
charselect=None
typename=None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect=kindselector.match(selector)
if not kindselect:
outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector)))
return
kindselect=kindselect.groupdict()
kindselect['*']=kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]: del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec=='character':
charselect=charselector.match(selector)
if not charselect:
outmess('cracktypespec: no charselector pattern found for %s\n'%(repr(selector)))
return
charselect=charselect.groupdict()
charselect['*']=charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
lenkind=lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk+'2']:
lenkind[lk]=lenkind[lk+'2']
charselect[lk]=lenkind[lk]
del lenkind[lk+'2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]: del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec=='type':
typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename: typename=typename.group('name')
else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector)))
else:
outmess('cracktypespec: no selector used for %s\n'%(repr(selector)))
return kindselect, charselect, typename
######
def setattrspec(decl,attr,force=0):
if not decl:
decl={}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec']=[attr]
return decl
if force: decl['attrspec'].append(attr)
if attr in decl['attrspec']: return decl
if attr=='static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='public' and 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr=='private' and 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl,sel,force=0):
if not decl:
decl={}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector']=sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k]=sel[k]
return decl
def setcharselector(decl,sel,force=0):
if not decl:
decl={}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector']=sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k]=sel[k]
return decl
def getblockname(block,unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
###### post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext='In: %s:%s\n'%(block['from'], block['name'])
except:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
errmess('get_useparameters: mapping for %s not impl.' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'\
' value from module %s' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block,tab='',param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = []
for g in block:
g = postcrack2(g, tab=tab+'\t', param_map=param_map)
ret.append(g)
return ret
setmesstext(block)
outmess('%sBlock: %s\n'%(tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = []
for b in block['body']:
b = postcrack2(b, tab=tab+'\t', param_map=param_map)
new_body.append(b)
block['body'] = new_body
return block
def postcrack(block,args=None,tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret=[]
uret=[]
for g in block:
setmesstext(g)
g=postcrack(g, tab=tab+'\t')
if 'name' in g and '__user__' in g['name']: # sort user routines to appear first
uret.append(g)
else:
gret.append(g)
return uret+gret
setmesstext(block)
if not isinstance(block, dict) or 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' + \
str(block))
if 'name' in block and not block['name']=='unknown_interface':
outmess('%sBlock: %s\n'%(tab, block['name']), 0)
blocktype=block['block']
block=analyzeargs(block)
block=analyzecommon(block)
block['vars']=analyzevars(block)
block['sortvars']=sortvarnames(block['vars'])
if 'args' in block and block['args']:
args=block['args']
block['body']=analyzebody(block, args, tab=tab)
userisdefined=[]
## fromuser = []
if 'use' in block:
useblock=block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
## if 'map' in useblock[k]:
## for n in useblock[k]['map'].itervalues():
## if n not in fromuser: fromuser.append(n)
else: useblock={}
name=''
if 'name' in block:
name=block['name']
if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module
interfaced=[]
if 'interfaced' in block:
interfaced=block['interfaced']
mvars=copy.copy(block['vars'])
if name:
mname=name+'__user__routines'
else:
mname='unknown__user__routines'
if mname in userisdefined:
i=1
while '%s_%i'%(mname, i) in userisdefined: i=i+1
mname='%s_%i'%(mname, i)
interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
for e in block['externals']:
## if e in fromuser:
## outmess(' Skipping %s that is defined explicitly in another use statement\n'%(`e`))
## continue
if e in interfaced:
edef=[]
j=-1
for b in block['body']:
j=j+1
if b['block']=='interface':
i=-1
for bb in b['body']:
i=i+1
if 'name' in bb and bb['name']==e:
edef=copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']: del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e]=mvars[e]
if interface['vars'] or interface['body']:
block['interfaced']=interfaced
mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
useblock[mname]={}
usermodules.append(mblock)
if useblock:
block['use']=useblock
return block
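# sortvarnames orders variable names so that, where possible, every variable
# appears after the variables it depends on; a rotation counter breaks out of
# (and reports) cyclic dependencies instead of looping forever.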
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
#print '%s depends on %s'%(v,vars[v]['depend'])
else: indep.append(v)
n = len(dep)
i = 0
while dep: #XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:]+[v]
i = i + 1
if i>n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+', '.join(dep)+'\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
#print indep
return indep
def analyzecommon(block):
if not hascommon(block): return block
commonvars=[]
for k in list(block['common'].keys()):
comvars=[]
for e in block['common'][k]:
m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims=[]
if m.group('dims'):
dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')]
n=m.group('name').strip()
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims)))
else:
block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))]
else:
if dims:
block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]}
else: block['vars'][n]={}
if n not in commonvars: commonvars.append(n)
else:
n=e
errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e, k))
comvars.append(n)
block['common'][k]=comvars
if 'commonvars' not in block:
block['commonvars']=commonvars
else:
block['commonvars']=block['commonvars']+commonvars
return block
def analyzebody(block,args,tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body=[]
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_=b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True)
else: as_=args
b=postcrack(b, as_, tab=tab+'\t')
if b['block']=='interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '')=='pythonmodule':
usermodules.append(b)
else:
if b['block']=='module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules=defaultimplicitrules
attrrules={}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules=None
if verbose>1:
outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k]=block['implicit'][k]
else:
attrrules[k]=block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e,g=None,l=None):
r = eval(e, g, l)
if type(r) in [type(0), type(0.0)]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
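# getlincoef recovers a linear form a*x+b by probing: it substitutes 0, 1, 0.5
# and 1.5 for a candidate symbol x and checks that the resulting values lie on
# one line. Illustrative: getlincoef('2*n+1', {'n'}) -> (2, 1, 'n'), while a
# constant such as '3' yields (0, 3, None).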
def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
except: pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x)>len_e: continue
if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e):
# skip function calls having x as an argument, e.g. max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s'%(m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a*0.5+b==c and a*1.5+b==c2):
return a, b, x
except: pass
break
return None, None, None
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
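# getarrlen turns a dimension bound pair dl=[lower, upper] into an extent
# expression upper-lower+1, simplifying when a bound is a plain integer.
# Illustrative: getarrlen(['1', 'n'], ['n']) -> ('1 * n', 'n', ')/(1)');
# the caller strips the redundant '1 * ' and '/(1)' pieces afterwards.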
def getarrlen(dl,args,star='*'):
edl = []
try: edl.append(myeval(dl[0], {}, {}))
except: edl.append(dl[0])
try: edl.append(myeval(dl[1], {}, {}))
except: edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1-edl[0]
if p1==0: d = str(dl[1])
elif p1<0: d = '%s-%s'%(dl[1], -p1)
else: d = '%s+%s'%(dl[1], p1)
elif isinstance(edl[1], int):
p1 = 1+edl[1]
if p1==0: d='-(%s)' % (dl[0])
else: d='%s-(%s)' % (p1, dl[0])
else: d = '%s-(%s)+1'%(dl[1], dl[0])
try: return repr(myeval(d, {}, {})), None, None
except: pass
d1, d2=getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
if (d1[0], d2[0])==(0, 0):
return repr(d2[1]-d1[1]+1), None, None
b = d2[1] - d1[1] + 1
d1 = (d1[0], 0, d1[2])
d2 = (d2[0], b, d2[2])
if d1[0]==0 and d2[2] in args:
if b<0: return '%s * %s - %s'%(d2[0], d2[2], -b), d2[2], '+%s)/(%s)'%(-b, d2[0])
elif b: return '%s * %s + %s'%(d2[0], d2[2], b), d2[2], '-%s)/(%s)'%(b, d2[0])
else: return '%s * %s'%(d2[0], d2[2]), d2[2], ')/(%s)'%(d2[0])
if d2[0]==0 and d1[2] in args:
if b<0: return '%s * %s - %s'%(-d1[0], d1[2], -b), d1[2], '+%s)/(%s)'%(-b, -d1[0])
elif b: return '%s * %s + %s'%(-d1[0], d1[2], b), d1[2], '-%s)/(%s)'%(b, -d1[0])
else: return '%s * %s'%(-d1[0], d1[2]), d1[2], ')/(%s)'%(-d1[0])
if d1[2]==d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a: return repr(b), None, None
if b<0: return '%s * %s - %s'%(a, d1[2], -b), d2[2], '+%s)/(%s)'%(-b, a)
elif b: return '%s * %s + %s'%(a, d1[2], b), d2[2], '-%s)/(%s)'%(b, a)
else: return '%s * %s'%(a, d1[2]), d2[2], ')/(%s)'%(a)
if d1[0]==d2[0]==1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
c = '(%s)'%c
if b==0: d='%s-%s' % (d2[2], c)
elif b<0: d='%s-%s-%s' % (d2[2], c, -b)
else: d='%s-%s+%s' % (d2[2], c, b)
elif d1[0]==0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)'%c2
if d2[0]==1: pass
elif d2[0]==-1: c2='-%s' %c2
else: c2='%s*%s'%(d2[0], c2)
if b==0: d=c2
elif b<0: d='%s-%s' % (c2, -b)
else: d='%s+%s' % (c2, b)
elif d2[0]==0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)'%c1
if d1[0]==1: c1='-%s'%c1
elif d1[0]==-1: c1='+%s'%c1
elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
else: c1 = '-%s*%s' % (d1[0], c1)
if b==0: d=c1
elif b<0: d='%s-%s' % (c1, -b)
else: d='%s+%s' % (c1, b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)'%c1
if d1[0]==1: c1='-%s'%c1
elif d1[0]==-1: c1='+%s'%c1
elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
else: c1 = '-%s*%s' % (d1[0], c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)'%c2
if d2[0]==1: pass
elif d2[0]==-1: c2='-%s' %c2
else: c2='%s*%s'%(d2[0], c2)
if b==0: d='%s%s' % (c2, c1)
elif b<0: d='%s%s-%s' % (c2, c1, -b)
else: d='%s%s+%s' % (c2, c1, b)
return d, None, None
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
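# _get_depend_dict collects, per variable, the transitive closure of the names
# it depends on: explicit 'depend' entries plus any known variable referenced
# in its init ('=') expression.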
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
def _kind_func(string):
#XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind('+string+')'
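# A rough stand-in for Fortran's SELECTED_INT_KIND: return the smallest byte
# size whose range covers 10**r. Illustrative: r=5 -> 4, since 10**5 exceeds
# 2**16 but fits in 2**32.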
def _selected_int_kind_func(r):
#XXX: This should be processor dependent
m = 10**r
if m<=2**8: return 1
if m<=2**16: return 2
if m<=2**32: return 4
if m<=2**63: return 8
if m<=2**128: return 16
return -1
def _selected_real_kind_func(p, r=0, radix=0):
#XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7: return 4
if p < 16: return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
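# get_parameters evaluates Fortran PARAMETER constants to Python values so that
# later passes can resolve kind selectors and dimension expressions. Fortran
# spellings (.true., d-exponents, kind(...), selected_int_kind(...)) are first
# rewritten into something eval() can digest.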
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func),
]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
#TODO: test .eq., .ne., etc. replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
if isinteger(vars[n]) and not selected_kind_re.match(v):
v = v.split('_')[0]
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(\
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
if iscomplex(vars[n]):
if v[0]=='(' and v[-1]==')':
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
#print params
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl!=n:
params[nl] = params[n]
else:
print(vars[n])
outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n)))
return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '\
'(available names: %s)\n' \
% (msg, value, list(params.keys())))
return value
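# analyzevars is the main per-block variable analysis: it applies implicit
# typing rules, expands attrspec strings (dimension/intent/depend/check/note)
# into structured fields, derives dimension dependencies and checks for array
# arguments, folds in function-prefix type information, and finally prunes
# variables that are not needed for wrapping.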
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules=buildimplicitrules(block)
vars=copy.copy(block['vars'])
if block['block']=='function' and block['name'] not in vars:
vars[block['name']]={}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen=block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n]=setattrspec(vars[n], k)
svars=[]
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args: svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b'%(v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n]=setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k=='typespec' and implicitrules[ln0][k]=='undefined':
continue
if k not in vars[n]:
vars[n][k]=implicitrules[ln0][k]
elif k=='attrspec':
for l in implicitrules[ln0][k]:
vars[n]=setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr=vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec']=[]
dim, intent, depend, check, note=None, None, None, None, None
for a in attr:
if a[:9]=='dimension': dim=(a[9:].strip())[1:-1]
elif a[:6]=='intent': intent=(a[6:].strip())[1:-1]
elif a[:6]=='depend': depend=(a[6:].strip())[1:-1]
elif a[:5]=='check': check=(a[5:].strip())[1:-1]
elif a[:4]=='note': note=(a[4:].strip())[1:-1]
else: vars[n]=setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent']=[]
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent=None
if note:
note=note.replace('\\n\\n', '\n\n')
note=note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note']=[note]
else:
vars[n]['note'].append(note)
note=None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend=None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check']=[]
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check=None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension']=[]
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d==':':
star=':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)', d, re.I)
if m:
#outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
d = m.group('before')+str(params[p])+m.group('after')
if d==star:
dl = [star]
else:
dl=markoutercomma(d, ':').split('@:@')
if len(dl)==2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl)==1 and not dl[0]==star: dl = ['1', dl[0]]
if len(dl)==2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ': d = d[4:]
if di and di[-4:] == '/(1)': di = di[:-4]
if v: savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape'#'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\
%(d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension']+[d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess("analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend']=[]
vars[n]['check']=[]
if 'dimension' in vars[n]:
#/----< no check
#vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension'])))
i=-1; ni=len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps=[] # dependencies of 'd'
ad=''
pd=''
#origd = d
if d not in vars:
if d in savelindims:
pd, ad='(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
#for r in block['vars'].iterkeys():
if r not in vars:
continue
if re.match(r'.*?\b'+r+r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6]=='depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps=ddeps+vars[d]['depend']
i=i+1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend']=[n]
if ni>1:
vars[d]['=']='%s%s(%s,%s)%s'% (pd, shape_macro, n, i, ad)
else:
vars[d]['=']='%slen(%s)%s'% (pd, n, ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni>1:
vars[d]['check']=['%s%s(%s,%i)%s==%s'\
%(pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check']=['%slen(%s)%s>=%s'%(pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec']=['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
#if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d))
#else: vars[n]['check'].append('len(%s)>=%s'%(n,d))
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length='1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*']=length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*']=length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec']=[]
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for v, m in list(dep_matches.items()):
if m(vars[n]['=']): vars[n]['depend'].append(v)
if not vars[n]['depend']: del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n==block['name']: # n is block name
if 'note' in vars[n]:
block['note']=vars[n]['note']
if block['block']=='function':
if 'result' in block and block['result'] in vars:
vars[n]=appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr=block['prefix']; ispure=0; isrec=1
pr1=pr.replace('pure', '')
ispure=(not pr==pr1)
pr=pr1.replace('recursive', '')
isrec=(not pr==pr1)
m=typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl=cracktypespec0(m.group('this'), m.group('after'))
kindselect, charselect, typename=cracktypespec(typespec, selector)
vars[n]['typespec']=typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(kindselect['kind'], {}, params)
except:
pass
vars[n]['kindselector']=kindselect
if charselect: vars[n]['charselector']=charselect
if typename: vars[n]['typename']=typename
if ispure: vars[n]=setattrspec(vars[n], 'pure')
if isrec: vars[n]=setattrspec(vars[n], 'recursive')
else:
outmess('analyzevars: prefix (%s) was not used\n'%repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars=copy.copy(block['args']+block['commonvars'])
else:
neededvars=copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block']=='function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
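# expr2name maps a dummy-argument expression to a usable identifier: plain
# names pass through, anything else gets a generated 'e_..._e' name whose type
# is inferred with determineexprtype. Illustrative: an argument written as
# 'n*2' would become 'e_n_2_e'.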
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules=buildimplicitrules(block)
at=determineexprtype(a, block['vars'], implicitrules)
na='e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase+string.digits: c='_'
na=na+c
if na[-1]=='_': na=na+'e'
else: na=na+'_e'
a=na
while a in block['vars'] or a in block['args']:
a=a+'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a]=at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a]={}
if 'externals' in block and orig_a in block['externals']+block['interfaced']:
block['vars'][a]=setattrspec(block['vars'][a], 'external')
return a
def analyzeargs(block):
setmesstext(block)
implicitrules, attrrules=buildimplicitrules(block)
if 'args' not in block:
block['args']=[]
args=[]
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args']=args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a]={}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals']=[]
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']]={}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
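# These patterns classify literal expressions for determineexprtype: re_1 a
# complex-looking "(x,y)" pair, re_2 an integer literal (with an optional
# _kind suffix), re_3 a real literal, re_4 anything parenthesized, and re_5 a
# function call or array reference whose name decides the type.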
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec':'integer'}
if isinstance(r, float):
return {'typespec':'real'}
if isinstance(r, complex):
return {'typespec':'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr,vars,rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr=expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec':'complex'}
m=determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
return {'typespec':'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
return {'typespec':'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@'+op+'@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t={}
if determineexprtype_re_4.match(expr): # in parenthesis
t=determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn=m.group('name')
t=determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec':'character','charselector':{'*':'*'}}
if not t:
outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr)))
return t
######
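# crack2fortrangen renders a crack block tree back into Fortran-ish source
# (the signature part of a .pyf file): the block header with args and result,
# f2py enhancements, use/common statements, variable declarations, and the
# recursively generated body.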
def crack2fortrangen(block,tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret=''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret=ret+crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix=''
name=''
args=''
blocktype=block['block']
if blocktype=='program': return ''
argsl = []
if 'name' in block:
name=block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block']=='function' or argsl:
args='(%s)'%','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s'%(f2pyenhancements, tab+tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype=='function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s'%\
(f2pyenhancements, tab+tabchar,
','.join(intent_lst), name)
use=''
if 'use' in block:
use=use2fortran(block['use'], tab+tabchar)
common=''
if 'common' in block:
common=common2fortran(block['common'], tab+tabchar)
if name=='unknown_interface': name=''
result=''
if 'result' in block:
result=' result (%s)'%block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
#if 'prefix' in block:
# prefix=block['prefix']+' '
body=crack2fortrangen(block['body'], tab+tabchar)
vars=vars2fortran(block, block['vars'], argsl, tab+tabchar, as_interface=as_interface)
mess=''
if 'from' in block and not as_interface:
mess='! in %s'%block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab+tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype=='block data' and name=='_BLOCK_DATA_':
name = ''
ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common,tab=''):
ret=''
for k in list(common.keys()):
if k=='_BLNK_':
ret='%s%scommon %s'%(ret, tab, ','.join(common[k]))
else:
ret='%s%scommon /%s/ %s'%(ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use,tab=''):
ret=''
for m in list(use.keys()):
ret='%s%suse %s,'%(ret, tab, m)
if use[m]=={}:
if ret and ret[-1]==',': ret=ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret='%s only:'%(ret)
if 'map' in use[m] and use[m]['map']:
c=' '
for k in list(use[m]['map'].keys()):
if k==use[m]['map'][k]:
ret='%s%s%s'%(ret, c, k); c=','
else:
ret='%s%s%s=>%s'%(ret, c, k, use[m]['map'][k]); c=','
if ret and ret[-1]==',': ret=ret[:-1]
return ret
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
c = eval('isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
ret.append(intent)
return ret
def vars2fortran(block,vars,args,tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret=''
nout=[]
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret='%s%sintent(callback) %s'%(ret, tab, a)
ret='%s%sexternal %s'%(ret, tab, a)
if isoptional(vars[a]):
ret='%s%soptional %s'%(ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont=1
for b in block['body']:
if a==b['name'] and b['block']=='function':
cont=0;break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n'%a)
continue
if a==block['name'] and not block['block']=='function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret='%s%sexternal %s'%(ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n'%a)
continue
vardef=vars[a]['typespec']
if vardef=='type' and 'typename' in vars[a]:
vardef='%s(%s)'%(vardef, vars[a]['typename'])
selector={}
if 'kindselector' in vars[a]:
selector=vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector=vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef='%s*(%s)'%(vardef, selector['*'])
else:
vardef='%s*%s'%(vardef, selector['*'])
else:
if 'len' in selector:
vardef='%s(len=%s'%(vardef, selector['len'])
if 'kind' in selector:
vardef='%s,kind=%s)'%(vardef, selector['kind'])
else:
vardef='%s)'%(vardef)
elif 'kind' in selector:
vardef='%s(kind=%s)'%(vardef, selector['kind'])
c=' '
if 'attrspec' in vars[a]:
attr=[]
for l in vars[a]['attrspec']:
if l not in ['external']:
attr.append(l)
if attr:
vardef='%s, %s'%(vardef, ','.join(attr))
c=','
if 'dimension' in vars[a]:
# if not isintent_c(vars[a]):
# vars[a]['dimension'].reverse()
vardef='%s%sdimension(%s)'%(vardef, c, ','.join(vars[a]['dimension']))
c=','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef='%s%sintent(%s)'%(vardef, c, ','.join(lst))
c=','
if 'check' in vars[a]:
vardef='%s%scheck(%s)'%(vardef, c, ','.join(vars[a]['check']))
c=','
if 'depend' in vars[a]:
vardef='%s%sdepend(%s)'%(vardef, c, ','.join(vars[a]['depend']))
c=','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
except:
pass
vardef='%s :: %s=%s'%(vardef, a, v)
else:
vardef='%s :: %s'%(vardef, a)
ret='%s%s%s'%(ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules=[]
postlist=postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist=postcrack2(postlist)
return usermodules+postlist
def crack2fortran(block):
global f2py_version
pyf=crack2fortrangen(block)+'\n'
header="""! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer="""
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
"""%(f2py_version)
return header+pyf+footer
if __name__ == "__main__":
files=[]
funcs=[]
f=1;f2=0;f3=0
showblocklist=0
for l in sys.argv[1:]:
if l=='': pass
elif l[0]==':':
f=0
elif l=='-quiet':
quiet=1
verbose=0
elif l=='-verbose':
verbose=2
quiet=0
elif l=='-fix':
if strictf77:
outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends=1
sourcecodeform='fix'
elif l=='-skipemptyends':
skipemptyends=1
elif l=='--ignore-contains':
ignorecontains=1
elif l=='-f77':
strictf77=1
sourcecodeform='fix'
elif l=='-f90':
strictf77=0
sourcecodeform='free'
skipemptyends=1
elif l=='-h':
f2=1
elif l=='-show':
showblocklist=1
elif l=='-m':
f3=1
elif l[0]=='-':
errmess('Unknown option %s\n'%repr(l))
elif f2:
f2=0
pyffilename=l
elif f3:
f3=0
f77modulename=l
elif f:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s\n'%str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specifyied module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""", 0)
postlist=crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n'%repr(pyffilename), 0)
pyf=crack2fortran(postlist)
f=open(pyffilename, 'w')
f.write(pyf)
f.close()
if showblocklist:
show(postlist)
|
cdrooom/odoo
|
refs/heads/master
|
addons/l10n_hn/__openerp__.py
|
8
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Salvatore Josué Trimarchi Pinto <salvatore@trigluu.com>
# (http://trigluu.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal Honduran chart of accounts that can be used
# as a base to build a more complex one. It also includes a chart of taxes and
# the Lempira currency.
#
# This module is based on the Guatemalan chart of accounts:
# Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This module works with OpenERP 6.0 to 8.0
#
{
'name': 'Honduras - Accounting',
'version': '0.1',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Honduras.
====================================================================
Agrega una nomenclatura contable para Honduras. También incluye impuestos y la
moneda Lempira. -- Adds accounting chart for Honduras. It also includes taxes
and the Lempira currency.""",
'author': 'Salvatore Josue Trimarchi Pinto',
'website': 'http://trigluu.com',
'depends': ['base', 'account', 'account_chart'],
'data': [
'account_types.xml',
'account_chart.xml',
'account_tax.xml',
'l10n_hn_base.xml',
],
'demo': [],
'installable': True,
}
|
roadmapper/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_switch_controller_switch_group.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_switch_group
short_description: Configure FortiSwitch switch groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and switch_group category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
switch_controller_switch_group:
description:
- Configure FortiSwitch switch groups.
default: null
type: dict
suboptions:
description:
description:
- Optional switch group description.
type: str
members:
description:
- FortiSwitch members belonging to this switch group.
type: list
suboptions:
name:
description:
- Managed device ID. Source switch-controller.managed-switch.switch-id.
required: true
type: str
name:
description:
- Switch group name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch switch groups.
fortios_switch_controller_switch_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
switch_controller_switch_group:
description: "<your_own_value>"
members:
-
name: "default_name_5 (source switch-controller.managed-switch.switch-id)"
name: "default_name_6"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_switch_controller_switch_group_data(json):
option_list = ['description', 'members', 'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
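# Illustration (hypothetical values): given {'name': 'sw-grp1', 'state': 'present',
# 'description': None}, the filter above returns {'name': 'sw-grp1'} -- keys outside
# option_list and None values are dropped before the payload is sent to FortiOS.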
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
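# Illustration (values are made up): underscore_to_hyphen({'members': [{'member_name': 1}]})
# returns {'members': [{'member-name': 1}]}. Ansible option names use underscores
# while the FortiOS API expects hyphenated keys, and the helper recurses through
# nested dicts and lists.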
def switch_controller_switch_group(data, fos):
vdom = data['vdom']
state = data['state']
switch_controller_switch_group_data = data['switch_controller_switch_group']
filtered_data = underscore_to_hyphen(filter_switch_controller_switch_group_data(switch_controller_switch_group_data))
if state == "present":
return fos.set('switch-controller',
'switch-group',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('switch-controller',
'switch-group',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
if data['switch_controller_switch_group']:
resp = switch_controller_switch_group(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"switch_controller_switch_group": {
"required": False, "type": "dict", "default": None,
"options": {
"description": {"required": False, "type": "str"},
"members": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
yashodhank/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/appraisal_template_goal/appraisal_template_goal.py
|
121
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AppraisalTemplateGoal(Document):
pass
|
arodic/three.js
|
refs/heads/master
|
utils/exporters/blender/tests/scripts/exporter.py
|
295
|
import os
import argparse
import sys
import io_three
from io_three.exporter import constants
try:
separator = sys.argv.index('--')
except ValueError:  # list.index raises ValueError, not IndexError, when '--' is absent
print('ERROR: no parameters specified')
sys.exit(1)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filepath')
for key, value in constants.EXPORT_OPTIONS.items():
if not isinstance(value, bool):
kwargs = {'type': type(value), 'default': value}
else:
kwargs = {'action':'store_true'}
parser.add_argument('--%s' % key, **kwargs)
return vars(parser.parse_args(sys.argv[separator+1:]))
def main():
args = parse_args()
args[constants.ENABLE_PRECISION] = True
args[constants.INDENT] = True
if args[constants.SCENE]:
io_three.exporter.export_scene(args['filepath'], args)
else:
io_three.exporter.export_geometry(args['filepath'], args)
if __name__ == '__main__':
main()
|
nthuion/nthuion-backend
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
MajinBui/django-portal
|
refs/heads/master
|
portal/migrations/__init__.py
|
12133432
| |
dwiajik/twit-macet-mining-v3
|
refs/heads/master
|
clustering/birch.py
|
12133432
| |
dburr/SchoolIdolAPI
|
refs/heads/master
|
web/templatetags/__init__.py
|
12133432
| |
hackerbot/DjangoDev
|
refs/heads/master
|
tests/migrations/related_models_app/__init__.py
|
12133432
| |
mushtaqak/edx-platform
|
refs/heads/master
|
lms/djangoapps/survey/migrations/__init__.py
|
12133432
| |
sacnayak/ssnayak-viz
|
refs/heads/master
|
lib/httplib2/test/brokensocket/socket.py
|
314
|
from realsocket import gaierror, error, getaddrinfo, SOCK_STREAM
|
tedder/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_asm_policy_signature_set.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy_signature_set
short_description: Manages Signature Sets on ASM policy
description:
- Manages Signature Sets on ASM policy.
version_added: 2.8
options:
name:
description:
- Specifies the name of the signature sets to apply on or remove from the ASM policy.
- Apart from built-in signature sets that ship with the device, users can use user created
signature sets.
- When C(All Response Signatures), configures all signatures in the attack signature
pool that can review responses.
- When C(All Signatures), configures all attack signatures in the attack signature pool.
- When C(Apache Struts Signatures), configures signatures that target attacks against
the Apache Struts web servers. Only available in version 13.x and up.
- When C(Apache Tomcat Signatures), configures signatures that target attacks against
the Apache Tomcat web servers. Only available in version 13.x and up.
- When C(Cisco Signatures), configures signatures that target attacks against Cisco systems.
Only available in version 13.x and up.
- When C(Command Execution Signatures), configures signatures involving attacks perpetrated by executing commands.
- When C(Cross Site Scripting Signatures), configures signatures that target attacks caused
by cross-site scripting techniques.
- When C(Directory Indexing Signatures), configures signatures targeting attacks that browse directory listings.
- When C(Generic Detection Signatures), configures signatures targeting well-known
or common web and application attacks.
- When C(HTTP Response Splitting Signatures), configures signatures targeting attacks that
take advantage of responses for which input values have not been sanitized.
- When C(High Accuracy Detection Evasion Signatures), configures signatures with a high level of accuracy
that produce few false positives when identifying evasion attacks. Only available in version 13.x and up.
- When C(High Accuracy Signatures), configures signatures with a high level of accuracy
that produce few false positives when identifying evasion attacks.
- When C(IIS and Windows Signatures), configures signatures that target attacks against IIS
and Windows based systems. Only available in version 13.x and up.
- When C(Information Leakage Signatures), configures signatures targeting attacks that are looking for system data
or debugging information that shows where the system is vulnerable to attack.
- When C(Java Servlets/JSP Signatures), configures signatures that target attacks against Java Servlets
and Java Server Pages (JSP) based applications. Only available in version 13.x and up.
- When C(Low Accuracy Signatures), configures signatures that may result in more false positives
when identifying attacks.
- When C(Medium Accuracy Signatures), configures signatures with a medium level of accuracy
when identifying attacks.
- When C(OS Command Injection Signatures), configures signatures targeting attacks
that attempt to run system level commands through a vulnerable application.
- When C(OWA Signatures), configures signatures that target attacks against
the Microsoft Outlook Web Access (OWA) application.
- When C(Other Application Attacks Signatures), configures signatures targeting miscellaneous attacks,
including session fixation, local file access, injection attempts, header tampering
and so on, affecting many applications.
- When C(Path Traversal Signatures), configures signatures targeting attacks that attempt to access files
and directories that are stored outside the web root folder.
- When C(Predictable Resource Location Signatures), configures signatures targeting attacks that attempt
to uncover hidden website content and functionality by forceful browsing, or by directory and file enumeration.
- When C(Remote File Include Signatures), configures signatures targeting attacks that attempt to exploit
a remote file include vulnerability that could enable a remote attacker to execute arbitrary commands
on the server hosting the application.
- When C(SQL Injection Signatures), configures signatures targeting attacks that attempt to insert (inject)
a SQL query using the input data from a client to an application.
- When C(Server Side Code Injection Signatures), configures signatures targeting code injection attacks
on the server side.
- When C(WebSphere signatures), configures signatures targeting attacks on many computing platforms
that are integrated using WebSphere including general database, Microsoft Windows, IIS,
Microsoft SQL Server, Apache, Oracle, Unix/Linux, IBM DB2, PostgreSQL, and XML.
- When C(XPath Injection Signatures), configures signatures targeting attacks that attempt to gain access
to data structures or bypass permissions when a web site uses user-supplied information
to construct XPath queries for XML data.
required: True
policy_name:
description:
- Specifies the name of an existing ASM policy to add or remove signature sets.
required: True
alarm:
description:
- Specifies if the security policy logs the request data in the Statistics screen,
when a request matches a signature that is included in the signature set.
type: bool
block:
description:
- Effective when the security policy's enforcement mode is Blocking.
- Determines how the system treats requests that match a signature included in the signature set.
- When C(yes) the system blocks all requests that match a signature,
and provides the client with a support ID number.
- When C(no) the system accepts those requests.
type: bool
learn:
description:
- Specifies if the security policy learns all requests that match a signature
that is included in the signature set.
type: bool
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
default: present
choices:
- present
- absent
partition:
description:
- This parameter is only used when identifying ASM policy.
default: Common
notes:
- This module is primarily used as a component of configuring ASM policy in Ansible Galaxy ASM Policy Role.
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add Signature Set to ASM Policy
bigip_asm_policy_signature_set:
name: IIS and Windows Signatures
policy_name: FooPolicy
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove Signature Set to ASM Policy
bigip_asm_policy_signature_set:
name: IIS and Windows Signatures
policy_name: FooPolicy
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
policy_name:
description: The name of the ASM policy
returned: changed
type: str
sample: FooPolicy
name:
description: The name of Signature Set added/removed on ASM policy
returned: changed
type: str
sample: Cisco Signatures
alarm:
description: Specifies whether the security policy logs the request data in the Statistics screen
returned: changed
type: bool
sample: yes
block:
description: Determines how the system treats requests that match a signature included in the signature set
returned: changed
type: bool
sample: no
learn:
description: Specifies if the policy learns all requests that match a signature that is included in the signature set
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
'alarm',
'block',
'learn',
]
returnables = [
'policy_name',
'name',
'alarm',
'block',
'learn',
]
updatables = [
'alarm',
'block',
'learn',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def alarm(self):
result = flatten_boolean(self._values['alarm'])
if result:
if result == 'yes':
return True
return False
@property
def block(self):
result = flatten_boolean(self._values['block'])
if result:
if result == 'yes':
return True
return False
@property
def learn(self):
result = flatten_boolean(self._values['learn'])
if result:
if result == 'yes':
return True
return False
def _signature_set_exists_on_device(self, name):
uri = "https://{0}:{1}/mgmt/tm/asm/signature-sets".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$select=name"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if any(p['name'] == name for p in response['items']):
return True
return False
@property
def name(self):
if self._values['name'] is None:
return None
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('13.0.0'):
name_list = [
'All Response Signatures',
'All Signatures',
'Command Execution Signatures',
'Cross Site Scripting Signatures',
'Directory Indexing Signatures',
'Generic Detection Signatures',
'HTTP Response Splitting Signatures',
'High Accuracy Signatures',
'Information Leakage Signatures',
'Low Accuracy Signatures',
'Medium Accuracy Signatures',
'OS Command Injection Signatures',
'OWA Signatures',
'Other Application Attacks Signatures',
'Path Traversal Signatures',
'Predictable Resource Location Signatures',
'Remote File Include Signatures',
'SQL Injection Signatures',
'Server Side Code Injection Signatures',
'WebSphere signatures',
'XPath Injection Signatures'
]
else:
name_list = [
'All Response Signatures',
'All Signatures',
'Apache Struts Signatures',
'Apache Tomcat Signatures',
'Cisco Signatures',
'Command Execution Signatures',
'Cross Site Scripting Signatures',
'Directory Indexing Signatures',
'Generic Detection Signatures',
'HTTP Response Splitting Signatures',
'High Accuracy Detection Evasion Signatures',
'High Accuracy Signatures',
'IIS and Windows Signatures',
'Information Leakage Signatures',
'Java Servlets/JSP Signatures',
'Low Accuracy Signatures',
'Medium Accuracy Signatures',
'OS Command Injection Signatures',
'OWA Signatures',
'Other Application Attacks Signatures',
'Path Traversal Signatures',
'Predictable Resource Location Signatures',
'Remote File Include Signatures',
'SQL Injection Signatures',
'Server Side Code Injection Signatures',
'WebSphere signatures',
'XPath Injection Signatures'
]
if self._values['name'] in name_list:
return self._values['name']
if self._signature_set_exists_on_device(self._values['name']):
return self._values['name']
raise F5ModuleError(
"The specified signature {0} set does not exist.".format(
self._values['name']
)
)
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def alarm(self):
return flatten_boolean(self._values['alarm'])
@property
def learn(self):
return flatten_boolean(self._values['learn'])
@property
def block(self):
return flatten_boolean(self._values['block'])
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params, client=self.client)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
policy_id = self._get_policy_id()
set_link = self._get_signature_set_link()
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'items' in response and response['items'] != []:
for st in response['items']:
if st['signatureSetReference'] == set_link:
self.want.ss_id = st['id']
return True
return False
def _get_signature_set_link(self):
result = None
signature_set = self.want.name
uri = "https://{0}:{1}/mgmt/tm/asm/signature-sets".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$select=name"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' in response and response['items'] != []:
for item in response['items']:
if item['name'] == signature_set:
result = dict(link=item['selfLink'])
return result
def _get_policy_id(self):
policy_id = None
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,id".format(
self.want.policy_name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'items' in response and response['items'] != []:
policy_id = response['items'][0]['id']
if not policy_id:
raise F5ModuleError(
"The policy with the name {0} does not exist".format(self.want.policy_name)
)
return policy_id
def create_on_device(self):
policy_id = self._get_policy_id()
params = self.changes.api_params()
params['signatureSetReference'] = self._get_signature_set_link()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
policy_id = self._get_policy_id()
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
policy_id = self._get_policy_id()
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
policy_name=dict(
required=True
),
name=dict(
required=True
),
alarm=dict(
type='bool'
),
block=dict(
type='bool'
),
learn=dict(
type='bool'
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
ThinkOpen-Solutions/odoo
|
refs/heads/stable
|
addons/website_crm_partner_assign/models/res_partner.py
|
314
|
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class res_partner_grade(osv.osv):
_inherit = 'res.partner.grade'
_columns = {
'website_published': fields.boolean('Published On Website', copy=False),
}
|
msabramo/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py
|
33
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_group
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower group.
description:
- Create, update, or destroy Ansible Tower groups. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the group.
required: True
description:
description:
- The description to use for the group.
required: False
default: null
inventory:
description:
- Inventory the group should be made a member of.
required: True
variables:
description:
- Variables to use for the group, use '@' for a file.
required: False
default: null
credential:
description:
- Credential to use for the group.
required: False
default: null
source:
description:
- The source to use for this group.
required: False
default: null
choices: ["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6", "cloudforms", "custom"]
source_regions:
description:
- Regions for cloud provider.
required: False
default: null
source_vars:
description:
- Override variables from source with variables from this field.
required: False
default: null
instance_filters:
description:
- Comma-separated list of filter expressions for matching hosts.
required: False
default: null
group_by:
description:
- Limit groups automatically created from inventory source.
required: False
default: null
source_script:
description:
- Inventory script to be used when group type is "custom".
required: False
default: null
overwrite:
description:
- Delete child groups and hosts not found in source.
required: False
default: False
overwrite_vars:
description:
- Override vars in child groups and hosts with those from external source.
required: False
default: null
update_on_launch:
description:
- Refresh inventory data from its source each time a job is run.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.2"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower group
tower_group:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import os
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
description = dict(),
inventory = dict(required=True),
variables = dict(),
credential = dict(),
source = dict(choices=["manual", "file", "ec2", "rax", "vmware",
"gce", "azure", "azure_rm", "openstack",
"satellite6" , "cloudforms", "custom"], default="manual"),
source_regions = dict(),
source_vars = dict(),
instance_filters = dict(),
group_by = dict(),
source_script = dict(),
overwrite = dict(type='bool', default=False),
overwrite_vars = dict(),
update_on_launch = dict(type='bool' , default=False),
tower_host = dict(),
tower_username = dict(),
tower_password = dict(no_log=True),
tower_verify_ssl = dict(type='bool', default=True),
tower_config_file = dict(type='path'),
state = dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
inventory = module.params.get('inventory')
credential = module.params.get('credential')
state = module.params.get('state')
variables = module.params.get('variables')
if variables:
if variables.startswith('@'):
filename = os.path.expanduser(variables[1:])
variables = module.contents_from_file(filename)
json_output = {'group': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
group = tower_cli.get_resource('group')
try:
params = module.params.copy()
params['create_on_missing'] = True
params['variables'] = variables
inv_res = tower_cli.get_resource('inventory')
inv = inv_res.get(name=inventory)
params['inventory'] = inv['id']
if credential:
cred_res = tower_cli.get_resource('credential')
cred = cred_res.get(name=credential)
params['credential'] = cred['id']
if state == 'present':
result = group.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = group.delete(**params)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update the group, inventory not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update the group: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
maru-sama/pyblosxom
|
refs/heads/master
|
Pyblosxom/plugins/rst_parser.py
|
3
|
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (c) 2003, 2004, 2005 Sean Bowman
# Copyright (c) 2011 Will Kahn-Greene
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
"""
Summary
=======
A reStructuredText entry formatter for pyblosxom. reStructuredText is
part of the docutils project (http://docutils.sourceforge.net/). To
use, you need a *recent* version of docutils. A development snapshot
(http://docutils.sourceforge.net/#development-snapshots) will work
fine.
Install
=======
This plugin comes with Pyblosxom. To install, do the following:
1. Add ``Pyblosxom.plugins.rst_parser`` to the ``load_plugins`` list
in your ``config.py`` file.
2. Install docutils. Instructions are at
http://docutils.sourceforge.net/
Usage
=====
Blog entries with a ``.rst`` extension will be parsed as
restructuredText.
You can also configure this as your default preformatter for ``.txt``
files by configuring it in your config file as follows::
py['parser'] = 'reST'
Additionally, you can do this on an entry-by-entry basis by adding a
``#parser reST`` line in the metadata section. For example::
My Little Blog Entry
#parser reST
My main story...
Configuration
=============
There are two optional configuration parameters you can set for additional
control over the rendered HTML::
# To set the starting level for the rendered heading elements.
# 1 is the default.
py['reST_initial_header_level'] = 1
# Enable or disable the promotion of a lone top-level section title to
# document title (and subsequent section title to document subtitle
# promotion); enabled by default.
py['reST_transform_doctitle'] = 1
.. Note::
If you're not seeing headings that you think should be there, try
changing the ``reST_initial_header_level`` property to 0.
"""
__author__ = "Sean Bowman"
__email__ = "sean dot bowman at acm dot org"
__version__ = "2011-10-23"
__url__ = "http://pyblosxom.github.com/"
__description__ = "restructured text support for blog entries"
__category__ = "text"
__license__ = "MIT"
__registrytags__ = "1.5, core"
from docutils.core import publish_parts
from Pyblosxom import tools
from Pyblosxom.memcache import memcache_decorator
PREFORMATTER_ID = 'reST'
FILE_EXT = 'rst'
def verify_installation(args):
# no configuration needed
return 1
def cb_entryparser(args):
args[FILE_EXT] = readfile
return args
def cb_preformat(args):
if args.get("parser", None) == PREFORMATTER_ID:
return parse(''.join(args['story']), args['request'])
@memcache_decorator('rst_parser')
def _parse(initial_header_level, transform_doctitle, story):
parts = publish_parts(
story,
writer_name='html',
settings_overrides={
'initial_header_level': initial_header_level,
'doctitle_xform': transform_doctitle,
'syntax_highlight': 'short'
})
return parts['body']
def parse(story, request):
config = request.getConfiguration()
initial_header_level = config.get('reST_initial_header_level', 1)
transform_doctitle = config.get('reST_transform_doctitle', 1)
return _parse(initial_header_level, transform_doctitle, story)
def readfile(filename, request):
entry_data = {}
lines = open(filename).readlines()
if len(lines) == 0:
return {"title": "", "body": ""}
title = lines.pop(0).strip()
# absorb meta data
while lines and lines[0].startswith("#"):
meta = lines.pop(0)
# remove the hash
meta = meta[1:].strip()
meta = meta.split(" ", 1)
# if there's no value, we append a 1
if len(meta) == 1:
meta.append("1")
entry_data[meta[0].strip()] = meta[1].strip()
body = parse(''.join(lines), request)
entry_data["title"] = title
entry_data["body"] = body
# Call the postformat callbacks
tools.run_callback('postformat', {'request': request,
'entry_data': entry_data})
return entry_data
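# A quick, illustrative sanity check of the pipeline above (requires docutils;
# the input string is hypothetical):
#
#   >>> _parse(1, 1, "Title\n=====\n\nSome *text*.")  # doctest: +SKIP
#   '...<em>text</em>...'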
|
quxiaolong1504/django
|
refs/heads/master
|
tests/context_processors/urls.py
|
452
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^request_attrs/$', views.request_processor),
url(r'^debug/$', views.debug_processor),
]
|
tempbottle/h-store
|
refs/heads/master
|
tests/scripts/xml2/exceptions.py
|
9
|
class XMLException(Exception):
"""General XML Exception.
"""
pass
class InvalidXML(XMLException):
"""Invalid XML Exception.
This is raised when the XML is mal-formed.
"""
pass
class InvalidObject(XMLException):
"""Invalid Python object exception.
This is raised when the Python object passed to the XML generator is invalid.
"""
pass
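# Typical usage (parse_xml is a hypothetical caller): raise the specific
# subclass at the failure site and catch the XMLException base class:
#
#   try:
#       parse_xml(data)
#   except XMLException as err:
#       print("XML error:", err)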
|
derDavidT/sympy
|
refs/heads/master
|
sympy/liealgebras/type_b.py
|
76
|
from __future__ import print_function, division
from .cartan_type import Standard_Cartan
from sympy.core.compatibility import range
from sympy.matrices import eye
class TypeB(Standard_Cartan):
def __new__(cls, n):
if n < 2:
raise ValueError("n cannot be less than 2")
return Standard_Cartan.__new__(cls, "B", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.dimension()
3
"""
return self.n
def basic_root(self, i, j):
"""
Generate a root with a 1 in the ith position
and a -1 in the jth position.
"""
root = [0]*self.n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then every positive root
of Q can be written as a linear combination of
elements of D with all coefficients non-negative.
In B_n the first n-1 simple roots are the same as the
roots in A_(n-1) (a 1 in the ith position, a -1 in
the (i+1)th position, and zeroes elsewhere). The n-th
simple root is the root with a 1 in the nth position
and zeroes elsewhere.
This method returns the ith simple root for the B series.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1, i)
else:
root = [0]*self.n
root[n-1] = 1
return root
def positive_roots(self):
"""
This method generates all the positive roots of
B_n. This is half of all of the roots of B_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.positive_roots()
{1: [1, -1, 0], 2: [1, 1, 0], 3: [1, 0, -1], 4: [1, 0, 1], 5: [0, 1, -1],
6: [0, 1, 1], 7: [1, 0, 0], 8: [0, 1, 0], 9: [0, 0, 1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots for B_n
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""
Returns the Cartan matrix for B_n.
The Cartan matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('B4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -2],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2* eye(n)
i = 1
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0, 1] = -1
m[n-2, n-1] = -2
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of B_n
"""
n = self.n
return (n**2 - n)/2
def lie_algebra(self):
"""
Returns the Lie algebra associated with B_n
"""
n = self.n
return "so(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n)) + "=>=0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
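# Illustration: with the joins above, TypeB(4).dynkin_diagram() produces
#
#   0---0---0=>=0
#   1 2 3 4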
|
m1z0r3/gshark
|
refs/heads/master
|
api/statistic.py
|
1
|
#!/usr/bin/env python3
import os
import ipaddress
import geoip2.database
from collections import Counter
from . import tshark
from .blacklist import BlackList
def get_result(input_files, filter):
database = os.path.join('tools', 'geoip2', 'GeoLite2-Country.mmdb')
reader = geoip2.database.Reader(database)
blacklist_ip = os.path.join('tools', 'blacklist', 'ip')
blacklist_domain = os.path.join('tools', 'blacklist', 'domain')
blacklist = BlackList(blacklist_ip, blacklist_domain)
ips, protocols, http_hosts = [], [], []
for file in input_files:
fields = ['ip.src', 'ip.dst', '_ws.col.Protocol', 'http.host']
cmd_result = tshark.fields(file, filter, fields, two_pass=True)
for line in cmd_result:
elements = line.split('\t')
if elements[0] and is_ipaddress(elements[0]):
ips.append(elements[0])
if elements[1] and is_ipaddress(elements[1]):
ips.append(elements[1])
if elements[2]:
protocols.append(elements[2])
if len(elements) >= 4 and elements[3]:
http_hosts.append((elements[1], elements[3]))
# Update ip blacklist by host
for ip, host in set(http_hosts):
if host and not is_ipaddress(host.split(':')[0]): # Skip if host is in 'ip:port' format
if blacklist.is_malicious_domain(host):
blacklist.ips.append(ip)
# Calculate statistics of protocols
label_p, data_p = get_label_and_data(Counter(protocols))
ratio_p = [data * 100 / sum(data_p) for data in data_p]
result_p = {'label': label_p, 'data': data_p, 'ratio': ratio_p}
# Calculate statistics of all ips
label_ai, data_ai = get_label_and_data(Counter(ips))
ratio_ai = [data * 100 / sum(data_ai) for data in data_ai]
result_ai = {'label': label_ai, 'data': data_ai, 'ratio': ratio_ai}
# Check whether ips are included in blacklist
black_ips = {}
for ip, data in zip(label_ai, data_ai):
if blacklist.is_malicious_ip(ip):
black_ips[ip] = data
# Calculate statistics of blacklist ips
label_bi, data_bi = get_label_and_data(black_ips)
ratio_bi = [data * 100 / sum(data_bi) for data in data_bi]
result_bi = {'label': label_bi, 'data': data_bi, 'ratio': ratio_bi}
result_i = {'all': result_ai, 'black': result_bi}
# Get country name by GeoIP
countries, black_countries = {}, {}
for ip, count in zip(label_ai, data_ai):
try:
country_name = reader.country(ip).country.name or 'Unknown'
except: # NOQA
ipv4 = ipaddress.ip_address(ip)
country_name = 'Private' if ipv4.is_private else 'Unknown'
if country_name in countries:
countries[country_name] += int(count)
else:
countries[country_name] = int(count)
if blacklist.is_malicious_ip(ip):
if country_name in black_countries:
black_countries[country_name] += int(count)
else:
black_countries[country_name] = int(count)
# Calculate statistics of all countries
label_ac, data_ac = get_label_and_data(countries)
ratio_ac = [data * 100 / sum(data_ac) for data in data_ac]
result_ac = {'label': label_ac, 'data': data_ac, 'ratio': ratio_ac}
# Calculate statistics of blacklist countries
label_bc, data_bc = get_label_and_data(black_countries)
ratio_bc = [data * 100 / sum(data_bc) for data in data_bc]
result_bc = {'label': label_bc, 'data': data_bc, 'ratio': ratio_bc}
result_c = {'all': result_ac, 'black': result_bc}
return {'protocol': result_p, 'ip': result_i, 'country': result_c}
def get_label_and_data(counter):
label, data = [], []
for key, value in sorted(counter.items(), key=lambda x: -x[1]):
label.append(key)
data.append(value)
return label, data
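# e.g. get_label_and_data({'tcp': 3, 'dns': 7}) -> (['dns', 'tcp'], [7, 3]);
# labels and counts come back sorted by descending count (illustrative values).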
def is_ipaddress(ip):
try:
ipaddress.ip_address(ip)
except ValueError:
return False
return True
|
brainwane/zulip
|
refs/heads/master
|
zerver/migrations/0106_subscription_push_notifications.py
|
7
|
# Generated by Django 1.11.4 on 2017-09-08 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0105_userprofile_enable_stream_push_notifications'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='push_notifications',
field=models.BooleanField(default=False),
),
]
|
LRGH/amoco
|
refs/heads/release
|
amoco/arch/sparc/spec_v8.py
|
1
|
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2012-2013 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# spec_xxx files are providers for instruction objects.
# These objects are wrapped and created by disasm.py.
# ref: The SPARC Architecture Manual Version 8, Revision SAV080SI9308.
# 72 basic instructions, all encoded in 32 bits.
from amoco.arch.sparc import env
from amoco.arch.core import *
# -------------------------------------------------------
# sparc instruction decoders
# -------------------------------------------------------
ISPECS = []
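# A note on reading the @ispec strings below (our reading of the amoco DSL,
# not normative): "32[ ... ]" lays out one 32-bit word msb-first; literal 0/1
# runs are fixed opcode bits; name(w) consumes a w-bit field passed to the
# decorated decoder; and a leading '=' marks an overlapping alternative view
# of bits already described, e.g. =simm13(13) re-reads the asi(8)+rs2(5) bits
# as a single 13-bit immediate selected at runtime by the i bit.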
# format 3
# ---------
# ld instructions:
@ispec(
"32[ 11 rd(5) 0 a 1001 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="ldsb",
)
@ispec(
"32[ 11 rd(5) 0 a 1010 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="ldsh",
)
@ispec(
"32[ 11 rd(5) 0 a 0001 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="ldub",
)
@ispec(
"32[ 11 rd(5) 0 a 0010 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="lduh",
)
@ispec(
"32[ 11 rd(5) 0 a 0000 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="ld"
)
@ispec(
"32[ 11 rd(5) 0 a 0011 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="ldd"
)
@ispec(
"32[ 11 rd(5) 0 a 1101 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="ldstub",
)
@ispec(
"32[ 11 rd(5) 0 a 1111 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]",
mnemonic="swap",
)
def sparc_ld_(obj, rd, a, op3, rs1, i, asi, rs2, simm13):
adr = env.r[rs1]
if i == 0:
adr += env.r[rs2]
if a == 1:
obj.mnemonic += "a"
src = env.ptr(adr, seg=asi)
else:
adr += env.cst(simm13, 13).signextend(32)
if a == 1:
raise InstructionError(obj)
src = env.ptr(adr)
dst = env.r[rd]
if op3 & 0xF == 0b0011 and rd % 2 == 1:  # ldd requires an even rd
raise InstructionError(obj)
obj.operands = [src, dst]
obj.type = type_data_processing
# ld fsr/csr instructions:
@ispec(
"32[ 11 rd(5) 1 a 0000 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="ld",
)
@ispec(
"32[ 11 rd(5) 1 a 0011 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="ldd",
)
@ispec(
"32[ 11 rd(5) 1 a 0001 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="ld",
)
def sparc_ldf_ldc(obj, rd, a, op3, rs1, i, unused, rs2, simm13):
adr = env.r[rs1]
if i == 0:
adr += env.r[rs2]
else:
adr += env.cst(simm13, 13).signextend(32)
src = env.ptr(adr)
dst = env.f[rd] if a == 0 else env.c[rd]
if op3 & 0xF == 0b0001:
dst = env.fsr if a == 0 else env.csr
obj.operands = [src, dst]
obj.type = type_data_processing
# st instructions:
@ispec(
"32[ 11 rd(5) 0 a 0101 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="stb"
)
@ispec(
"32[ 11 rd(5) 0 a 0110 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="sth"
)
@ispec(
"32[ 11 rd(5) 0 a 0100 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="st"
)
@ispec(
"32[ 11 rd(5) 0 a 0111 =op3(6) rs1(5) i asi(8) rs2(5) =simm13(13) ]", mnemonic="std"
)
def sparc_st_(obj, rd, a, op3, rs1, i, asi, rs2, simm13):
adr = env.r[rs1]
if i == 0:
adr += env.r[rs2]
if a == 1:
obj.mnemonic += "a"
dst = env.ptr(adr, asi)
else:
adr += env.cst(simm13, 13).signextend(32)
dst = env.ptr(adr)
if a == 1:
raise InstructionError(obj)
src = env.r[rd]
if obj.mnemonic == "std" and rd % 2 == 1:  # std requires an even rd
raise InstructionError(obj)
obj.operands = [src, dst]
obj.type = type_data_processing
# st f/c instructions:
@ispec(
"32[ 11 rd(5) 1 a 0100 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="st",
)
@ispec(
"32[ 11 rd(5) 1 a 0111 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="std",
)
@ispec(
"32[ 11 rd(5) 1 a 0101 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="st",
)
@ispec(
"32[ 11 rd(5) 1 a 0110 =op3(6) rs1(5) i unused(8) rs2(5) =simm13(13) ]",
mnemonic="std",
)
def sparc_stf_stc(obj, rd, a, op3, rs1, i, unused, rs2, simm13):
adr = env.r[rs1]
if i == 0:
adr += env.r[rs2]
else:
adr += env.cst(simm13, 13).signextend(32)
dst = env.ptr(adr)
src = env.f[rd] if a == 0 else env.c[rd]
if op3 & 0xF == 0b0101:
src = env.fsr if a == 0 else env.csr
elif op3 & 0xF == 0b0110:
src = env.fq if a == 0 else env.cq
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("32[ 10 rd(5) 0 a 0001 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="and")
@ispec("32[ 10 rd(5) 0 a 0101 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="andn")
@ispec("32[ 10 rd(5) 0 a 0010 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="or")
@ispec("32[ 10 rd(5) 0 a 0110 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="orn")
@ispec("32[ 10 rd(5) 0 a 0011 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="xor")
@ispec("32[ 10 rd(5) 0 a 0111 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="xnor")
def sparc_logic_(obj, rd, a, rs1, i, rs2, simm13):
obj.misc["icc"] = a == 1
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
dst = env.r[rd]
obj.operands = [src1, src2, dst]
obj.type = type_data_processing
@ispec("32[ 10 rd(5) 0 a 0000 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="add")
@ispec("32[ 10 rd(5) 0 a 1000 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="addx")
@ispec("32[ 10 rd(5) 0 a 0100 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="sub")
@ispec("32[ 10 rd(5) 0 a 1100 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="subx")
@ispec("32[ 10 rd(5) 0 a 1010 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="umul")
@ispec("32[ 10 rd(5) 0 a 1011 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="smul")
@ispec("32[ 10 rd(5) 0 a 1110 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="udiv")
@ispec("32[ 10 rd(5) 0 a 1111 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="sdiv")
def sparc_arith_(obj, rd, a, rs1, i, rs2, simm13):
obj.misc["icc"] = a == 1
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
dst = env.r[rd]
obj.operands = [src1, src2, dst]
obj.type = type_data_processing
@ispec("32[ 10 rd(5) 100101 rs1(5) i -------- rs2(5) ]", mnemonic="sll")
@ispec("32[ 10 rd(5) 100110 rs1(5) i -------- rs2(5) ]", mnemonic="srl")
@ispec("32[ 10 rd(5) 100111 rs1(5) i -------- rs2(5) ]", mnemonic="sra")
def sparc_shift_(obj, rd, rs1, i, rs2):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(rs2, 5)
dst = env.r[rd]
obj.operands = [src1, src2, dst]
obj.type = type_data_processing
@ispec("32[ 10 rd(5) 100000 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="taddcc")
@ispec(
"32[ 10 rd(5) 100010 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="taddcctv"
)
@ispec("32[ 10 rd(5) 100001 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="tsubcc")
@ispec(
"32[ 10 rd(5) 100011 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="tsubcctv"
)
@ispec("32[ 10 rd(5) 100100 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="mulscc")
@ispec("32[ 10 rd(5) 111100 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="save")
@ispec("32[ 10 rd(5) 111101 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="restore")
def sparc_tagged_(obj, rd, rs1, i, rs2, simm13):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
dst = env.r[rd]
obj.operands = [src1, src2, dst]
obj.type = type_data_processing
@ispec("32[ 10 rd(5) 111000 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="jmpl")
def sparc_jmpl(obj, rd, rs1, i, rs2, simm13):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
adr = src1 + src2
dst = env.r[rd]
obj.operands = [adr, dst]
obj.misc["delayed"] = True
obj.type = type_control_flow
@ispec("32[ 10 ----- 111001 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="rett")
def sparc_rett(obj, rs1, i, rs2, simm13):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
adr = src1 + src2
obj.operands = [adr]
obj.type = type_control_flow
@ispec("32[ 10 - .cond(4) 111010 rs1(5) i -------- rs2(5) =imm7(7) ]", mnemonic="t")
def sparc_Ticc(obj, rs1, i, rs2, imm7):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(imm7, 7).signextend(32)
obj.src = (src1 + src2)[0:7]
obj.operands = [obj.src]
obj.type = type_control_flow
@ispec("32[ 10 rd(5) 101000 rs1(5) - ------------- ]", mnemonic="rd", _src=env.y)
@ispec("32[ 10 rd(5) 101001 rs1(5) - ------------- ]", mnemonic="rd", _src=env.psr)
@ispec("32[ 10 rd(5) 101010 rs1(5) - ------------- ]", mnemonic="rd", _src=env.wim)
@ispec("32[ 10 rd(5) 101011 rs1(5) - ------------- ]", mnemonic="rd", _src=env.tbr)
def sparc_rd_(obj, rd, rs1, _src):
if _src == env.y:
if rs1 == 15 and rd == 0:
obj.mnemonic = "stbar"
elif rd != 0:
_src = env.asr[rd]
dst = env.r[rd]
obj.operands = [_src, dst]
obj.type = type_other
@ispec(
"32[ 10 rd(5) 101000 rs1(5) i -------- rs2(5) =simm13(13) ]",
mnemonic="wr",
_dst=env.y,
)
@ispec(
"32[ 10 rd(5) 101001 rs1(5) i -------- rs2(5) =simm13(13) ]",
mnemonic="wr",
_dst=env.psr,
)
@ispec(
"32[ 10 rd(5) 101010 rs1(5) i -------- rs2(5) =simm13(13) ]",
mnemonic="wr",
_dst=env.wim,
)
@ispec(
"32[ 10 rd(5) 101011 rs1(5) i -------- rs2(5) =simm13(13) ]",
mnemonic="wr",
_dst=env.tbr,
)
def sparc_wr_(obj, rd, rs1, i, rs2, simm13, _dst):
if _dst == env.y:
if rs1 != 0:
_dst = env.asr[rs1]
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
obj.operands = [src1, src2, _dst]
obj.type = type_other
@ispec("32[ 10 ----- 111011 rs1(5) i -------- rs2(5) =simm13(13) ]", mnemonic="flush")
def sparc_flush(obj, rs1, i, rs2, simm13):
src1 = env.r[rs1]
src2 = env.r[rs2] if i == 0 else env.cst(simm13, 13).signextend(32)
obj.operands = [(src1 + src2)]
obj.type = type_cpu_state
# format 2
# ---------
@ispec("32[ 00 rd(5) 100 imm22(22) ]", mnemonic="sethi")
def sparc_sethi(obj, rd, imm22):
if rd == 0 and imm22 == 0:
obj.mnemonic = "nop"
else:
src = env.cst(imm22, 22)
dst = env.r[rd]
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("32[ 00 a .cond(4) 010 disp22(22) ]", mnemonic="b")
@ispec("32[ 00 a .cond(4) 110 disp22(22) ]", mnemonic="fb")
@ispec("32[ 00 a .cond(4) 111 disp22(22) ]", mnemonic="cb")
def sparc_Bicc(obj, a, disp22):
obj.operands = [env.cst(disp22, 22).signextend(32)]
obj.misc["delayed"] = True
obj.misc["annul"] = a == 1
obj.type = type_control_flow
@ispec("32[ 00 ----- 000 const22(22) ]", mnemonic="unimp")
def sparc_unimp(obj, const22):
obj.operands = [env.cst(const22, 22)]
obj.type = type_undefined
# format 1
# ---------
@ispec("32[ 01 disp30(30) ]", mnemonic="call")
def sparc_call(obj, disp30):
obj.operands = [env.cst(disp30, 30).signextend(32)]
obj.misc["delayed"] = True
obj.type = type_control_flow
#####
@ispec("32[ 10 rd(5) 110100 rs1(5) 011000100 rs2(5) ]", mnemonic="fitos")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001000 rs2(5) ]", mnemonic="fitod")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001100 rs2(5) ]", mnemonic="fitoq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011010001 rs2(5) ]", mnemonic="fstoi")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011010010 rs2(5) ]", mnemonic="fdtoi")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011010011 rs2(5) ]", mnemonic="fqtoi")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001001 rs2(5) ]", mnemonic="fstod")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001101 rs2(5) ]", mnemonic="fstoq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011000110 rs2(5) ]", mnemonic="fdtos")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001110 rs2(5) ]", mnemonic="fdtoq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011000111 rs2(5) ]", mnemonic="fqtos")
@ispec("32[ 10 rd(5) 110100 rs1(5) 011001011 rs2(5) ]", mnemonic="fqtod")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000000001 rs2(5) ]", mnemonic="fmovs")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000000101 rs2(5) ]", mnemonic="fnegs")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000001001 rs2(5) ]", mnemonic="fabss")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000101001 rs2(5) ]", mnemonic="fsqrts")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000101010 rs2(5) ]", mnemonic="fsqrtd")
@ispec("32[ 10 rd(5) 110100 rs1(5) 000101011 rs2(5) ]", mnemonic="fsqrtq")
def sparc_FPop1_group1(obj, rd, rs1, rs2):
src = env.f[rs2]
dst = env.f[rd]
obj.operands = [src, dst]
obj.type = type_other
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000001 rs2(5) ]", mnemonic="fadds")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000010 rs2(5) ]", mnemonic="faddd")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000011 rs2(5) ]", mnemonic="faddq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000101 rs2(5) ]", mnemonic="fsubs")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000110 rs2(5) ]", mnemonic="fsubd")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001000111 rs2(5) ]", mnemonic="fsubq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001001 rs2(5) ]", mnemonic="fmuls")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001010 rs2(5) ]", mnemonic="fmuld")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001011 rs2(5) ]", mnemonic="fmulq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001101001 rs2(5) ]", mnemonic="fsmuld")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001101110 rs2(5) ]", mnemonic="fdmulq")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001101 rs2(5) ]", mnemonic="fdivs")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001110 rs2(5) ]", mnemonic="fdivd")
@ispec("32[ 10 rd(5) 110100 rs1(5) 001001111 rs2(5) ]", mnemonic="fdivq")
def sparc_FPop1_group2(obj, rd, rs1, rs2):
src1 = env.f[rs1]
src2 = env.f[rs2]
dst = env.f[rd]
obj.operands = [src1, src2, dst]
obj.type = type_other
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010001 rs2(5) ]", mnemonic="fcmps")
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010010 rs2(5) ]", mnemonic="fcmpd")
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010011 rs2(5) ]", mnemonic="fcmpq")
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010101 rs2(5) ]", mnemonic="fcmpes")
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010110 rs2(5) ]", mnemonic="fcmped")
@ispec("32[ 10 rd(5) 110101 rs1(5) 001010111 rs2(5) ]", mnemonic="fcmpeq")
def sparc_FPop2_(obj, rd, rs1, rs2):
src1 = env.f[rs1]
src2 = env.f[rs2]
obj.operands = [src1, src2]
obj.type = type_other
@ispec("32[ 10 rd(5) 110110 rs1(5) opc(9) rs2(5) ]", mnemonic="cpop1")
@ispec("32[ 10 rd(5) 110111 rs1(5) opc(9) rs2(5) ]", mnemonic="cpop2")
def sparc_CPop(obj, rd, rs1, opc, rs2):
obj.operands = [opc, env.c[rs1], env.c[rs2], env.c[rd]]
obj.type = type_other
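# decoding example
# ----------------
# Self-contained sketch (not part of the spec above): how a 32-bit format-3
# word splits into the bitfields named by the @ispec patterns, e.g.
# "32[ 10 rd(5) 0 a 0000 rs1(5) i -------- rs2(5) =simm13(13) ]".
# Layout per the SPARC V8 manual; field names mirror the decoder's.
def split_format3(word):
    op = (word >> 30) & 0x3       # 0b10 selects the arithmetic/logic format
    rd = (word >> 25) & 0x1F      # destination register index
    op3 = (word >> 19) & 0x3F     # operation selector
    rs1 = (word >> 14) & 0x1F     # first source register index
    i = (word >> 13) & 0x1        # 0 -> rs2 register, 1 -> simm13 immediate
    rs2 = word & 0x1F
    simm13 = word & 0x1FFF
    if simm13 & 0x1000:           # sign-extend the 13-bit immediate
        simm13 -= 0x2000
    return dict(op=op, rd=rd, op3=op3, rs1=rs1, i=i, rs2=rs2, simm13=simm13)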
|
dndtools/dndtools
|
refs/heads/master
|
dndtools/dnd/items/__init__.py
|
1349
|
# -*- coding: utf-8 -*-
|
dhilipsiva/test
|
refs/heads/master
|
print_file.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: print_file.py
Author: dhilipsiva <dhilipsiva@gmail.com>
Date created: 2015-12-04
"""
def __getattribute__(name):
    # Note: Python never routes module attribute access through a module-level
    # __getattribute__, so this only runs when called explicitly. Look the name
    # up in the module globals and return it, instead of discarding the result
    # of a locals() lookup that would raise KeyError.
    print("ATTR: ", name)
    return globals()[name]
FOO = "foo"
print(__file__)
|
exaroth/python_koans
|
refs/heads/master
|
python2/koans/about_methods.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a, b):
return a + b
class AboutMethods(Koan):
def test_calling_a_global_function(self):
self.assertEqual(5, my_global_function(2, 3))
# NOTE: Wrong number of arguments is not a SYNTAX error, but a
# runtime error.
def test_calling_functions_with_wrong_number_of_arguments(self):
try:
my_global_function()
except Exception as exception:
# NOTE: The .__name__ attribute will convert the class
# into a string value.
self.assertEqual("TypeError", exception.__class__.__name__)
self.assertMatch(
r'my_global_function\(\) takes exactly 2 arguments \(0 given\)',
exception[0])
try:
my_global_function(1, 2, 3)
except Exception as e:
# Note, watch out for parenthesis. They need slashes in front!
self.assertMatch(
r'my_global_function\(\) takes exactly 2 arguments \(3 given\)'
, e[0])
# ------------------------------------------------------------------
def pointless_method(self, a, b):
sum = a + b
def test_which_does_not_return_anything(self):
self.assertEqual(None, self.pointless_method(1, 2))
# Notice that methods accessed from class scope do not require
# you to pass the first "self" argument?
# ------------------------------------------------------------------
def method_with_defaults(self, a, b='default_value'):
return [a, b]
def test_calling_with_default_values(self):
self.assertEqual([1, "default_value"], self.method_with_defaults(1))
self.assertEqual([1,2], self.method_with_defaults(1, 2))
# ------------------------------------------------------------------
def method_with_var_args(self, *args):
return args
def test_calling_with_variable_arguments(self):
self.assertEqual(tuple(), self.method_with_var_args())
self.assertEqual(('one', ), self.method_with_var_args('one'))
self.assertEqual(("one", "two"), self.method_with_var_args('one', 'two'))
# ------------------------------------------------------------------
def function_with_the_same_name(self, a, b):
return a + b
def test_functions_without_self_arg_are_global_functions(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(12, function_with_the_same_name(3, 4))
def test_calling_methods_in_same_class_with_explicit_receiver(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(7, self.function_with_the_same_name(3, 4))
# ------------------------------------------------------------------
def another_method_with_the_same_name(self):
return 10
link_to_overlapped_method = another_method_with_the_same_name
def another_method_with_the_same_name(self):
return 42
def test_that_old_methods_are_hidden_by_redefinitions(self):
self.assertEqual(42, self.another_method_with_the_same_name())
def test_that_overlapped_method_is_still_there(self):
self.assertEqual(10, self.link_to_overlapped_method())
# ------------------------------------------------------------------
def empty_method(self):
pass
def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
self.assertEqual(None, self.empty_method())
def test_pass_does_nothing_at_all(self):
"You"
"shall"
"not"
pass
self.assertEqual(True, "Still got to this line" != None)
# ------------------------------------------------------------------
def one_line_method(self): return 'Madagascar'
def test_no_indentation_required_for_one_line_statement_bodies(self):
self.assertEqual("Madagascar", self.one_line_method())
# ------------------------------------------------------------------
def method_with_documentation(self):
"A string placed at the beginning of a function is used for documentation"
return "ok"
def test_the_documentation_can_be_viewed_with_the_doc_method(self):
self.assertMatch(
"A string placed at the beginning of a function is used for documentation"
, self.method_with_documentation.__doc__)
# ------------------------------------------------------------------
class Dog(object):
def name(self):
return "Fido"
def _tail(self):
# Prefixing a method with an underscore implies private scope
return "wagging"
def __password(self):
return 'password' # Genius!
def test_calling_methods_in_other_objects(self):
rover = self.Dog()
self.assertEqual("Fido", rover.name())
def test_private_access_is_implied_but_not_enforced(self):
rover = self.Dog()
# This is a little rude, but legal
self.assertEqual("wagging", rover._tail())
def test_double_underscore_attribute_prefixes_cause_name_mangling(self):
"""Attributes names that start with a double underscore get
mangled when an instance is created."""
rover = self.Dog()
try:
#This may not be possible...
password = rover.__password()
except Exception as ex:
self.assertEqual("AttributeError", ex.__class__.__name__)
# But this still is!
self.assertEqual("password", rover._Dog__password())
# Name mangling exists to avoid name clash issues when subclassing.
# It is not for providing effective access protection
|
fertozudo/umatoo
|
refs/heads/master
|
lib/django/contrib/gis/db/backends/mysql/operations.py
|
328
|
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # .. ..
'contained': SpatialOperator(func='MBRWithin'), # .. ..
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func='MBREqual'),
'exact': SpatialOperator(func='MBREqual'),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func='MBREqual'),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
function_names = {
'Distance': 'ST_Distance',
'Length': 'GLength',
'Union': 'ST_Union',
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'Difference', 'ForceRHR', 'GeoHash', 'Intersection', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'SymDifference', 'Transform', 'Translate',
}
if self.connection.mysql_version < (5, 6, 1):
unsupported.update({'Distance', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
|
hardanimal/UFT_UPGEM
|
refs/heads/master
|
Lib/site-packages/pip-1.2.1-py2.7.egg/pip/commands/__init__.py
|
9480
|
#
|
dsprenkels/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/trie/__init__.py
|
1735
|
from __future__ import absolute_import, division, unicode_literals
from .py import Trie as PyTrie
Trie = PyTrie
try:
from .datrie import Trie as DATrie
except ImportError:
pass
else:
Trie = DATrie
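# The block above is the standard optional-dependency idiom: bind a pure-Python
# default, then rebind to an accelerated backend only if its import succeeds.
# A hedged, stdlib-only sketch of the same pattern (unrelated to html5lib):
#
#     try:
#         import cPickle as pickle   # C-accelerated module (Python 2)
#     except ImportError:
#         import pickle              # pure-Python fallback, always available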
|
susingha/0x_tools
|
refs/heads/master
|
scripts/checktermcolors.py
|
11
|
#!/usr/bin/env python
# Copyright (C) 2006 by Johannes Zellner, <johannes@zellner.org>
# modified by mac@calmar.ws to fit my output needs
# modified by crncosta@carloscosta.org to fit my output needs
import sys
import os
def echo(msg):
os.system('echo -n "' + str(msg) + '"')
def out(n):
os.system("tput setab " + str(n) + "; echo -n " + ("\"% 4d\"" % n))
os.system("tput setab 0")
# normal colors 0 - 15
os.system("tput setaf 16")
for n in range(8):
out(n)
echo("\n")
for n in range(8, 16):
out(n)
echo("\n")
echo("\n")
y=16
while y < 231:
for z in range(0,6):
out(y)
y += 1
echo("\n")
echo("\n")
for n in range(232, 256):
out(n)
if n == 237 or n == 243 or n == 249:
echo("\n")
echo("\n")
os.system("tput setaf 7")
os.system("tput setab 0")
|
virtuous/msm7230-gb-kernel
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
886
|
# Util.py - Python extension for perf trace, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def clear_term():
print("\x1b[H\x1b[2J")
|
ZhangAustin/GroundHog
|
refs/heads/master
|
tutorials/generate.py
|
15
|
#!/usr/bin/python2.7
"""
Script that parses the wikipedia dump, and generates the dataset in a nice
numpy format (i.e. in numpy.npz files).
Call :
generate_word.py --help
"""
from collections import Counter
import ConfigParser
import argparse
import os
import time
import sys
import numpy
def construct_vocabulary(dataset, oov_rate, level):
filename = os.path.join(dataset, 'train')
fd = open(filename, 'rt')
txt = fd.read()
if level == 'words':
txt = txt.replace('\n', ' \n ')
        txt = txt.replace('  ', ' ')
txt = txt.split(' ')
txt = [x for x in txt if x != '']
# Order the words
print ' .. sorting words'
all_items = Counter(txt).items()
no_end = [x for x in all_items if x[0] !='\n']
freqs = [x for x in all_items if x[0] == '\n'] + \
sorted(no_end,
key=lambda t: t[1],
reverse=True)
print ' .. shrinking the vocabulary size'
# Decide length
all_freq = float(sum([x[1] for x in freqs]))
up_to = len(freqs)
oov = 0.
remove_word = True
while remove_word:
up_to -= 1
oov += float(freqs[up_to][1])
if oov / all_freq > oov_rate:
remove_word = False
up_to += 1
freqs = freqs[:up_to]
words = [x[0] for x in freqs]
    return dict(zip(words, range(up_to))), [x[1] / all_freq for x in freqs], freqs
def grab_text(path, filename, vocab, oov_default, dtype, level):
filename = os.path.join(path, filename)
fd = open(filename, 'rt')
txt = fd.read()
if level == 'words':
txt = txt.replace('\n', ' \n ')
        txt = txt.replace('  ', ' ')
txt = txt.split(' ')
txt = [x for x in txt if x != '']
return numpy.asarray(
[vocab.get(w, oov_default) for w in txt],
dtype=dtype)
else:
return numpy.array(
[vocab.get(w, oov_default) for w in txt],
dtype=dtype)
def main(parser):
o = parser.parse_args()
dataset = o.path
print 'Constructing the vocabulary ..'
vocab, freqs, freq_wd = construct_vocabulary(dataset, o.oov_rate, o.level)
vocab['<unk>'] = numpy.max(list(vocab.values()))+1
oov_default = vocab["<unk>"]
print "EOL", vocab["\n"]
print 'Constructing train set'
train = grab_text(dataset, 'train', vocab, oov_default, o.dtype, o.level)
print 'Constructing valid set'
valid = grab_text(dataset, 'valid', vocab, oov_default, o.dtype, o.level)
print 'Constructing test set'
test = grab_text(dataset, 'test', vocab, oov_default, o.dtype, o.level)
print 'Saving data'
if o.level == 'words':
data = {'train_words': train, 'valid_words': valid, 'test_words': test, 'n_words': len(vocab)}
else:
data = {'train_chars': train, 'valid_chars': valid, 'test_chars': test, 'n_chars': len(vocab)}
keys = {'oov': oov_default, 'freqs': numpy.array(freqs), 'vocabulary': vocab, 'freq_wd': freq_wd}
all_keys = dict(keys.items() + data.items())
numpy.savez(o.dest, **all_keys)
inv_map = [None] * len(vocab.items())
for k, v in vocab.items():
inv_map[v] = k
if o.level == 'words':
numpy.savez(o.dest+"_dict", unique_words=inv_map)
else:
numpy.savez(o.dest+"_dict", unique_chars=inv_map)
print '... Done'
def get_parser():
usage = """
This script generates more numpy friendly format of the dataset from a text
file. The script will save the entire file into a numpy .npz file. The file
will contain the following fields:
'train' : array/matrix where each element (word or letter) is
represented by an index from 0 to vocabulary size or the
oov value (out of vocabulary). It is the training data.
'test' : array where each element (word or letter) is represented by an
index from 0 to vocabulary size or the oov value. This is the
test value.
'valid' : array where each element (word or letter) is represented by an
index from 0 to vocabulary size or the oov value. This is the
validation set.
'oov' : The value representing the out of vocabulary word
    'vocab_size' : The size of the vocabulary (this number does not account
                   for oov)
FIXME: The current script supports generating a .npz file with either character
sequences or word sequences only.
"""
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('path',
default="ntst",
help=('path to the dataset files: you should have {path}/train, {path}/test and {path}/valid'))
parser.add_argument('--dest',
help=('Where to save the processed dataset (i.e. '
'under what name and at what path). It will generate {dest}.npz and {dest}_dict.npz'),
default='tmp_data')
parser.add_argument('--level',
help=('Processing level. Either `words` or `chars`. '
'If set to word, the result dataset has one '
'token per word, otherwise a token per letter'),
default='words')
"""
parser.add_argument('--n-chains',
type=int,
help=('Number of parallel chains for the training '
'data. The way it works, is that it takes the '
'training set and divides it in `n_chains` that '
'should be processed in parallel by your model'),
default=1)
"""
parser.add_argument('--oov-rate',
type=float,
                        help=('Defines the dictionary size. If, for example, '
                              'oov_rate is set to 0.1 (meaning 10%%), the '
                              'dictionary is shrunk such that the remaining '
                              'unrepresented words of the **train** set '
                              'amount to less than 10%% of it. If set to 0, '
                              'all words in the training set are added to '
                              'the dictionary'),
default=0.)
parser.add_argument('--dtype',
help='dtype in which to store data',
default='int32')
return parser
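# Hedged sketch (not part of the original script): reading back the .npz files
# that main() writes. The key names mirror those passed to numpy.savez above;
# 'tmp_data' is simply the --dest default.
def load_generated(dest='tmp_data', level='words'):
    data = numpy.load(dest + '.npz')
    suffix = 'words' if level == 'words' else 'chars'
    train = data['train_' + suffix]
    vocab = data['vocabulary'].item()  # 0-d object array back to a dict
    return train, vocab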
if __name__ == '__main__':
main(get_parser())
|
shurain/autokey
|
refs/heads/master
|
src/lib/qtui/detectdialog.py
|
50
|
#!/usr/bin/env python
# coding=UTF-8
#
# Generated by pykdeuic4 from detectdialog.ui on Sat Mar 10 13:47:59 2012
#
# WARNING! All changes to this file will be lost.
from PyKDE4 import kdecore
from PyKDE4 import kdeui
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 240)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.titleLabel = QtGui.QLabel(self.groupBox)
self.titleLabel.setObjectName(_fromUtf8("titleLabel"))
self.verticalLayout_2.addWidget(self.titleLabel)
self.classLabel = QtGui.QLabel(self.groupBox)
self.classLabel.setObjectName(_fromUtf8("classLabel"))
self.verticalLayout_2.addWidget(self.classLabel)
self.verticalLayout.addWidget(self.groupBox)
self.kbuttongroup = KButtonGroup(Form)
self.kbuttongroup.setObjectName(_fromUtf8("kbuttongroup"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.kbuttongroup)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.classButton = QtGui.QRadioButton(self.kbuttongroup)
self.classButton.setObjectName(_fromUtf8("classButton"))
self.verticalLayout_3.addWidget(self.classButton)
self.titleButton = QtGui.QRadioButton(self.kbuttongroup)
self.titleButton.setObjectName(_fromUtf8("titleButton"))
self.verticalLayout_3.addWidget(self.titleButton)
self.verticalLayout.addWidget(self.kbuttongroup)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(kdecore.i18n(_fromUtf8("Form")))
self.groupBox.setTitle(kdecore.i18n(_fromUtf8("Window information of selected window")))
self.titleLabel.setText(kdecore.i18n(_fromUtf8("TextLabel")))
self.classLabel.setText(kdecore.i18n(_fromUtf8("TextLabel")))
self.kbuttongroup.setTitle(kdecore.i18n(_fromUtf8("Window property selection")))
self.classButton.setText(kdecore.i18n(_fromUtf8("Window class (entire application)")))
self.titleButton.setText(kdecore.i18n(_fromUtf8("Window title")))
from PyKDE4.kdeui import KButtonGroup
|
jmwright/cadquery-freecad-module
|
refs/heads/master
|
Libs/certifi/core.py
|
20
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certifi.py
~~~~~~~~~~
This module returns the installation location of cacert.pem.
"""
import os
def where():
f = os.path.dirname(__file__)
return os.path.join(f, 'cacert.pem')
if __name__ == '__main__':
print(where())
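# Typical consumer (hedged example; `requests` is a third-party client and not
# a dependency of this module):
#
#     import requests, certifi
#     requests.get("https://example.com", verify=certifi.where())
#
# Passing the bundle path as `verify` makes the client validate server
# certificates against this cacert.pem instead of the system trust store.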
|
kmdouglass/PolymerCpp
|
refs/heads/master
|
PolymerCpp/__init__.py
|
12133432
| |
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
tools/perf/page_sets/page_cycler/morejs.py
|
9
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class MorejsPage(page_module.Page):
def __init__(self, url, page_set):
super(MorejsPage, self).__init__(url=url, page_set=page_set)
class MorejsPageSet(page_set_module.PageSet):
""" More JS page_cycler benchmark """
def __init__(self):
super(MorejsPageSet, self).__init__(
# pylint: disable=C0301
serving_dirs=set(['../../../../data/page_cycler/morejs']),
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
'file://../../../../data/page_cycler/morejs/blog.chromium.org/',
'file://../../../../data/page_cycler/morejs/dev.chromium.org/',
'file://../../../../data/page_cycler/morejs/googleblog.blogspot.com1/',
'file://../../../../data/page_cycler/morejs/googleblog.blogspot.com2/',
'file://../../../../data/page_cycler/morejs/test.blogspot.com/',
'file://../../../../data/page_cycler/morejs/www.igoogle.com/',
'file://../../../../data/page_cycler/morejs/www.techcrunch.com/',
'file://../../../../data/page_cycler/morejs/www.webkit.org/',
'file://../../../../data/page_cycler/morejs/www.yahoo.com/'
]
for url in urls_list:
self.AddPage(MorejsPage(url, self))
|
odootr/odoo
|
refs/heads/8.0
|
addons/website_sale_delivery/controllers/__init__.py
|
7372
|
import main
|
karlp/libsigrok
|
refs/heads/tenma7730
|
bindings/python/setup.py
|
3
|
##
## This file is part of the libsigrok project.
##
## Copyright (C) 2013 Martin Ling <martin-sigrok@earth.li>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from setuptools import setup, find_packages, Extension
from distutils.command.build_py import build_py as _build_py
from distutils.command.build_ext import build_ext as _build_ext
import numpy as np
import os
import sys
import re
import shlex
srcdir = os.path.dirname(os.path.abspath(__file__))
os.chdir('bindings/python')
srcdir = os.path.relpath(srcdir)
srcdir_parent = os.path.normpath(os.path.join(srcdir, '..'))
# Override the default compile flags used by distutils.
os.environ['OPT'] = ''
# Parse the command line arguments for VAR=value assignments,
# and apply them as environment variables.
while len(sys.argv) > 1:
match = re.match(r'([A-Z]+)=(.*)', sys.argv[1])
if match is None:
break
os.environ[match.group(1)] = match.group(2)
del sys.argv[1]
includes = ['../../include', '../cxx/include']
includes += [os.path.normpath(os.path.join(srcdir, path)) for path in includes]
includes += ['../..', np.get_include()]
ldadd = shlex.split(os.environ.get('LDADD', ''))
libdirs = ['../../.libs', '../cxx/.libs'] + \
[l[2:] for l in ldadd if l.startswith('-L')]
libs = [l[2:] for l in ldadd if l.startswith('-l')] + ['sigrokcxx']
def vpath(file):
vfile = os.path.join(srcdir, file)
return vfile if os.path.exists(vfile) else file
def unvpath(file):
return os.path.relpath(file, srcdir) if file.startswith(srcdir) else file
class build_py(_build_py):
def find_package_modules(self, package, pkg_dir):
mods = _build_py.find_package_modules(self, package, pkg_dir)
vmods = _build_py.find_package_modules(self, package, vpath(pkg_dir))
mods.extend([mod for mod in vmods if mod not in mods])
return mods
def check_package(self, package, package_dir):
return _build_py.check_package(self, package, vpath(package_dir))
class build_ext(_build_ext):
def spawn (self, cmd):
cmd[1:-1] = [arg if arg.startswith('-') else unvpath(arg) for arg in
cmd[1:-1]]
_build_ext.spawn(self, cmd)
def swig_sources (self, sources, extension):
return [unvpath(src) for src in
_build_ext.swig_sources(self, sources, extension)]
setup(
name = 'libsigrok',
namespace_packages = ['sigrok'],
packages = find_packages(srcdir),
version = os.environ.get('VERSION'),
description = "libsigrok API wrapper",
zip_safe = False,
ext_modules = [
Extension('sigrok.core._classes',
sources = [vpath('sigrok/core/classes.i')],
swig_opts = ['-c++', '-threads', '-Isigrok/core', '-I..', '-I' + srcdir_parent] +
['-I%s' % i for i in includes],
extra_compile_args = ['-Wno-uninitialized'],
include_dirs = includes,
library_dirs = libdirs,
libraries = libs)
],
cmdclass = {'build_py': build_py, 'build_ext': build_ext},
)
|
kholidfu/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed/0002_second.py
|
564
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "0001_initial")]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.Author", models.SET_NULL, null=True)),
],
)
]
|
lampwins/netbox
|
refs/heads/develop
|
netbox/secrets/views.py
|
1
|
import base64
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views.generic import View
from dcim.models import Device
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from . import filters, forms, tables
from .decorators import userkey_required
from .models import SecretRole, Secret, SessionKey
def get_session_key(request):
"""
Extract and decode the session key sent with a request. Returns None if no session key was provided.
"""
session_key = request.COOKIES.get('session_key', None)
if session_key is not None:
return base64.b64decode(session_key)
return session_key
#
# Secret roles
#
class SecretRoleListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'secrets.view_secretrole'
queryset = SecretRole.objects.annotate(secret_count=Count('secrets'))
table = tables.SecretRoleTable
template_name = 'secrets/secretrole_list.html'
class SecretRoleCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'secrets.add_secretrole'
model = SecretRole
model_form = forms.SecretRoleForm
default_return_url = 'secrets:secretrole_list'
class SecretRoleEditView(SecretRoleCreateView):
permission_required = 'secrets.change_secretrole'
class SecretRoleBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'secrets.add_secretrole'
model_form = forms.SecretRoleCSVForm
table = tables.SecretRoleTable
default_return_url = 'secrets:secretrole_list'
class SecretRoleBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secretrole'
queryset = SecretRole.objects.annotate(secret_count=Count('secrets'))
table = tables.SecretRoleTable
default_return_url = 'secrets:secretrole_list'
#
# Secrets
#
class SecretListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'secrets.view_secret'
queryset = Secret.objects.select_related('role', 'device')
filter = filters.SecretFilter
filter_form = forms.SecretFilterForm
table = tables.SecretTable
template_name = 'secrets/secret_list.html'
class SecretView(PermissionRequiredMixin, View):
permission_required = 'secrets.view_secret'
def get(self, request, pk):
secret = get_object_or_404(Secret, pk=pk)
return render(request, 'secrets/secret.html', {
'secret': secret,
})
@permission_required('secrets.add_secret')
@userkey_required()
def secret_add(request, pk):
# Retrieve device
device = get_object_or_404(Device, pk=pk)
secret = Secret(device=device)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# We need a valid session key in order to create a Secret
if session_key is None:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# Create and encrypt the new Secret
else:
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data['plaintext'])
secret.encrypt(master_key)
secret.save()
form.save_m2m()
messages.success(request, "Added new secret: {}.".format(secret))
if '_addanother' in request.POST:
return redirect('dcim:device_addsecret', pk=device.pk)
else:
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': device.get_absolute_url(),
})
@permission_required('secrets.change_secret')
@userkey_required()
def secret_edit(request, pk):
secret = get_object_or_404(Secret, pk=pk)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# Re-encrypt the Secret if a plaintext and session key have been provided.
if form.cleaned_data['plaintext'] and session_key is not None:
# Retrieve the master key using the provided session key
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
# Create and encrypt the new Secret
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = form.cleaned_data['plaintext']
secret.encrypt(master_key)
secret.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form.add_error(None, "Invalid session key. Unable to encrypt secret data.")
# We can't save the plaintext without a session key.
elif form.cleaned_data['plaintext']:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# If no new plaintext was specified, a session key is not needed.
else:
secret = form.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': reverse('secrets:secret', kwargs={'pk': secret.pk}),
})
class SecretDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'secrets.delete_secret'
model = Secret
default_return_url = 'secrets:secret_list'
class SecretBulkImportView(BulkImportView):
permission_required = 'secrets.add_secret'
model_form = forms.SecretCSVForm
table = tables.SecretTable
template_name = 'secrets/secret_import.html'
default_return_url = 'secrets:secret_list'
widget_attrs = {'class': 'requires-session-key'}
master_key = None
def _save_obj(self, obj_form):
"""
Encrypt each object before saving it to the database.
"""
obj = obj_form.save(commit=False)
obj.encrypt(self.master_key)
obj.save()
return obj
def post(self, request):
# Grab the session key from cookies.
session_key = request.COOKIES.get('session_key')
if session_key:
# Attempt to derive the master key using the provided session key.
try:
sk = SessionKey.objects.get(userkey__user=request.user)
self.master_key = sk.get_master_key(base64.b64decode(session_key))
except SessionKey.DoesNotExist:
messages.error(request, "No session key found for this user.")
if self.master_key is not None:
return super().post(request)
else:
messages.error(request, "Invalid private key! Unable to encrypt secret data.")
else:
messages.error(request, "No session key was provided with the request. Unable to encrypt secret data.")
return render(request, self.template_name, {
'form': self._import_form(request.POST),
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
class SecretBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'secrets.change_secret'
queryset = Secret.objects.select_related('role', 'device')
filter = filters.SecretFilter
table = tables.SecretTable
form = forms.SecretBulkEditForm
default_return_url = 'secrets:secret_list'
class SecretBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secret'
queryset = Secret.objects.select_related('role', 'device')
filter = filters.SecretFilter
table = tables.SecretTable
default_return_url = 'secrets:secret_list'
|
ESS-LLP/erpnext
|
refs/heads/develop
|
erpnext/patches/v9_0/add_healthcare_domain.py
|
23
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
domain = 'Healthcare'
if not frappe.db.exists('Domain', domain):
frappe.get_doc({
'doctype': 'Domain',
'domain': domain
}).insert(ignore_permissions=True)
|
losnikitos/googleads-python-lib
|
refs/heads/master
|
examples/dfp/v201411/creative_template_service/get_all_creative_templates.py
|
4
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v201411')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative templates by statement.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for template in response['results']:
print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
'was found.' % (template['id'],
template['name'],
template['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
itmard/zconf_flask_intro
|
refs/heads/master
|
code/c3-flask-routing-with-variables.py
|
1
|
from flask import Flask
app = Flask(__name__)
@app.route('/user/<username>')
def show_user_profile(username):
return 'User %s' % username
@app.route('/post/<int:post_id>')
def show_post(post_id):
return 'Post %d' % post_id
if __name__ == "__main__":
app.run(debug=True)
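# Quick check of the routes above with Flask's built-in test client (a hedged
# sketch; use it instead of app.run when experimenting):
#
#     client = app.test_client()
#     client.get('/user/alice').data  # -> 'User alice'
#     client.get('/post/42').data     # -> 'Post 42'
#     client.get('/post/abc')         # -> 404: the int converter rejects 'abc'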
|
saisankargochhayat/algo_quest
|
refs/heads/master
|
intro_to_algo/lec_2/docdist8.py
|
1
|
#!/usr/bin/python
# docdist8.py - treat whole file as a single "line"
#
# Original version by Erik D. Demaine on January 31, 2011,
# based on code by Ronald L. Rivest (see docdist[1-7].py).
#
# Usage:
# docdist8.py filename1 filename2
#
# This program computes the "distance" between two text files
# as the angle between their word frequency vectors (in radians).
#
# For each input file, a word-frequency vector is computed as follows:
# (1) the specified file is read in
# (2) it is converted into a list of alphanumeric "words"
# Here a "word" is a sequence of consecutive alphanumeric
# characters. Non-alphanumeric characters are treated as blanks.
# Case is not significant.
# (3) for each word, its frequency of occurrence is determined
# (4) the word/frequency lists are sorted into order alphabetically
#
# The "distance" between two vectors is the angle between them.
# If x = (x1, x2, ..., xn) is the first vector (xi = freq of word i)
# and y = (y1, y2, ..., yn) is the second vector,
# then the angle between them is defined as:
# d(x,y) = arccos(inner_product(x,y) / (norm(x)*norm(y)))
# where:
# inner_product(x,y) = x1*y1 + x2*y2 + ... xn*yn
# norm(x) = sqrt(inner_product(x,x))
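#
# Worked example (illustrative numbers): for x = (1, 0) and y = (1, 1),
# inner_product(x,y) = 1, norm(x) = 1, norm(y) = sqrt(2), so
# d(x,y) = arccos(1/sqrt(2)) = pi/4 ~= 0.785 radians.
# Identical documents give d = 0; documents sharing no words give pi/2.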
import math
# math.acos(x) is the arccosine of x.
# math.sqrt(x) is the square root of x.
import string
import sys
def read_file(filename):
"""
Read the text file with the given filename;
return a list of the lines of text in the file.
"""
try:
f = open(filename, 'r')
return f.read()
except IOError:
print "Error opening or reading input file: ",filename
sys.exit()
# global variables needed for fast parsing
# translation table maps upper case to lower case and punctuation to spaces
translation_table = string.maketrans(string.punctuation+string.uppercase,
" "*len(string.punctuation)+string.lowercase)
def get_words_from_text(text):
"""
Parse the given text into words.
Return list of all words found.
"""
text = text.translate(translation_table)
word_list = text.split()
return word_list
def count_frequency(word_list):
"""
Return a dictionary mapping words to frequency.
"""
D = {}
for new_word in word_list:
if new_word in D:
D[new_word] = D[new_word]+1
else:
D[new_word] = 1
return D
def word_frequencies_for_file(filename):
"""
Return dictionary of (word,frequency) pairs for the given file.
"""
text = read_file(filename)
word_list = get_words_from_text(text)
freq_mapping = count_frequency(word_list)
return freq_mapping
def inner_product(D1,D2):
"""
Inner product between two vectors, where vectors
are represented as dictionaries of (word,freq) pairs.
Example: inner_product({"and":3,"of":2,"the":5},
{"and":4,"in":1,"of":1,"this":2}) = 14.0
"""
sum = 0.0
for key in D1:
if key in D2:
sum += D1[key] * D2[key]
return sum
def vector_angle(D1,D2):
"""
    The inputs are two dictionaries mapping words to frequencies.
Return the angle between these two vectors.
"""
numerator = inner_product(D1,D2)
denominator = math.sqrt(inner_product(D1,D1)*inner_product(D2,D2))
return math.acos(numerator/denominator)
def main():
if len(sys.argv) != 3:
print "Usage: docdist8.py filename_1 filename_2"
else:
filename_1 = sys.argv[1]
filename_2 = sys.argv[2]
sorted_word_list_1 = word_frequencies_for_file(filename_1)
sorted_word_list_2 = word_frequencies_for_file(filename_2)
distance = vector_angle(sorted_word_list_1,sorted_word_list_2)
print "The distance between the documents is: %0.6f (radians)"%distance
if __name__ == "__main__":
import cProfile
cProfile.run("main()")
|
xaratustrah/barion
|
refs/heads/main
|
version.py
|
1
|
"""
Barion
-- GUI Application --
Jul 2015 Xaratustrah
Mar 2016 Xaratustrah
"""
__version_info__ = (2, 0, 9)
__version__ = '.'.join('%d' % d for d in __version_info__)
|
ParticulateFlow/Palabos-PFM
|
refs/heads/master
|
scons/scons-local-2.1.0/SCons/Tool/javac.py
|
21
|
"""SCons.Tool.javac
Tool-specific initialization for javac.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/javac.py 5357 2011/09/09 21:31:03 bdeegan"
import os
import os.path
import SCons.Action
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
import SCons.Util
def classname(path):
"""Turn a string (path name) into a Java class name."""
return os.path.normpath(path).replace(os.sep, '.')
def emit_java_classes(target, source, env):
"""Create and return lists of source java files
and their corresponding target class files.
"""
java_suffix = env.get('JAVASUFFIX', '.java')
class_suffix = env.get('JAVACLASSSUFFIX', '.class')
target[0].must_be_same(SCons.Node.FS.Dir)
classdir = target[0]
s = source[0].rentry().disambiguate()
if isinstance(s, SCons.Node.FS.File):
sourcedir = s.dir.rdir()
elif isinstance(s, SCons.Node.FS.Dir):
sourcedir = s.rdir()
else:
raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)
slist = []
js = _my_normcase(java_suffix)
for entry in source:
entry = entry.rentry().disambiguate()
if isinstance(entry, SCons.Node.FS.File):
slist.append(entry)
elif isinstance(entry, SCons.Node.FS.Dir):
result = SCons.Util.OrderedDict()
dirnode = entry.rdir()
def find_java_files(arg, dirpath, filenames):
java_files = sorted([n for n in filenames
if _my_normcase(n).endswith(js)])
mydir = dirnode.Dir(dirpath)
java_paths = [mydir.File(f) for f in java_files]
for jp in java_paths:
arg[jp] = True
for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
find_java_files(result, dirpath, filenames)
entry.walk(find_java_files, result)
slist.extend(list(result.keys()))
else:
raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)
version = env.get('JAVAVERSION', '1.4')
full_tlist = []
for f in slist:
tlist = []
source_file_based = True
pkg_dir = None
if not f.is_derived():
pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
if classes:
source_file_based = False
if pkg_dir:
d = target[0].Dir(pkg_dir)
p = pkg_dir + os.sep
else:
d = target[0]
p = ''
for c in classes:
t = d.File(c + class_suffix)
t.attributes.java_classdir = classdir
t.attributes.java_sourcedir = sourcedir
t.attributes.java_classname = classname(p + c)
tlist.append(t)
if source_file_based:
base = f.name[:-len(java_suffix)]
if pkg_dir:
t = target[0].Dir(pkg_dir).File(base + class_suffix)
else:
t = target[0].File(base + class_suffix)
t.attributes.java_classdir = classdir
t.attributes.java_sourcedir = f.dir
t.attributes.java_classname = classname(base)
tlist.append(t)
for t in tlist:
t.set_specific_source([f])
full_tlist.extend(tlist)
return full_tlist, slist
JavaAction = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
JavaBuilder = SCons.Builder.Builder(action = JavaAction,
emitter = emit_java_classes,
target_factory = SCons.Node.FS.Entry,
source_factory = SCons.Node.FS.Entry)
class pathopt(object):
"""
Callable object for generating javac-style path options from
a construction variable (e.g. -classpath, -sourcepath).
"""
def __init__(self, opt, var, default=None):
self.opt = opt
self.var = var
self.default = default
def __call__(self, target, source, env, for_signature):
path = env[self.var]
if path and not SCons.Util.is_List(path):
path = [path]
if self.default:
path = path + [ env[self.default] ]
if path:
return [self.opt, os.pathsep.join(path)]
#return self.opt + " " + os.pathsep.join(path)
else:
return []
#return ""
def Java(env, target, source, *args, **kw):
"""
A pseudo-Builder wrapper around the separate JavaClass{File,Dir}
Builders.
"""
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
# Pad the target list with repetitions of the last element in the
# list so we have a target for every source element.
target = target + ([target[-1]] * (len(source) - len(target)))
java_suffix = env.subst('$JAVASUFFIX')
result = []
for t, s in zip(target, source):
if isinstance(s, SCons.Node.FS.Base):
if isinstance(s, SCons.Node.FS.File):
b = env.JavaClassFile
else:
b = env.JavaClassDir
else:
if os.path.isfile(s):
b = env.JavaClassFile
elif os.path.isdir(s):
b = env.JavaClassDir
elif s[-len(java_suffix):] == java_suffix:
b = env.JavaClassFile
else:
b = env.JavaClassDir
result.extend(b(t, s, *args, **kw))
return result
def generate(env):
"""Add Builders and construction variables for javac to an Environment."""
java_file = SCons.Tool.CreateJavaFileBuilder(env)
java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
java_class.add_emitter(None, emit_java_classes)
java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
java_class_dir.emitter = emit_java_classes
env.AddMethod(Java)
env['JAVAC'] = 'javac'
env['JAVACFLAGS'] = SCons.Util.CLVar('')
env['JAVABOOTCLASSPATH'] = []
env['JAVACLASSPATH'] = []
env['JAVASOURCEPATH'] = []
env['_javapathopt'] = pathopt
env['_JAVABOOTCLASSPATH'] = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
env['_JAVACLASSPATH'] = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
env['_JAVASOURCEPATH'] = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
env['_JAVASOURCEPATHDEFAULT'] = '${TARGET.attributes.java_sourcedir}'
env['_JAVACCOM'] = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
env['JAVACCOM'] = "${TEMPFILE('$_JAVACCOM')}"
env['JAVACLASSSUFFIX'] = '.class'
env['JAVASUFFIX'] = '.java'
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
markyzq/kernel-drm-rockchip
|
refs/heads/drm-next
|
tools/perf/python/twatch.py
|
1565
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
gnychis/grforwarder
|
refs/heads/txsync
|
gr-wxgui/src/python/forms/forms.py
|
15
|
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
The forms module contains general purpose wx-gui forms for gnuradio apps.
The forms follow a layered model:
* internal layer
* deals with the wxgui objects directly
* implemented in event handler and update methods
* translation layer
* translates the between the external and internal layers
* handles parsing errors between layers
* external layer
* provided external access to the user
* set_value, get_value, and optional callback
* set and get through optional pubsub and key
Known problems:
* An empty label in the radio box still consumes space.
* The static text cannot resize the parent at runtime.
"""
EXT_KEY = 'external'
INT_KEY = 'internal'
import wx
import sys
from gnuradio.gr.pubsub import pubsub
import converters
EVT_DATA = wx.PyEventBinder(wx.NewEventType())
class DataEvent(wx.PyEvent):
def __init__(self, data):
wx.PyEvent.__init__(self, wx.NewId(), EVT_DATA.typeId)
self.data = data
def make_bold(widget):
font = widget.GetFont()
font.SetWeight(wx.FONTWEIGHT_BOLD)
widget.SetFont(font)
########################################################################
# Base Class Form
########################################################################
class _form_base(pubsub, wx.BoxSizer):
def __init__(self, parent=None, sizer=None, proportion=0, flag=wx.EXPAND, ps=None, key='', value=None, callback=None, converter=converters.identity_converter()):
pubsub.__init__(self)
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
self._parent = parent
self._key = key
self._converter = converter
self._callback = callback
self._widgets = list()
#add to the sizer if provided
if sizer: sizer.Add(self, proportion, flag)
#proxy the pubsub and key into this form
if ps is not None:
assert key
self.proxy(EXT_KEY, ps, key)
#no pubsub passed, must set initial value
else: self.set_value(value)
def __str__(self):
return "Form: %s -> %s"%(self.__class__, self._key)
def _add_widget(self, widget, label='', flag=0, label_prop=0, widget_prop=1):
"""
Add the main widget to this object sizer.
If label is passed, add a label as well.
Register the widget and the label in the widgets list (for enable/disable).
Bind the update handler to the widget for data events.
This ensures that the gui thread handles updating widgets.
Setup the pusub triggers for external and internal.
@param widget the main widget
@param label the optional label
@param flag additional flags for widget
@param label_prop the proportion for the label
@param widget_prop the proportion for the widget
"""
#setup data event
widget.Bind(EVT_DATA, lambda x: self._update(x.data))
update = lambda x: wx.PostEvent(widget, DataEvent(x))
#register widget
self._widgets.append(widget)
#create optional label
if not label: self.Add(widget, widget_prop, wx.ALIGN_CENTER_VERTICAL | flag)
else:
label_text = wx.StaticText(self._parent, label='%s: '%label)
self._widgets.append(label_text)
self.Add(label_text, label_prop, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
self.Add(widget, widget_prop, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | flag)
#initialize without triggering pubsubs
self._translate_external_to_internal(self[EXT_KEY])
update(self[INT_KEY])
#subscribe all the functions
self.subscribe(INT_KEY, update)
self.subscribe(INT_KEY, self._translate_internal_to_external)
self.subscribe(EXT_KEY, self._translate_external_to_internal)
def _translate_external_to_internal(self, external):
try:
internal = self._converter.external_to_internal(external)
#prevent infinite loop between internal and external pubsub keys by only setting if changed
if self[INT_KEY] != internal: self[INT_KEY] = internal
except Exception, e:
self._err_msg(external, e)
self[INT_KEY] = self[INT_KEY] #reset to last good setting
def _translate_internal_to_external(self, internal):
try:
external = self._converter.internal_to_external(internal)
#prevent infinite loop between internal and external pubsub keys by only setting if changed
if self[EXT_KEY] != external: self[EXT_KEY] = external
except Exception, e:
self._err_msg(internal, e)
self[EXT_KEY] = self[EXT_KEY] #reset to last good setting
if self._callback: self._callback(self[EXT_KEY])
def _err_msg(self, value, e):
print >> sys.stderr, self, 'Error translating value: "%s"\n\t%s\n\t%s'%(value, e, self._converter.help())
#override in subclasses to handle the wxgui object
def _update(self, value): raise NotImplementedError
def _handle(self, event): raise NotImplementedError
#provide a set/get interface for this form
def get_value(self): return self[EXT_KEY]
def set_value(self, value): self[EXT_KEY] = value
def Disable(self, disable=True): self.Enable(not disable)
def Enable(self, enable=True):
if enable:
for widget in self._widgets: widget.Enable()
else:
for widget in self._widgets: widget.Disable()
########################################################################
# Base Class Chooser Form
########################################################################
class _chooser_base(_form_base):
def __init__(self, choices=[], labels=None, **kwargs):
_form_base.__init__(self, converter=converters.chooser_converter(choices), **kwargs)
self._choices = choices
self._labels = map(str, labels or choices)
########################################################################
# Base Class Slider Form
########################################################################
class _slider_base(_form_base):
def __init__(self, label='', length=-1, converter=None, num_steps=100, style=wx.SL_HORIZONTAL, **kwargs):
_form_base.__init__(self, converter=converter, **kwargs)
if style & wx.SL_HORIZONTAL: slider_size = wx.Size(length, -1)
elif style & wx.SL_VERTICAL: slider_size = wx.Size(-1, length)
else: raise NotImplementedError
self._slider = wx.Slider(self._parent, minValue=0, maxValue=num_steps, size=slider_size, style=style)
self._slider.Bind(wx.EVT_SCROLL, self._handle)
self._add_widget(self._slider, label, flag=wx.EXPAND)
def _handle(self, event): self[INT_KEY] = self._slider.GetValue()
def _update(self, value): self._slider.SetValue(int(round(value)))
########################################################################
# Static Text Form
########################################################################
class static_text(_form_base):
"""
A static text form.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param label title label for this widget (optional)
@param width the width of the form in px
@param bold true to bold-ify the text (default=False)
@param units a suffix to add after the text
@param converter forms.str_converter(), int_converter(), float_converter()...
"""
def __init__(self, label='', width=-1, bold=False, units='', converter=converters.str_converter(), **kwargs):
self._units = units
_form_base.__init__(self, converter=converter, **kwargs)
self._static_text = wx.StaticText(self._parent, size=wx.Size(width, -1))
if bold: make_bold(self._static_text)
self._add_widget(self._static_text, label)
def _update(self, label):
if self._units: label += ' ' + self._units
self._static_text.SetLabel(label); self._parent.Layout()
########################################################################
# Text Box Form
########################################################################
class text_box(_form_base):
"""
A text box form.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param label title label for this widget (optional)
@param width the width of the form in px
@param converter forms.str_converter(), int_converter(), float_converter()...
"""
def __init__(self, label='', width=-1, converter=converters.eval_converter(), **kwargs):
_form_base.__init__(self, converter=converter, **kwargs)
self._text_box = wx.TextCtrl(self._parent, size=wx.Size(width, -1), style=wx.TE_PROCESS_ENTER)
self._default_bg_colour = self._text_box.GetBackgroundColour()
self._text_box.Bind(wx.EVT_TEXT_ENTER, self._handle)
self._text_box.Bind(wx.EVT_TEXT, self._update_color)
self._add_widget(self._text_box, label)
def _update_color(self, *args):
if self._text_box.GetValue() == self[INT_KEY]:
self._text_box.SetBackgroundColour(self._default_bg_colour)
else: self._text_box.SetBackgroundColour('#EEDDDD')
def _handle(self, event): self[INT_KEY] = self._text_box.GetValue()
def _update(self, value): self._text_box.SetValue(value); self._update_color()
########################################################################
# Slider Form
# Linear Slider
# Logarithmic Slider
########################################################################
class slider(_slider_base):
"""
A generic linear slider.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param label title label for this widget (optional)
@param length the length of the slider in px (optional)
@param style wx.SL_HORIZONTAL or wx.SL_VERTICAL (default=horizontal)
@param minimum the minimum value
@param maximum the maximum value
@param num_steps the number of slider steps (or specify step_size)
@param step_size the step between slider jumps (or specify num_steps)
@param cast a cast function, int, or float (default=float)
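For example, minimum=-100, maximum=100, step_size=2 implies num_steps=100.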
"""
def __init__(self, minimum=-100, maximum=100, num_steps=100, step_size=None, cast=float, **kwargs):
assert step_size or num_steps
if step_size is not None: num_steps = (maximum - minimum)/step_size
converter = converters.slider_converter(minimum=minimum, maximum=maximum, num_steps=num_steps, cast=cast)
_slider_base.__init__(self, converter=converter, num_steps=num_steps, **kwargs)
class log_slider(_slider_base):
"""
A generic logarithmic slider.
The slider's min and max values are base**min_exp and base**max_exp.
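For example, base=10, min_exp=0, max_exp=3 spans values from 1 to 1000.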
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param label title label for this widget (optional)
@param length the length of the slider in px (optional)
@param style wx.SL_HORIZONTAL or wx.SL_VERTICAL (default=horizontal)
@param min_exp the minimum exponent
@param max_exp the maximum exponent
@param base the exponent base in base**exp
@param num_steps the number of slider steps (or specify step_size)
@param step_size the exponent step size (or specify num_steps)
"""
def __init__(self, min_exp=0, max_exp=1, base=10, num_steps=100, step_size=None, **kwargs):
assert step_size or num_steps
if step_size is not None: num_steps = (max_exp - min_exp)/step_size
converter = converters.log_slider_converter(min_exp=min_exp, max_exp=max_exp, num_steps=num_steps, base=base)
_slider_base.__init__(self, converter=converter, num_steps=num_steps, **kwargs)
########################################################################
# Gauge Form
########################################################################
class gauge(_form_base):
"""
A gauge bar.
The gauge displays floating point values between the minimum and maximum.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param label title label for this widget (optional)
@param length the length of the slider in px (optional)
@param style wx.GA_HORIZONTAL or wx.GA_VERTICAL (default=horizontal)
@param minimum the minimum value
@param maximum the maximum value
@param num_steps the number of slider steps (or specify step_size)
@param step_size the step between slider jumps (or specify num_steps)
"""
def __init__(self, label='', length=-1, minimum=-100, maximum=100, num_steps=100, step_size=None, style=wx.GA_HORIZONTAL, **kwargs):
assert step_size or num_steps
if step_size is not None: num_steps = (maximum - minimum)/step_size
converter = converters.slider_converter(minimum=minimum, maximum=maximum, num_steps=num_steps, cast=float)
_form_base.__init__(self, converter=converter, **kwargs)
if style & wx.GA_HORIZONTAL: gauge_size = wx.Size(length, -1)
elif style & wx.GA_VERTICAL: gauge_size = wx.Size(-1, length)
else: raise NotImplementedError
self._gauge = wx.Gauge(self._parent, range=num_steps, size=gauge_size, style=style)
self._add_widget(self._gauge, label, flag=wx.EXPAND)
def _update(self, value): self._gauge.SetValue(value)
########################################################################
# Check Box Form
########################################################################
class check_box(_form_base):
"""
Create a check box form.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param true the value for form when checked (default=True)
@param false the value for form when unchecked (default=False)
@param label title label for this widget (optional)
"""
def __init__(self, label='', true=True, false=False, **kwargs):
_form_base.__init__(self, converter=converters.bool_converter(true=true, false=false), **kwargs)
self._check_box = wx.CheckBox(self._parent, style=wx.CHK_2STATE, label=label)
self._check_box.Bind(wx.EVT_CHECKBOX, self._handle)
self._add_widget(self._check_box)
def _handle(self, event): self[INT_KEY] = self._check_box.IsChecked()
def _update(self, checked): self._check_box.SetValue(checked)
########################################################################
# Drop Down Chooser Form
########################################################################
class drop_down(_chooser_base):
"""
Create a drop down menu form.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param choices list of possible values
@param labels list of labels for each choice (default=choices)
@param label title label for this widget (optional)
@param width the form width in px (optional)
"""
def __init__(self, label='', width=-1, **kwargs):
_chooser_base.__init__(self, **kwargs)
self._drop_down = wx.Choice(self._parent, choices=self._labels, size=wx.Size(width, -1))
self._drop_down.Bind(wx.EVT_CHOICE, self._handle)
self._add_widget(self._drop_down, label, widget_prop=0, label_prop=1)
def _handle(self, event): self[INT_KEY] = self._drop_down.GetSelection()
def _update(self, i): self._drop_down.SetSelection(i)
########################################################################
# Button Chooser Form
# Circularly move through the choices with each click.
# Can be a single-click button with one choice.
# Can be a 2-state button with two choices.
########################################################################
class button(_chooser_base):
"""
Create a multi-state button.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param choices list of possible values
@param labels list of labels for each choice (default=choices)
@param width the width of the button in pixels (optional)
@param style style arguments (optional)
@param label title label for this widget (optional)
"""
def __init__(self, label='', style=0, width=-1, **kwargs):
_chooser_base.__init__(self, **kwargs)
self._button = wx.Button(self._parent, size=wx.Size(width, -1), style=style)
self._button.Bind(wx.EVT_BUTTON, self._handle)
self._add_widget(self._button, label, widget_prop=((not style&wx.BU_EXACTFIT) and 1 or 0))
def _handle(self, event): self[INT_KEY] = (self[INT_KEY] + 1)%len(self._choices) #circularly increment index
def _update(self, i): self._button.SetLabel(self._labels[i]); self.Layout()
class toggle_button(button):
"""
Create a dual-state button.
This button will alternate between True and False when clicked.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param width the width of the button in pixels (optional)
@param style style arguments (optional)
@param true_label the button's label in the true state
@param false_label the button's label in the false state
"""
def __init__(self, true_label='On (click to stop)', false_label='Off (click to start)', **kwargs):
button.__init__(self, choices=[True, False], labels=[true_label, false_label], **kwargs)
class single_button(toggle_button):
"""
Create a single state button.
This button will invoke the callback when clicked.
For use when holding state is not important.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param width the width of the button in pixels (optional)
@param style style arguments (optional)
@param label the button's label
"""
def __init__(self, label='click for callback', **kwargs):
toggle_button.__init__(self, true_label=label, false_label=label, value=True, **kwargs)
########################################################################
# Radio Buttons Chooser Form
########################################################################
class radio_buttons(_chooser_base):
"""
Create a radio button form.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param ps the pubsub object (optional)
@param key the pubsub key (optional)
@param value the default value (optional)
@param choices list of possible values
@param labels list of labels for each choice (default=choices)
@param major_dimension the number of rows/cols (default=auto)
@param label title label for this widget (optional)
@param style useful style args: wx.RA_HORIZONTAL, wx.RA_VERTICAL, wx.NO_BORDER (default=wx.RA_HORIZONTAL)
"""
def __init__(self, style=wx.RA_HORIZONTAL, label='', major_dimension=0, **kwargs):
_chooser_base.__init__(self, **kwargs)
#create radio buttons
self._radio_buttons = wx.RadioBox(self._parent, choices=self._labels, style=style, label=label, majorDimension=major_dimension)
self._radio_buttons.Bind(wx.EVT_RADIOBOX, self._handle)
self._add_widget(self._radio_buttons)
def _handle(self, event): self[INT_KEY] = self._radio_buttons.GetSelection()
def _update(self, i): self._radio_buttons.SetSelection(i)
########################################################################
# Notebook Chooser Form
# The notebook pages/tabs are for selecting between choices.
# A page must be added to the notebook for each choice.
########################################################################
class notebook(_chooser_base):
def __init__(self, pages, notebook, **kwargs):
_chooser_base.__init__(self, **kwargs)
assert len(pages) == len(self._choices)
self._notebook = notebook
self._notebook.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self._handle)
#add pages, setting the label on each tab
for i, page in enumerate(pages):
self._notebook.AddPage(page, self._labels[i])
self._add_widget(self._notebook)
def _handle(self, event): self[INT_KEY] = self._notebook.GetSelection()
def _update(self, i): self._notebook.SetSelection(i)
# ----------------------------------------------------------------
# Stand-alone test application
# ----------------------------------------------------------------
import wx
from gnuradio.wxgui import gui
class app_gui (object):
def __init__(self, frame, panel, vbox, top_block, options, args):
def callback(v): print v
radio_buttons(
sizer=vbox,
parent=panel,
choices=[2, 4, 8, 16],
labels=['two', 'four', 'eight', 'sixteen'],
value=4,
style=wx.RA_HORIZONTAL,
label='test radio long string',
callback=callback,
#major_dimension = 2,
)
radio_buttons(
sizer=vbox,
parent=panel,
choices=[2, 4, 8, 16],
labels=['two', 'four', 'eight', 'sixteen'],
value=4,
style=wx.RA_VERTICAL,
label='test radio long string',
callback=callback,
#major_dimension = 2,
)
radio_buttons(
sizer=vbox,
parent=panel,
choices=[2, 4, 8, 16],
labels=['two', 'four', 'eight', 'sixteen'],
value=4,
style=wx.RA_VERTICAL | wx.NO_BORDER,
callback=callback,
#major_dimension = 2,
)
button(
sizer=vbox,
parent=panel,
choices=[2, 4, 8, 16],
labels=['two', 'four', 'eight', 'sixteen'],
value=2,
label='button value',
callback=callback,
#width=100,
)
drop_down(
sizer=vbox,
parent=panel,
choices=[2, 4, 8, 16],
value=2,
label='Choose One',
callback=callback,
)
check_box(
sizer=vbox,
parent=panel,
value=False,
label='check me',
callback=callback,
)
text_box(
sizer=vbox,
parent=panel,
value=3,
label='text box',
callback=callback,
width=200,
)
static_text(
sizer=vbox,
parent=panel,
value='bob',
label='static text',
width=-1,
bold=True,
)
slider(
sizer=vbox,
parent=panel,
value=12,
label='slider',
callback=callback,
)
log_slider(
sizer=vbox,
parent=panel,
value=12,
label='slider',
callback=callback,
)
slider(
sizer=vbox,
parent=panel,
value=12,
label='slider',
callback=callback,
style=wx.SL_VERTICAL,
length=30,
)
toggle_button(
sizer=vbox,
parent=panel,
value=True,
label='toggle it',
callback=callback,
)
single_button(
sizer=vbox,
parent=panel,
label='sig test',
callback=callback,
)
if __name__ == "__main__":
try:
# Create the GUI application
app = gui.app(
gui=app_gui, # User interface class
title="Test Forms", # Top window title
)
# And run it
app.MainLoop()
except RuntimeError, e:
print e
sys.exit(1)
|
brett-lempereur/pgnet-mappy
|
refs/heads/master
|
mappy.py
|
2
|
"""
Script to extract location information from photograph metadata
and export it into a CSV file.
"""
import csv
import glob
import operator
import os
import sys
import pyexiv2
# Constants for the EXIF keys we will extract.
LATITUDE = "Exif.GPSInfo.GPSLatitude"
LATITUDE_REF = "Exif.GPSInfo.GPSLatitudeRef"
LONGITUDE = "Exif.GPSInfo.GPSLongitude"
LONGITUDE_REF = "Exif.GPSInfo.GPSLongitudeRef"
MAKE = "Exif.Image.Make"
MODEL = "Exif.Image.Model"
TIMESTAMP = "Exif.Image.DateTime"
# Extract command-line arguments.
source = sys.argv[1]
target = sys.argv[2]
# Verify that the source path exists.
if not os.path.isdir(source):
print "Source does not exist."
sys.exit(1)
# Display the source and target.
print "Source:", source
print "Target:", target
# List all JPEG files in the source.
pattern = os.path.join(source, "*.jpg")
filenames = glob.glob(pattern)
# Display the list of filenames.
print "Filenames:", filenames
# Extract the photograph metadata into a table.
table = []
for photograph in filenames:
# Read the metadata from the photograph.
metadata = pyexiv2.ImageMetadata(photograph)
metadata.read()
# Extract the location in degrees, minutes, and seconds.
lat_deg, lat_min, lat_sec = metadata[LATITUDE].value
lon_deg, lon_min, lon_sec = metadata[LONGITUDE].value
# Convert the degrees, minutes, and seconds to decimal.
latitude = (lat_deg + (lat_min / 60.0) + (lat_sec / 3600.0))
longitude = (lon_deg + (lon_min / 60.0) + (lon_sec / 3600.0))
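# Worked example: 53 deg 24 min 36 sec -> 53 + 24/60.0 + 36/3600.0 = 53.41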
if metadata[LATITUDE_REF].value != "N":
latitude *= -1
if metadata[LONGITUDE_REF].value != "E":
longitude *= -1
# Extract descriptive information from the metadata.
make = metadata[MAKE].value
model = metadata[MODEL].value
timestamp = metadata[TIMESTAMP].value
# Extract the name of the photograph from its full path.
name = os.path.basename(photograph)
# Add the information to the table.
table.append((name, timestamp, make, model, latitude, longitude))
# Display the table of photograph metadata.
print "Table:", table
# Sort the table by timestamp.
table.sort(key=operator.itemgetter(1))
# Display the table of photograph metadata.
print "Sorted table:", table
# Open the output file for writing.
with open(target, "wb") as handle:
writer = csv.writer(handle)
writer.writerow(["Name", "Time", "Make", "Model", "Latitude", "Longitude"])
writer.writerows(table)
# Let the user know that we've finished.
print "Mappy is finished!"
|
allison-group/indigo-bondorder
|
refs/heads/master
|
archive/src/indigox/ball.py
|
1
|
from copy import deepcopy
from indigox.config import BALL_DATA_FILE, INFINITY, MAX_SOLUTIONS
from indigox.misc import BondOrderAssignment, graph_to_dist_graph, node_energy
try:
import BALLCore as BALL
BALL_AVAILABLE = True
BALL_ELEMENTS = dict(
H=BALL.PTE['H'], He=BALL.PTE['HE'], Li=BALL.PTE['LI'],
Be=BALL.PTE['BE'], B=BALL.PTE['B'], C=BALL.PTE['C'],
N=BALL.PTE['N'], O=BALL.PTE['O'], F=BALL.PTE['F'],
Ne=BALL.PTE['NE'], Na=BALL.PTE['NA'], Mg=BALL.PTE['MG'],
Al=BALL.PTE['AL'], Si=BALL.PTE['SI'], P=BALL.PTE['P'],
S=BALL.PTE['S'], Cl=BALL.PTE['CL'], Ar=BALL.PTE['AR'],
K=BALL.PTE['K'], Ca=BALL.PTE['CA'], Sc=BALL.PTE['SC'],
Ti=BALL.PTE['TI'], V=BALL.PTE['V'], Cr=BALL.PTE['CR'],
Mn=BALL.PTE['MN'], Fe=BALL.PTE['FE'], Co=BALL.PTE['CO'],
Ni=BALL.PTE['NI'], Cu=BALL.PTE['CU'], Zn=BALL.PTE['ZN'],
Ga=BALL.PTE['GA'], Ge=BALL.PTE['GE'], As=BALL.PTE['AS'],
Se=BALL.PTE['SE'], Br=BALL.PTE['BR'], Kr=BALL.PTE['KR'],
Rb=BALL.PTE['RB'], Sr=BALL.PTE['SR'], Y=BALL.PTE['Y'],
Zr=BALL.PTE['ZR'], Nb=BALL.PTE['NB'], Mo=BALL.PTE['MO'],
Tc=BALL.PTE['TC'], Ru=BALL.PTE['RU'], Rh=BALL.PTE['RH'],
Pd=BALL.PTE['PD'], Ag=BALL.PTE['AG'], Cd=BALL.PTE['CD'],
In=BALL.PTE['IN'], Sn=BALL.PTE['SN'], Sb=BALL.PTE['SB'],
Te=BALL.PTE['TE'], I=BALL.PTE['I'], Xe=BALL.PTE['XE'],
Cs=BALL.PTE['CS'], Ba=BALL.PTE['BA'], La=BALL.PTE['LA'],
Ce=BALL.PTE['CE'], Pr=BALL.PTE['PR'], Nd=BALL.PTE['ND'],
Pm=BALL.PTE['PM'], Sm=BALL.PTE['SM'], Eu=BALL.PTE['EU'],
Gd=BALL.PTE['GD'], Tb=BALL.PTE['TB'], Dy=BALL.PTE['DY'],
Ho=BALL.PTE['HO'], Er=BALL.PTE['ER'], Tm=BALL.PTE['TM'],
Yb=BALL.PTE['YB'], Lu=BALL.PTE['LU'], Hf=BALL.PTE['HF'],
Ta=BALL.PTE['TA'], W=BALL.PTE['W'], Re=BALL.PTE['RE'],
Os=BALL.PTE['OS'], Ir=BALL.PTE['IR'], Pt=BALL.PTE['PT'],
Au=BALL.PTE['AU'], Hg=BALL.PTE['HG'], Tl=BALL.PTE['TL'],
Pb=BALL.PTE['PB'], Bi=BALL.PTE['BI'], At=BALL.PTE['AT'],
Rn=BALL.PTE['RN'], Fr=BALL.PTE['FR'], Ra=BALL.PTE['RA'],
Ac=BALL.PTE['AC'], Th=BALL.PTE['TH'], Pa=BALL.PTE['PA'],
U=BALL.PTE['U'], Np=BALL.PTE['NP'], Pu=BALL.PTE['PU'],
Po=BALL.PTE['PO'], Am=BALL.PTE['AM'], Cm=BALL.PTE['CM'],
Bk=BALL.PTE['BK'], Cf=BALL.PTE['CF'], Es=BALL.PTE['ES'],
Fm=BALL.PTE['FM'], Md=BALL.PTE['MD'], No=BALL.PTE['NO'],
Lr=BALL.PTE['LR'], Rf=BALL.PTE['RF'], Db=BALL.PTE['DB'],
Sg=BALL.PTE['SG'], Bh=BALL.PTE['BH'], Hs=BALL.PTE['HS'],
Mt=BALL.PTE['MT'],)
# set up the bond order processor
bop = BALL.AssignBondOrderProcessor()
# aliases for the long option and algorithm names
opts = BALL.AssignBondOrderProcessor.Option
algo = BALL.AssignBondOrderProcessor.Algorithm
bop.options.setBool(opts.KEKULIZE_RINGS, True)
bop.options.setBool(opts.OVERWRITE_SINGLE_BOND_ORDERS, True)
bop.options.setBool(opts.OVERWRITE_DOUBLE_BOND_ORDERS, True)
bop.options.setBool(opts.OVERWRITE_TRIPLE_BOND_ORDERS, True)
bop.options.set(opts.ALGORITHM, algo.A_STAR)
bop.options.setReal(opts.BOND_LENGTH_WEIGHTING, 0)
bop.options.setInteger(opts.MAX_NUMBER_OF_SOLUTIONS, MAX_SOLUTIONS)
bop.options.setBool(opts.COMPUTE_ALSO_NON_OPTIMAL_SOLUTIONS, False)
bop.options.setBool(opts.ADD_HYDROGENS, False)
bop.options.set(opts.INIFile, str(BALL_DATA_FILE))
except ImportError:
BALL_AVAILABLE = False
class BallOpt(BondOrderAssignment):
def __init__(self, G):
self.init_G = G
def initialise(self):
self.G = graph_to_dist_graph(self.init_G)
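# Note: graph_to_dist_graph apparently keys atoms by 1-tuples (a,) and
# bonds by 2-tuples (a, b); the 'fc' and 'e-' lookups in run() and
# assignment_to_graph() below rely on that convention.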
self.system = BALL.System()
self.mol = BALL.Molecule()
self.atoms = {}
self.bonds = []
for a, d in self.init_G.nodes(True):
ball_e = BALL_ELEMENTS[d['element']]
atom = BALL.Atom()
atom.setName(str(a))
atom.setElement(ball_e)
self.atoms[a] = atom
for a, b, d in self.init_G.edges(data=True):
bond = self.atoms[a].createBond(self.atoms[b])
bond.setOrder(1)
self.bonds.append(bond)
for atom in self.atoms.values():
self.mol.insert(atom)
self.system.insert(self.mol)
def run(self):
if not BALL_AVAILABLE:
self.log.warning('BALL method is unavailable as BALLCore could not '
'be loaded.')
for x in self.init_G:
self.init_G.node[x]['formal_charge'] = 0
for y in self.init_G[x]:
self.init_G[x][y]['order'] = 1
return self.init_G, INFINITY
else:
self.log.warning("BALL method selected. Formal charges will not be "
"optimised.")
best_ene = INFINITY * INFINITY
best_g = None
self.initialise()
self.system.apply(bop)
for i in range(bop.getNumberOfComputedSolutions()):
bop.apply(i)
for atom in BALL.atoms(self.system):
a = int(str(atom.getName()))
fc = int(atom.getFormalCharge())
self.G.node[(a,)]['fc'] = fc
for bond in BALL.bonds(self.system):
a = int(str(bond.getFirstAtom().getName()))
b = int(str(bond.getSecondAtom().getName()))
bo = int(bond.getOrder())
if a > b:
a, b = b, a
self.G.node[(a, b)]['e-'] = bo * 2
i_ene = round(sum(node_energy(self.G, n) for n in self.G),5)
if i_ene < best_ene:
best_ene = i_ene
best_g = self.assignment_to_graph()
return best_g, best_ene
def assignment_to_graph(self):
G = deepcopy(self.init_G)
for v in self.G:
if len(v) == 1:
G.node[v[0]]['formal_charge'] = 0
if len(v) == 2:
G[v[0]][v[1]]['order'] = self.G.node[v]['e-'] // 2
return G
|
ruibarreira/linuxtrail
|
refs/heads/master
|
usr/lib/python2.7/dist-packages/pygments/lexers/modeling.py
|
71
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.modeling
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for modeling languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.html import HtmlLexer
from pygments.lexers import _stan_builtins
__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
.. versionadded:: 1.1
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.DOTALL | re.MULTILINE
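# A Modelica name is either a quoted identifier ('...' with backslash
# escapes) or an ordinary identifier matching [a-zA-Z_]\w*.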
_name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
tokens = {
'whitespace': [
(u'[\\s\ufeff]+', Text),
(r'//[^\n]*\n?', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'root': [
include('whitespace'),
(r'"', String.Double, 'string'),
(r'[()\[\]{},;]+', Punctuation),
(r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
(r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
(r'\d+', Number.Integer),
(r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
r'transpose|vector|zeros)\b', Name.Builtin),
(r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'equation|exit|expandable|extends|external|final|flow|for|if|'
r'import|impure|in|initial|inner|input|loop|nondiscrete|outer|'
r'output|parameter|partial|protected|public|pure|redeclare|'
r'replaceable|return|stream|then|when|while)\b',
Keyword.Reserved),
(r'(and|not|or)\b', Operator.Word),
(r'(block|class|connector|end|function|model|operator|package|'
r'record|type)\b', Keyword.Reserved, 'class'),
(r'(false|true)\b', Keyword.Constant),
(r'within\b', Keyword.Reserved, 'package-prefix'),
(_name, Name)
],
'class': [
include('whitespace'),
(r'(function|record)\b', Keyword.Reserved),
(r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'package-prefix': [
include('whitespace'),
(_name, Name.Namespace, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\[\'"?\\abfnrtv]', String.Escape),
(r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
using(HtmlLexer)),
(r'<|\\?[^"\\<]+', String.Double)
]
}
class BugsLexer(RegexLexer):
"""
Pygments Lexer for `OpenBugs <http://www.openbugs.net/>`_ and WinBugs
models.
.. versionadded:: 1.6
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = (
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
# Special
'D', 'I', 'F', 'T', 'C')
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS; ``I`` is in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish')
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][\w.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
.. versionadded:: 1.6
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
# JAGS
_FUNCTIONS = (
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
# Truncation/Censoring (debatable whether these belong here)
'T', 'I')
# Distributions with density, probability and quantile functions
_DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
('bern', 'beta', 'chisqr', 'dexp', 'exp',
'f', 'gamma', 'gen.gamma', 'logis', 'lnorm',
'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
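# e.g. the entry 'norm' expands to the regex [dpq]norm, which matches
# the JAGS functions dnorm, pnorm and qnorm.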
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = (
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich')
tokens = {
'whitespace': [
(r"\s+", Text),
],
'names': [
# Regular variable names
(r'[a-zA-Z][\w.]*\b', Name),
],
'comments': [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model|data)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![\w.])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Builtins
# Need to use lookahead because . is a valid char
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
# JAGS includes many more operators than OpenBUGS
(r'\+|-|\*|\/|\|\||&&|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
The Stan modeling language is specified in the *Stan Modeling Language
User's Guide and Reference Manual, v2.4.0*,
`pdf <https://github.com/stan-dev/stan/releases/download/v2.4.0/stan-reference-2.4.0.pdf>`__.
.. versionadded:: 1.6
"""
name = 'Stan'
aliases = ['stan']
filenames = ['*.stan']
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'(//|#).*$', Comment.Single),
],
'root': [
# Stan is more restrictive on strings than this regex
(r'"[^"]*"', String),
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(%s)(\s*)(\{)' %
r'|'.join(('functions', 'data', r'transformed\s+?data',
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
# Truncation
(r'T(?=\s*\[)', Keyword),
# Data types
(r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
# Punctuation
(r"[;:,\[\]()]", Punctuation),
# Builtin
(r'(%s)(?=\s*\()'
% r'|'.join(_stan_builtins.FUNCTIONS
+ _stan_builtins.DISTRIBUTIONS),
Name.Builtin),
# Special names ending in __, like lp__
(r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
(r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
# Regular variable names
(r'[A-Za-z]\w*\b', Name),
# Real Literals
(r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
(r'-?[0-9]*\.[0-9]*', Number.Float),
# Integer Literals
(r'-?[0-9]+', Number.Integer),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix, prefix and postfix operators (and = )
(r"\+|-|\.?\*|\.?/|\\|'|\^|==?|!=?|<=?|>=?|\|\||&&", Operator),
# Block delimiters
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*parameters\s*\{', text, re.M):
return 1.0
else:
return 0.0
|
wenderen/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/testrunner.py
|
59
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import multiprocessing
import sys
import threading
import traceback
from Queue import Empty
from multiprocessing import Process, current_process, Queue
from mozlog import structuredlog
# Special value used as a sentinel in various commands
Stop = object()
class MessageLogger(object):
def __init__(self, message_func):
self.send_message = message_func
def _log_data(self, action, **kwargs):
self.send_message("log", action, kwargs)
def process_output(self, process, data, command):
self._log_data("process_output", process=process, data=data, command=command)
def _log_func(level_name):
def log(self, message):
self._log_data(level_name.lower(), message=message)
log.__doc__ = """Log a message with level %s
:param message: The string message to log
""" % level_name
log.__name__ = str(level_name).lower()
return log
# Create a log method on MessageLogger for each structured log level
for level_name in structuredlog.log_levels:
setattr(MessageLogger, level_name.lower(), _log_func(level_name))
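# MessageLogger instances thereby gain one method per structured log
# level, e.g. logger.debug(msg), logger.info(msg), logger.error(msg).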
class TestRunner(object):
def __init__(self, test_queue, command_queue, result_queue, executor):
"""Class implementing the main loop for running tests.
This class delegates the job of actually running a test to the executor
that is passed in.
:param test_queue: multiprocessing.Queue containing the tests to run
:param command_queue: multiprocessing.Queue used to send commands to the
process
:param result_queue: multiprocessing.Queue used to send results to the
parent TestManager process
:param executor: TestExecutor object that will actually run a test.
"""
self.test_queue = test_queue
self.command_queue = command_queue
self.result_queue = result_queue
self.executor = executor
self.name = current_process().name
self.logger = MessageLogger(self.send_message)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.teardown()
def setup(self):
self.executor.setup(self)
def teardown(self):
self.executor.teardown()
self.send_message("runner_teardown")
self.result_queue = None
self.command_queue = None
self.browser = None
def run(self):
"""Main loop accepting commands over the pipe and triggering
the associated methods"""
self.setup()
commands = {"run_test": self.run_test,
"stop": self.stop,
"wait": self.wait}
while True:
command, args = self.command_queue.get()
try:
rv = commands[command](*args)
except Exception:
self.send_message("error",
"Error running command %s with arguments %r:\n%s" %
(command, args, traceback.format_exc()))
else:
if rv is Stop:
break
def stop(self):
return Stop
def run_test(self):
if not self.executor.is_alive():
self.send_message("restart_runner")
return
try:
# Need to block here just to allow for contention with other processes
test = self.test_queue.get(block=True, timeout=1)
except Empty:
# If we are running tests in groups (e.g. by-dir) then this queue might be
# empty but there could be other test queues. restart_runner won't actually
# start the runner if there aren't any more tests to run
self.send_message("restart_runner")
return
else:
self.send_message("test_start", test)
try:
return self.executor.run_test(test)
except Exception:
self.logger.critical(traceback.format_exc())
raise
def wait(self):
self.executor.protocol.wait()
self.send_message("after_test_ended", True)
def send_message(self, command, *args):
self.result_queue.put((command, args))
def start_runner(test_queue, runner_command_queue, runner_result_queue,
executor_cls, executor_kwargs,
executor_browser_cls, executor_browser_kwargs,
stop_flag):
"""Launch a TestRunner in a new process"""
try:
browser = executor_browser_cls(**executor_browser_kwargs)
executor = executor_cls(browser, **executor_kwargs)
with TestRunner(test_queue, runner_command_queue, runner_result_queue, executor) as runner:
try:
runner.run()
except KeyboardInterrupt:
stop_flag.set()
except Exception:
runner_result_queue.put(("log", ("critical", {"message": traceback.format_exc()})))
print >> sys.stderr, traceback.format_exc()
stop_flag.set()
finally:
runner_command_queue = None
runner_result_queue = None
manager_count = 0
def next_manager_number():
global manager_count
local = manager_count = manager_count + 1
return local
class TestRunnerManager(threading.Thread):
init_lock = threading.Lock()
def __init__(self, suite_name, test_queue, test_source_cls, browser_cls, browser_kwargs,
executor_cls, executor_kwargs, stop_flag, pause_after_test=False,
pause_on_unexpected=False, restart_on_unexpected=True, debug_info=None):
"""Thread that owns a single TestRunner process and any processes required
by the TestRunner (e.g. the Firefox binary).
TestRunnerManagers are responsible for launching the browser process and the
runner process, and for logging the test progress. The actual test running
is done by the TestRunner. In particular they:
* Start the binary of the program under test
* Start the TestRunner
* Tell the TestRunner to start a test, if any
* Log that the test started
* Log the test results
* Take any remedial action required e.g. restart crashed or hung
processes
"""
self.suite_name = suite_name
self.test_queue = test_queue
self.test_source_cls = test_source_cls
self.browser_cls = browser_cls
self.browser_kwargs = browser_kwargs
self.executor_cls = executor_cls
self.executor_kwargs = executor_kwargs
self.test_source = None
self.browser = None
self.browser_pid = None
self.browser_started = False
# Flags used to shut down this thread if we get a sigint
self.parent_stop_flag = stop_flag
self.child_stop_flag = multiprocessing.Event()
self.pause_after_test = pause_after_test
self.pause_on_unexpected = pause_on_unexpected
self.restart_on_unexpected = restart_on_unexpected
self.debug_info = debug_info
self.manager_number = next_manager_number()
self.command_queue = Queue()
self.remote_queue = Queue()
self.test_runner_proc = None
threading.Thread.__init__(self, name="Thread-TestrunnerManager-%i" % self.manager_number)
# This is started in the actual new thread
self.logger = None
# The test that is currently running
self.test = None
self.unexpected_count = 0
# This may not really be what we want
self.daemon = True
self.init_fail_count = 0
self.max_init_fails = 5
self.init_timer = None
self.restart_count = 0
self.max_restarts = 5
def run(self):
"""Main loop for the TestManager.
TestManagers generally receive commands from their
TestRunner updating them on the status of a test. They
may also have a stop flag set by the main thread indicating
that the manager should shut down the next time the event loop
spins."""
self.logger = structuredlog.StructuredLogger(self.suite_name)
with self.browser_cls(self.logger, **self.browser_kwargs) as browser, self.test_source_cls(self.test_queue) as test_source:
self.browser = browser
self.test_source = test_source
try:
if self.init() is Stop:
return
while True:
commands = {"init_succeeded": self.init_succeeded,
"init_failed": self.init_failed,
"test_start": self.test_start,
"test_ended": self.test_ended,
"after_test_ended": self.after_test_ended,
"restart_runner": self.restart_runner,
"runner_teardown": self.runner_teardown,
"log": self.log,
"error": self.error}
try:
command, data = self.command_queue.get(True, 1)
except IOError:
if not self.should_stop():
self.logger.error("Got IOError from poll")
self.restart_count += 1
if self.restart_runner() is Stop:
break
except Empty:
command = None
if self.should_stop():
self.logger.debug("A flag was set; stopping")
break
if command is not None:
self.restart_count = 0
if commands[command](*data) is Stop:
break
else:
if (self.debug_info and self.debug_info.interactive and
self.browser_started and not browser.is_alive()):
self.logger.debug("Debugger exited")
break
if not self.test_runner_proc.is_alive():
if not self.command_queue.empty():
# We got a new message so process that
continue
# If we got to here the runner presumably shut down
# unexpectedly
self.logger.info("Test runner process shut down")
if self.test is not None:
# This could happen if the test runner crashed for some other
# reason
# Need to consider the unlikely case where one test causes the
# runner process to repeatedly die
self.logger.critical("Last test did not complete")
break
self.logger.warning(
"More tests found, but runner process died, restarting")
self.restart_count += 1
if self.restart_runner() is Stop:
break
finally:
self.logger.debug("TestRunnerManager main loop terminating, starting cleanup")
self.stop_runner()
self.teardown()
self.logger.debug("TestRunnerManager main loop terminated")
def should_stop(self):
return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
def init(self):
"""Launch the browser that is being tested,
and the TestRunner process that will run the tests."""
# This lock appears to prevent a race that otherwise sometimes stops
# the spawned processes from initialising correctly and leaves this
# thread hung
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.debug("Init called, starting browser and runner")
def init_failed():
# This is called from a separate thread, so we send a message to the
# main loop to get back onto the manager thread
self.logger.debug("init_failed called from timer")
if self.command_queue:
self.command_queue.put(("init_failed", ()))
else:
self.logger.debug("Setting child stop flag in init_failed")
self.child_stop_flag.set()
with self.init_lock:
# Guard against problems initialising the browser or the browser
# remote control method
if self.debug_info is None:
self.init_timer = threading.Timer(self.browser.init_timeout, init_failed)
test_queue = self.test_source.get_queue()
if test_queue is None:
self.logger.info("No more tests")
return Stop
try:
if self.init_timer is not None:
self.init_timer.start()
self.browser.start()
self.browser_pid = self.browser.pid()
self.start_test_runner(test_queue)
except:
self.logger.warning("Failure during init %s" % traceback.format_exc())
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.error(traceback.format_exc())
succeeded = False
else:
succeeded = True
self.browser_started = True
# This has to happen after the lock is released
if not succeeded:
self.init_failed()
def init_succeeded(self):
"""Callback when we have started the browser, started the remote
control connection, and we are ready to start testing."""
self.logger.debug("Init succeeded")
if self.init_timer is not None:
self.init_timer.cancel()
self.init_fail_count = 0
self.start_next_test()
def init_failed(self):
"""Callback when starting the browser or the remote control connect
fails."""
self.init_fail_count += 1
self.logger.warning("Init failed %i" % self.init_fail_count)
if self.init_timer is not None:
self.init_timer.cancel()
if self.init_fail_count < self.max_init_fails:
self.restart_runner()
else:
self.logger.critical("Test runner failed to initialise correctly; shutting down")
return Stop
def start_test_runner(self, test_queue):
# Note that we need to be careful to start the browser before the
# test runner to ensure that any state set when the browser is started
# can be passed in to the test runner.
assert self.command_queue is not None
assert self.remote_queue is not None
self.logger.info("Starting runner")
executor_browser_cls, executor_browser_kwargs = self.browser.executor_browser()
args = (test_queue,
self.remote_queue,
self.command_queue,
self.executor_cls,
self.executor_kwargs,
executor_browser_cls,
executor_browser_kwargs,
self.child_stop_flag)
self.test_runner_proc = Process(target=start_runner,
args=args,
name="Thread-TestRunner-%i" % self.manager_number)
self.test_runner_proc.start()
self.logger.debug("Test runner started")
def send_message(self, command, *args):
self.remote_queue.put((command, args))
def cleanup(self):
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.debug("TestManager cleanup")
while True:
try:
self.logger.warning(" ".join(map(repr, self.command_queue.get_nowait())))
except Empty:
break
while True:
try:
self.logger.warning(" ".join(map(repr, self.remote_queue.get_nowait())))
except Empty:
break
def teardown(self):
self.logger.debug("teardown in testrunnermanager")
self.test_runner_proc = None
self.command_queue.close()
self.remote_queue.close()
self.command_queue = None
self.remote_queue = None
def ensure_runner_stopped(self):
if self.test_runner_proc is None:
return
self.test_runner_proc.join(10)
if self.test_runner_proc.is_alive():
# This might leak a file handle from the queue
self.logger.warning("Forcibly terminating runner process")
self.test_runner_proc.terminate()
self.test_runner_proc.join(10)
else:
self.logger.debug("Testrunner exited with code %i" % self.test_runner_proc.exitcode)
def runner_teardown(self):
self.ensure_runner_stopped()
return Stop
def stop_runner(self):
"""Stop the TestRunner and the Firefox binary."""
self.logger.debug("Stopping runner")
if self.test_runner_proc is None:
return
try:
self.browser.stop()
self.browser_started = False
if self.test_runner_proc.is_alive():
self.send_message("stop")
self.ensure_runner_stopped()
finally:
self.cleanup()
def start_next_test(self):
self.send_message("run_test")
def test_start(self, test):
self.test = test
self.logger.test_start(test.id)
def test_ended(self, test, results):
"""Handle the end of a test.
Output the result of each subtest, and the result of the overall
harness to the logs.
"""
assert test == self.test
# Write the result of each subtest
file_result, test_results = results
subtest_unexpected = False
for result in test_results:
if test.disabled(result.name):
continue
expected = test.expected(result.name)
is_unexpected = expected != result.status
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
subtest_unexpected = True
self.logger.test_status(test.id,
result.name,
result.status,
message=result.message,
expected=expected,
stack=result.stack)
# TODO: consider changing result if there is a crash dump file
# Write the result of the test harness
expected = test.expected()
status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
is_unexpected = expected != status
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
if status == "CRASH":
self.browser.log_crash(process=self.browser_pid, test=test.id)
self.logger.test_end(test.id,
status,
message=file_result.message,
expected=expected,
extra=file_result.extra)
self.test = None
restart_before_next = (test.restart_after or
file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
((subtest_unexpected or is_unexpected)
and self.restart_on_unexpected))
if (self.pause_after_test or
(self.pause_on_unexpected and (subtest_unexpected or is_unexpected))):
self.logger.info("Pausing until the browser exits")
self.send_message("wait")
else:
self.after_test_ended(restart_before_next)
def after_test_ended(self, restart_before_next):
# Handle starting the next test, with a runner restart if required
if restart_before_next:
return self.restart_runner()
else:
return self.start_next_test()
def restart_runner(self):
"""Stop and restart the TestRunner"""
if self.restart_count >= self.max_restarts:
return Stop
self.stop_runner()
return self.init()
def log(self, action, kwargs):
getattr(self.logger, action)(**kwargs)
def error(self, message):
self.logger.error(message)
self.restart_runner()
class TestQueue(object):
def __init__(self, test_source_cls, test_type, tests, **kwargs):
self.queue = None
self.test_source_cls = test_source_cls
self.test_type = test_type
self.tests = tests
self.kwargs = kwargs
def __enter__(self):
if not self.tests[self.test_type]:
return None
self.queue = Queue()
has_tests = self.test_source_cls.queue_tests(self.queue,
self.test_type,
self.tests,
**self.kwargs)
# There is a race condition that means sometimes we continue
# before the tests have been written to the underlying pipe.
# Polling the pipe for data here avoids that
self.queue._reader.poll(10)
assert not self.queue.empty()
return self.queue
def __exit__(self, *args, **kwargs):
if self.queue is not None:
self.queue.close()
self.queue = None
class ManagerGroup(object):
def __init__(self, suite_name, size, test_source_cls, test_source_kwargs,
browser_cls, browser_kwargs,
executor_cls, executor_kwargs,
pause_after_test=False,
pause_on_unexpected=False,
restart_on_unexpected=True,
debug_info=None):
"""Main thread object that owns all the TestManager threads."""
self.suite_name = suite_name
self.size = size
self.test_source_cls = test_source_cls
self.test_source_kwargs = test_source_kwargs
self.browser_cls = browser_cls
self.browser_kwargs = browser_kwargs
self.executor_cls = executor_cls
self.executor_kwargs = executor_kwargs
self.pause_after_test = pause_after_test
self.pause_on_unexpected = pause_on_unexpected
self.restart_on_unexpected = restart_on_unexpected
self.debug_info = debug_info
self.pool = set()
# Event that is polled by threads so that they can gracefully exit in the face
# of sigint
self.stop_flag = threading.Event()
self.logger = structuredlog.StructuredLogger(suite_name)
self.test_queue = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self, test_type, tests):
"""Start all managers in the group"""
self.logger.debug("Using %i processes" % self.size)
self.test_queue = TestQueue(self.test_source_cls,
test_type,
tests,
**self.test_source_kwargs)
with self.test_queue as test_queue:
if test_queue is None:
self.logger.info("No %s tests to run" % test_type)
return
for _ in range(self.size):
manager = TestRunnerManager(self.suite_name,
test_queue,
self.test_source_cls,
self.browser_cls,
self.browser_kwargs,
self.executor_cls,
self.executor_kwargs,
self.stop_flag,
self.pause_after_test,
self.pause_on_unexpected,
self.restart_on_unexpected,
self.debug_info)
manager.start()
self.pool.add(manager)
self.wait()
def is_alive(self):
"""Boolean indicating whether any manager in the group is still alive"""
return any(manager.is_alive() for manager in self.pool)
def wait(self):
"""Wait for all the managers in the group to finish"""
for item in self.pool:
item.join()
def stop(self):
"""Set the stop flag so that all managers in the group stop as soon
as possible"""
self.stop_flag.set()
self.logger.debug("Stop flag set in ManagerGroup")
def unexpected_count(self):
return sum(item.unexpected_count for item in self.pool)
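# A minimal wiring sketch (hedged: all argument values are hypothetical
# stand-ins; real callers construct these in the wptrunner entry point):
#
#   with ManagerGroup("web-platform-tests", 4,
#                     test_source_cls, test_source_kwargs,
#                     browser_cls, browser_kwargs,
#                     executor_cls, executor_kwargs) as group:
#       group.run("testharness", tests)
#       failures = group.unexpected_count()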
|
BhallaLab/moose
|
refs/heads/master
|
packaging_scripts/cmake_sanity_check.py
|
5
|
#!/usr/bin/env python
"""cmake_sanity_check.py: Check if Cmake files are ok.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import os
import re
from collections import defaultdict
makefiles = {}
cmakefiles = {}
makedirs = set()
cmakedirs = set()
def check(d):
searchMakefiles(d)
checkMissingCMake()
checkSrcs()
def checkMissingCMake():
if (makedirs - cmakedirs):
print("[Failed] Test 1")
print("Following directories have Makefile but not a CMakeFiles.txt file.")
print("%s" % "\t\n".join(makedirs - cmakedirs))
def searchMakefiles(d):
for d, subd, fs in os.walk(d):
if "CMakeLists.txt" in fs:
cmakedirs.add(d)
cmakefiles[d] = fs
if "Makefile" in fs:
makedirs.add(d)
makefiles[d] = fs
else: pass
def checkSrcs():
objPat = re.compile(r"\w+\.o")
srcPat = re.compile(r"\w+\.cpp")
srcs = []
csrcs = []
for d in makefiles:
with open(os.path.join(d, "Makefile"), "r") as f:
txt = f.read()
srcs = objPat.findall(txt)
try:
with open(os.path.join(d, "CMakeLists.txt"), "r") as f:
txt = f.read()
csrcs = srcPat.findall(txt)
except IOError:
print("Dir {} does not have CMakeLists.txt".format(d))
csrcs = []
#print("[TEST 2] Checking if CMake is creating extra objects")
for csr in csrcs:
objName = csr.replace(".cpp", ".o")
if objName in srcs:
pass
else:
#print(" Failed: In dir {}, CMake is creating extra object {}".format(d, objName))
pass
print("[TEST 3] Checking if CMake is missing some objects")
for obj in srcs:
srcName = obj.replace(".o", ".cpp")
if srcName in csrcs: pass
else:
print(" Failed: In dir {}, CMake is missing object {}".format(d,
srcName))
def main():
dir = sys.argv[1]
check(dir)
if __name__ == '__main__':
main()
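# Illustrative usage (inferred from the argv handling in main() above):
#   python cmake_sanity_check.py /path/to/source/tree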
|
michaelgallacher/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/lambdadefaultparameter.py
|
83
|
{(lambda ii=ii: ii) for ii in range(5)}
|
CARocha/ciat_plataforma
|
refs/heads/master
|
monitoreo/indicador01/__init__.py
|
12133432
| |
d120/kifplan
|
refs/heads/master
|
oplan/management/__init__.py
|
12133432
| |
twisted/txaws
|
refs/heads/master
|
txaws/client/__init__.py
|
12133432
| |
Alegzander/blockchain-sport-loto-prototype-backend
|
refs/heads/master
|
frontend/utils/__init__.py
|
12133432
| |
xingwu1/autorest
|
refs/heads/master
|
ClientRuntimes/Python/msrest/msrest/paging.py
|
11
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import collections
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from .serialization import Deserializer
from .pipeline import ClientRawResponse
class Paged(collections.Iterable):
"""A container for paged REST responses.
:param callable command: Function to retrieve the next page of items.
:param dict classes: A dictionary of class dependencies for
 deserialization.
:param dict raw_headers: Optional raw response headers, surfaced
 through the ``raw`` property.
"""
_validation = {}
_attribute_map = {}
def __init__(self, command, classes, raw_headers=None):
self.next_link = ""
self.current_page = []
self._deserializer = Deserializer(classes)
self._get_next = command
self._response = None
self._raw_headers = raw_headers
def __iter__(self):
"""Iterate over response items in current page, automatically
retrieves next page.
"""
for i in self.current_page:
yield i
while self.next_link is not None:
for i in self.next():
yield i
@classmethod
def _get_subtype_map(cls):
"""Required for parity to Model object for deserialization."""
return {}
@property
def raw(self):
raw = ClientRawResponse(self.current_page, self._response)
if self._raw_headers:
raw.add_headers(self._raw_headers)
return raw
def _validate_url(self):
"""Validate next page URL."""
if self.next_link:
parsed = urlparse(self.next_link)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Invalid URL: " + self.next_link)
def get(self, url):
"""Get arbitrary page.
:param str url: URL to arbitrary page results.
"""
self.next_link = url
return self.next()
def reset(self):
"""Reset iterator to first page."""
self.next_link = ""
self.current_page = []
def next(self):
"""Get next page."""
if self.next_link is None:
raise GeneratorExit("End of paging")
self._validate_url()
self._response = self._get_next(self.next_link)
self._deserializer(self, self._response)
return self.current_page
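# Illustrative sketch (not part of msrest itself): a minimal concrete Paged
# subclass plus an iteration loop. ``get_next`` is a hypothetical callable
# that fetches one raw page of results, and ``Widget`` is a hypothetical
# model class present in the ``classes`` deserialization map.
class _WidgetPaged(Paged):
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[Widget]'},
    }
def _example_iterate(get_next, classes):
    # __iter__ drains the current page, then follows next_link until None.
    for item in _WidgetPaged(get_next, classes):
        print(item)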
|
vighneshbirodkar/scikit-image
|
refs/heads/master
|
skimage/draw/setup.py
|
1
|
#!/usr/bin/env python
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('draw', parent_package, top_path)
config.add_data_dir('tests')
cython(['_draw.pyx'], working_path=base_path)
config.add_extension('_draw', sources=['_draw.c'],
include_dirs=[get_numpy_include_dirs(), '../_shared'])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikit-image developers',
author='scikit-image developers',
maintainer_email='scikit-image@python.org',
description='Drawing',
url='https://github.com/scikit-image/scikit-image',
license='SciPy License (BSD Style)',
**(configuration(top_path='').todict())
)
|
ramnes/qtile
|
refs/heads/master
|
libqtile/widget/base.py
|
2
|
# Copyright (c) 2008-2010 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2012 roger
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2013 dequis
# Copyright (c) 2013 David R. Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Justin Bronder
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import subprocess
from typing import Any, List, Tuple
from libqtile import bar, configurable, confreader
from libqtile.command.base import CommandError, CommandObject, ItemT
from libqtile.log_utils import logger
# Each widget class must define which bar orientation(s) it supports by setting
# these bits in an 'orientations' class attribute. Simply having the attribute
# inherited from superclasses is discouraged, because if a superclass that
# previously supported only one orientation adds support for the other, its
# subclasses will generally have to be adapted too. ORIENTATION_NONE is only added for
# completeness' sake.
# +------------------------+--------------------+--------------------+
# | Widget bits | Horizontal bar | Vertical bar |
# +========================+====================+====================+
# | ORIENTATION_NONE | ConfigError raised | ConfigError raised |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_HORIZONTAL | Widget displayed | ConfigError raised |
# | | horizontally | |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_VERTICAL | ConfigError raised | Widget displayed |
# | | | vertically |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_BOTH | Widget displayed | Widget displayed |
# | | horizontally | vertically |
# +------------------------+--------------------+--------------------+
class _Orientations(int):
def __new__(cls, value, doc):
return super().__new__(cls, value)
def __init__(self, value, doc):
self.doc = doc
def __str__(self):
return self.doc
def __repr__(self):
return self.doc
ORIENTATION_NONE = _Orientations(0, 'none')
ORIENTATION_HORIZONTAL = _Orientations(1, 'horizontal only')
ORIENTATION_VERTICAL = _Orientations(2, 'vertical only')
ORIENTATION_BOTH = _Orientations(3, 'horizontal and vertical')
class _Widget(CommandObject, configurable.Configurable):
"""Base Widget class
If length is set to the special value `bar.STRETCH`, the bar itself will
set the length to the maximum remaining space, after all other widgets have
been configured.
In horizontal bars, 'length' corresponds to the width of the widget; in
vertical bars, it corresponds to the widget's height.
The offsetx and offsety attributes are set by the Bar after all widgets
have been configured.
Callback functions can be assigned to button presses by passing a dict to the
'callbacks' kwarg. No arguments are passed to the callback function so, if
you need access to the qtile object, it needs to be imported into your code.
For example:
.. code-block:: python
from libqtile import qtile
def open_calendar():
qtile.cmd_spawn('gsimplecal next_month')
clock = widget.Clock(mouse_callbacks={'Button1': open_calendar})
When the clock widget receives a click with button 1, the ``open_calendar`` function
will be executed. Callbacks can be assigned to other buttons by adding more entries
to the passed dictionary.
"""
orientations = ORIENTATION_BOTH
offsetx: int = 0
offsety: int = 0
defaults = [
("background", None, "Widget background color"),
("mouse_callbacks", {}, "Dict of mouse button press callback functions."),
] # type: List[Tuple[str, Any, str]]
def __init__(self, length, **config):
"""
length: bar.STRETCH, bar.CALCULATED, or a specified length.
"""
CommandObject.__init__(self)
self.name = self.__class__.__name__.lower()
if "name" in config:
self.name = config["name"]
configurable.Configurable.__init__(self, **config)
self.add_defaults(_Widget.defaults)
if length in (bar.CALCULATED, bar.STRETCH):
self.length_type = length
self.length = 0
else:
assert isinstance(length, int)
self.length_type = bar.STATIC
self.length = length
self.configured = False
@property
def length(self):
if self.length_type == bar.CALCULATED:
return int(self.calculate_length())
return self._length
@length.setter
def length(self, value):
self._length = value
@property
def width(self):
if self.bar.horizontal:
return self.length
return self.bar.size
@property
def height(self):
if self.bar.horizontal:
return self.bar.size
return self.length
@property
def offset(self):
if self.bar.horizontal:
return self.offsetx
return self.offsety
# Do not start the name with "test", or nosetests will try to test it
# directly (prepend an underscore instead)
def _test_orientation_compatibility(self, horizontal):
if horizontal:
if not self.orientations & ORIENTATION_HORIZONTAL:
raise confreader.ConfigError(
self.__class__.__name__ +
" is not compatible with the orientation of the bar."
)
elif not self.orientations & ORIENTATION_VERTICAL:
raise confreader.ConfigError(
self.__class__.__name__ +
" is not compatible with the orientation of the bar."
)
def timer_setup(self):
""" This is called exactly once, after the widget has been configured
and timers are available to be set up. """
pass
def _configure(self, qtile, bar):
self.qtile = qtile
self.bar = bar
self.drawer = bar.window.create_drawer(self.bar.width, self.bar.height)
if not self.configured:
self.qtile.call_soon(self.timer_setup)
self.qtile.call_soon(asyncio.create_task, self._config_async())
async def _config_async(self):
"""
This is called once when the main event loop has started. This
happens after _configure has been run.
Widgets that need to use asyncio coroutines after this point may
wish to initialise the relevant code (e.g. connections to dbus
using dbus_next) here.
"""
pass
def finalize(self):
if hasattr(self, 'layout') and self.layout:
self.layout.finalize()
self.drawer.finalize()
def clear(self):
self.drawer.set_source_rgb(self.bar.background)
self.drawer.fillrect(self.offsetx, self.offsety, self.width,
self.height)
def info(self):
return dict(
name=self.name,
offset=self.offset,
length=self.length,
width=self.width,
height=self.height,
)
def add_callbacks(self, defaults):
"""Add default callbacks with a lower priority than user-specified callbacks."""
defaults.update(self.mouse_callbacks)
self.mouse_callbacks = defaults
def button_press(self, x, y, button):
name = 'Button{0}'.format(button)
if name in self.mouse_callbacks:
self.mouse_callbacks[name]()
def button_release(self, x, y, button):
pass
def get(self, q, name):
"""
Utility function for quick retrieval of a widget by name.
"""
w = q.widgets_map.get(name)
if not w:
raise CommandError("No such widget: %s" % name)
return w
def _items(self, name: str) -> ItemT:
if name == "bar":
return True, []
return None
def _select(self, name, sel):
if name == "bar":
return self.bar
def cmd_info(self):
"""
Info for this object.
"""
return self.info()
def draw(self):
"""
Method that draws the widget. You may call this explicitly to
redraw the widget, but only if the length of the widget hasn't
changed. If it has, you must call bar.draw instead.
"""
raise NotImplementedError
def calculate_length(self):
"""
Must be implemented if the widget can take CALCULATED for length.
It must return the width of the widget if it's installed in a
horizontal bar; it must return the height of the widget if it's
installed in a vertical bar. Usually you will test the orientation
of the bar with 'self.bar.horizontal'.
"""
raise NotImplementedError
def timeout_add(self, seconds, method, method_args=()):
"""
This method calls ``.call_later`` with the given arguments.
"""
return self.qtile.call_later(seconds, self._wrapper, method,
*method_args)
def call_process(self, command, **kwargs):
"""
This method uses `subprocess.check_output` to run the given command
and return the string from stdout, which is decoded when using
Python 3.
"""
output = subprocess.check_output(command, **kwargs)
output = output.decode()
return output
def _wrapper(self, method, *method_args):
try:
method(*method_args)
except: # noqa: E722
logger.exception('got exception from widget timer')
def create_mirror(self):
return Mirror(self)
def mouse_enter(self, x, y):
pass
def mouse_leave(self, x, y):
pass
UNSPECIFIED = bar.Obj("UNSPECIFIED")
class _TextBox(_Widget):
"""
Base class for widgets that are just boxes containing text.
"""
orientations = ORIENTATION_HORIZONTAL
defaults = [
("font", "sans", "Default font"),
("fontsize", None, "Font size. Calculated if None."),
("padding", None, "Padding. Calculated if None."),
("foreground", "ffffff", "Foreground colour"),
(
"fontshadow",
None,
"font shadow color, default is None(no shadow)"
),
("markup", True, "Whether or not to use pango markup"),
("fmt", "{}", "How to format the text"),
('max_chars', 0, 'Maximum number of characters to display in widget.'),
] # type: List[Tuple[str, Any, str]]
def __init__(self, text=" ", width=bar.CALCULATED, **config):
self.layout = None
_Widget.__init__(self, width, **config)
self._text = text
self.add_defaults(_TextBox.defaults)
@property
def text(self):
return self._text
@text.setter
def text(self, value):
if len(value) > self.max_chars > 0:
value = value[:self.max_chars] + "…"
self._text = value
if self.layout:
self.layout.text = self.formatted_text
@property
def formatted_text(self):
return self.fmt.format(self._text)
@property
def foreground(self):
return self._foreground
@foreground.setter
def foreground(self, fg):
self._foreground = fg
if self.layout:
self.layout.colour = fg
@property
def font(self):
return self._font
@font.setter
def font(self, value):
self._font = value
if self.layout:
self.layout.font = value
@property
def fontshadow(self):
return self._fontshadow
@fontshadow.setter
def fontshadow(self, value):
self._fontshadow = value
if self.layout:
self.layout.font_shadow = value
@property
def actual_padding(self):
if self.padding is None:
return self.fontsize / 2
else:
return self.padding
def _configure(self, qtile, bar):
_Widget._configure(self, qtile, bar)
if self.fontsize is None:
self.fontsize = self.bar.height - self.bar.height / 5
self.layout = self.drawer.textlayout(
self.formatted_text,
self.foreground,
self.font,
self.fontsize,
self.fontshadow,
markup=self.markup,
)
def calculate_length(self):
if self.text:
return min(
self.layout.width,
self.bar.width
) + self.actual_padding * 2
else:
return 0
def can_draw(self):
can_draw = self.layout is not None \
and not self.layout.finalized() \
and self.offsetx is not None # if the bar hasn't placed us yet
return can_draw
def draw(self):
if not self.can_draw():
return
self.drawer.clear(self.background or self.bar.background)
self.layout.draw(
self.actual_padding or 0,
int(self.bar.height / 2.0 - self.layout.height / 2.0) + 1
)
self.drawer.draw(offsetx=self.offsetx, width=self.width)
def cmd_set_font(self, font=UNSPECIFIED, fontsize=UNSPECIFIED,
fontshadow=UNSPECIFIED):
"""
Change the font used by this widget. If font is None, the current
font is used.
"""
if font is not UNSPECIFIED:
self.font = font
if fontsize is not UNSPECIFIED:
self.fontsize = fontsize
if fontshadow is not UNSPECIFIED:
self.fontshadow = fontshadow
self.bar.draw()
def info(self):
d = _Widget.info(self)
d['foreground'] = self.foreground
d['text'] = self.formatted_text
return d
def update(self, text):
if self.text == text:
return
if text is None:
text = ""
old_width = self.layout.width
self.text = text
# If our width hasn't changed, we just draw ourselves. Otherwise,
# we draw the whole bar.
if self.layout.width == old_width:
self.draw()
else:
self.bar.draw()
class InLoopPollText(_TextBox):
""" A common interface for polling some 'fast' information, munging it, and
rendering the result in a text box. You probably want to use
ThreadPoolText instead.
('fast' here means that this runs /in/ the event loop, so don't block! If
you want to run something nontrivial, use ThreadPoolText.) """
defaults = [
("update_interval", 600, "Update interval in seconds, if none, the "
"widget updates whenever the event loop is idle."),
] # type: List[Tuple[str, Any, str]]
def __init__(self, default_text="N/A", width=bar.CALCULATED, **config):
_TextBox.__init__(self, default_text, width, **config)
self.add_defaults(InLoopPollText.defaults)
def timer_setup(self):
update_interval = self.tick()
# If self.update_interval is defined and .tick() returns None, re-call
# after self.update_interval
if update_interval is None and self.update_interval is not None:
self.timeout_add(self.update_interval, self.timer_setup)
# We can change the update interval by returning something from .tick()
elif update_interval:
self.timeout_add(update_interval, self.timer_setup)
# If update_interval is False, we won't re-call
def _configure(self, qtile, bar):
should_tick = self.configured
_TextBox._configure(self, qtile, bar)
# Update when we are being re-configured.
if should_tick:
self.tick()
def button_press(self, x, y, button):
self.tick()
_TextBox.button_press(self, x, y, button)
def poll(self):
return 'N/A'
def tick(self):
text = self.poll()
self.update(text)
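# Illustrative sketch (not part of qtile itself): a minimal InLoopPollText
# subclass. poll() runs inside the event loop, so it must stay fast and
# non-blocking; a local load-average read is cheap enough to qualify.
class _LoadAvgText(InLoopPollText):
    def __init__(self, **config):
        InLoopPollText.__init__(self, default_text="load: ?", **config)
    def poll(self):
        import os
        return "load: %.2f" % os.getloadavg()[0]  # Unix-only, but instant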
class ThreadPoolText(_TextBox):
""" A common interface for wrapping blocking events which when triggered
will update a textbox.
The poll method is intended to wrap a blocking function which may take
quite a while to return anything. It will be executed as a future and
should return updated text when completed. It may also return None to
disable any further updates.
:param text: Initial text to display.
"""
defaults = [
("update_interval", 600, "Update interval in seconds, if none, the "
"widget updates whenever it's done'."),
] # type: List[Tuple[str, Any, str]]
def __init__(self, text, **config):
super().__init__(text, width=bar.CALCULATED, **config)
self.add_defaults(ThreadPoolText.defaults)
def timer_setup(self):
def on_done(future):
try:
result = future.result()
except Exception:
result = None
logger.exception('poll() raised exceptions, not rescheduling')
if result is not None:
try:
self.update(result)
if self.update_interval is not None:
self.timeout_add(self.update_interval, self.timer_setup)
else:
self.timer_setup()
except Exception:
logger.exception('Failed to reschedule.')
else:
logger.warning('poll() returned None, not rescheduling')
future = self.qtile.run_in_executor(self.poll)
future.add_done_callback(on_done)
def poll(self):
pass
# these two classes below look SUSPICIOUSLY similar
class PaddingMixin(configurable.Configurable):
"""Mixin that provides padding(_x|_y|)
To use it, subclass and add this to __init__:
self.add_defaults(base.PaddingMixin.defaults)
"""
defaults = [
("padding", 3, "Padding inside the box"),
("padding_x", None, "X Padding. Overrides 'padding' if set"),
("padding_y", None, "Y Padding. Overrides 'padding' if set"),
] # type: List[Tuple[str, Any, str]]
padding_x = configurable.ExtraFallback('padding_x', 'padding')
padding_y = configurable.ExtraFallback('padding_y', 'padding')
class MarginMixin(configurable.Configurable):
"""Mixin that provides margin(_x|_y|)
To use it, subclass and add this to __init__:
self.add_defaults(base.MarginMixin.defaults)
"""
defaults = [
("margin", 3, "Margin inside the box"),
("margin_x", None, "X Margin. Overrides 'margin' if set"),
("margin_y", None, "Y Margin. Overrides 'margin' if set"),
] # type: List[Tuple[str, Any, str]]
margin_x = configurable.ExtraFallback('margin_x', 'margin')
margin_y = configurable.ExtraFallback('margin_y', 'margin')
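# Illustrative sketch (not part of qtile itself): wiring the mixins into a
# widget subclass exactly as their docstrings prescribe, so that padding_x/y
# and margin_x/y fall back to the plain 'padding'/'margin' values.
class _PaddedBox(_TextBox, PaddingMixin, MarginMixin):
    def __init__(self, text=" ", **config):
        _TextBox.__init__(self, text, **config)
        self.add_defaults(PaddingMixin.defaults)
        self.add_defaults(MarginMixin.defaults)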
class Mirror(_Widget):
"""
A widget for showing the same widget content in more than one place, for
instance, on bars across multiple screens.
You don't need to use it directly; instead, just instantiate your widget
once and hand it in to multiple bars. For instance::
cpu = widget.CPUGraph()
clock = widget.Clock()
screens = [
Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),
Screen(top=bar.Bar([widget.GroupBox(), cpu, clock])),
]
Widgets can be passed to more than one bar, so that there don't need to be
any duplicates executing the same code all the time, and they'll always be
visually identical.
This works for all widgets that use `drawers` (and nothing else) to display
their contents. Currently, this is all widgets except for `Systray`.
"""
def __init__(self, reflection):
_Widget.__init__(self, reflection.length)
reflection.draw = self.hook(reflection.draw)
self.reflects = reflection
self._length = 0
@property
def length(self):
return self.reflects.length
@length.setter
def length(self, value):
self._length = value
def hook(self, draw):
def _():
draw()
self.draw()
return _
def draw(self):
if self._length != self.reflects.length:
self._length = self.length
self.bar.draw()
else:
self.reflects.drawer.paint_to(self.drawer)
self.drawer.draw(offsetx=self.offset, width=self.width)
def button_press(self, x, y, button):
self.reflects.button_press(x, y, button)
|
springcoil/pymc3
|
refs/heads/master
|
pymc3/math.py
|
1
|
from __future__ import division
import sys
import theano.tensor as tt
# pylint: disable=unused-import
import theano
from theano.tensor import (
constant, flatten, zeros_like, ones_like, stack, concatenate, sum, prod,
lt, gt, le, ge, eq, neq, switch, clip, where, and_, or_, abs_, exp, log,
cos, sin, tan, cosh, sinh, tanh, sqr, sqrt, erf, erfc, erfinv, erfcinv, dot,
maximum, minimum, sgn, ceil, floor)
from theano.tensor.nlinalg import det, matrix_inverse, extract_diag, matrix_dot, trace
import theano.tensor.slinalg
import theano.sparse
from theano.tensor.nnet import sigmoid
from theano.gof import Op, Apply
import numpy as np
import scipy as sp
import scipy.sparse
from scipy.linalg import block_diag as scipy_block_diag
from pymc3.theanof import floatX, largest_common_dtype, ix_
# pylint: enable=unused-import
def tround(*args, **kwargs):
"""
Temporary function to silence round warning in Theano. Please remove
when the warning disappears.
"""
kwargs['mode'] = 'half_to_even'
return tt.round(*args, **kwargs)
def logsumexp(x, axis=None):
# Adapted from https://github.com/Theano/Theano/issues/1563
x_max = tt.max(x, axis=axis, keepdims=True)
return tt.log(tt.sum(tt.exp(x - x_max), axis=axis, keepdims=True)) + x_max
def logaddexp(a, b):
diff = b - a
return tt.switch(diff > 0,
b + tt.log1p(tt.exp(-diff)),
a + tt.log1p(tt.exp(diff)))
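# Illustrative sketch: why the max-shift in logsumexp above matters. For
# inputs around -1000 a naive log(sum(exp(x))) underflows to -inf, while the
# shifted form stays finite. Assumes a working theano installation.
def _logsumexp_demo():
    x = floatX(np.array([-1000.0, -1000.5]))
    naive = tt.log(tt.sum(tt.exp(x)))    # exp(-1000) underflows to 0
    stable = logsumexp(tt.as_tensor(x))  # subtracts the max before exp
    return naive.eval(), stable.eval()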
def invlogit(x, eps=sys.float_info.epsilon):
return (1. - 2. * eps) / (1. + tt.exp(-x)) + eps
def logit(p):
return tt.log(p / (floatX(1) - p))
def flatten_list(tensors):
return tt.concatenate([var.ravel() for var in tensors])
class LogDet(Op):
"""Compute the logarithm of the absolute determinant of a square
matrix M, log(abs(det(M))) on the CPU. Avoids det(M) overflow/
underflow.
Note
----
Once PR #3959 (https://github.com/Theano/Theano/pull/3959/) by harpone is merged,
this must be removed.
"""
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
o = theano.tensor.scalar(dtype=x.dtype)
return Apply(self, [x], [o])
def perform(self, node, inputs, outputs, params=None):
try:
(x,) = inputs
(z,) = outputs
s = np.linalg.svd(x, compute_uv=False)
log_det = np.sum(np.log(np.abs(s)))
z[0] = np.asarray(log_det, dtype=x.dtype)
except Exception:
print('Failed to compute logdet of {}.'.format(x))
raise
def grad(self, inputs, g_outputs):
[gz] = g_outputs
[x] = inputs
return [gz * matrix_inverse(x).T]
def __str__(self):
return "LogDet"
logdet = LogDet()
def probit(p):
return -sqrt(2.) * erfcinv(2. * p)
def invprobit(x):
return .5 * erfc(-x / sqrt(2.))
def expand_packed_triangular(n, packed, lower=True, diagonal_only=False):
R"""Convert a packed triangular matrix into a two dimensional array.
Triangular matrices can be stored with better space efficiency by
storing the non-zero values in a one-dimensional array. We number
the elements by row like this (for lower or upper triangular matrices)::
[[0 - - -] [[0 1 2 3]
[1 2 - -] [- 4 5 6]
[3 4 5 -] [- - 7 8]
[6 7 8 9]] [- - - 9]
Parameters
----------
n : int
The number of rows of the triangular matrix.
packed : theano.vector
The matrix in packed format.
lower : bool, default=True
If true, assume that the matrix is lower triangular.
diagonal_only : bool
If true, return only the diagonal of the matrix.
"""
if packed.ndim != 1:
raise ValueError('Packed triangular is not one dimensional.')
if not isinstance(n, int):
raise TypeError('n must be an integer')
if diagonal_only and lower:
diag_idxs = np.arange(1, n + 1).cumsum() - 1
return packed[diag_idxs]
elif diagonal_only and not lower:
diag_idxs = np.arange(2, n + 2)[::-1].cumsum() - n - 1
return packed[diag_idxs]
elif lower:
out = tt.zeros((n, n), dtype=theano.config.floatX)
idxs = np.tril_indices(n)
return tt.set_subtensor(out[idxs], packed)
elif not lower:
out = tt.zeros((n, n), dtype=theano.config.floatX)
idxs = np.triu_indices(n)
return tt.set_subtensor(out[idxs], packed)
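# Illustrative sketch: expanding a packed lower triangle using the row-major
# numbering shown in the docstring above.
def _packed_triangular_demo():
    packed = tt.as_tensor(floatX(np.array([1., 2., 3., 4., 5., 6.])))
    full = expand_packed_triangular(3, packed, lower=True)
    # full.eval() -> [[1, 0, 0], [2, 3, 0], [4, 5, 6]]
    return full.eval()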
class BatchedDiag(tt.Op):
"""
Fast BatchedDiag allocation
"""
__props__ = ()
def make_node(self, diag):
diag = tt.as_tensor_variable(diag)
if diag.type.ndim != 2:
raise TypeError('data argument must be a matrix', diag.type)
return tt.Apply(self, [diag], [tt.tensor3(dtype=diag.dtype)])
def perform(self, node, ins, outs, params=None):
(C,) = ins
(z,) = outs
bc = C.shape[0]
dim = C.shape[-1]
Cd = np.zeros((bc, dim, dim), C.dtype)
bidx = np.repeat(np.arange(bc), dim)
didx = np.tile(np.arange(dim), bc)
Cd[bidx, didx, didx] = C.flatten()
z[0] = Cd
def grad(self, inputs, gout):
(gz,) = gout
idx = tt.arange(gz.shape[-1])
return [gz[..., idx, idx]]
def infer_shape(self, nodes, shapes):
return [(shapes[0][0], ) + (shapes[0][1],) * 2]
def batched_diag(C):
C = tt.as_tensor(C)
dim = C.shape[-1]
if C.ndim == 2:
# diag -> matrices
return BatchedDiag()(C)
elif C.ndim == 3:
# matrices -> diag
idx = tt.arange(dim)
return C[..., idx, idx]
else:
raise ValueError('Input should be 2 or 3 dimensional')
class BlockDiagonalMatrix(Op):
__props__ = ('sparse', 'format')
def __init__(self, sparse=False, format='csr'):
if format not in ('csr', 'csc'):
raise ValueError("format must be one of: 'csr', 'csc', got {}".format(format))
self.sparse = sparse
self.format = format
def make_node(self, *matrices):
if not matrices:
raise ValueError('no matrices to allocate')
matrices = list(map(tt.as_tensor, matrices))
if any(mat.type.ndim != 2 for mat in matrices):
raise TypeError('all data arguments must be matrices')
if self.sparse:
out_type = theano.sparse.matrix(self.format, dtype=largest_common_dtype(matrices))
else:
out_type = theano.tensor.matrix(dtype=largest_common_dtype(matrices))
return tt.Apply(self, matrices, [out_type])
def perform(self, node, inputs, output_storage, params=None):
dtype = largest_common_dtype(inputs)
if self.sparse:
output_storage[0][0] = sp.sparse.block_diag(
inputs, self.format, dtype
)
else:
output_storage[0][0] = scipy_block_diag(*inputs).astype(dtype)
def grad(self, inputs, gout):
shapes = tt.stack([i.shape for i in inputs])
index_end = shapes.cumsum(0)
index_begin = index_end - shapes
slices = [ix_(tt.arange(index_begin[i, 0], index_end[i, 0]),
tt.arange(index_begin[i, 1], index_end[i, 1])
) for i in range(len(inputs))]
return [gout[0][slc] for slc in slices]
def infer_shape(self, nodes, shapes):
first, second = zip(*shapes)
return [(tt.add(*first), tt.add(*second))]
def block_diagonal(matrices, sparse=False, format='csr'):
"""See scipy.sparse.block_diag or
scipy.linalg.block_diag for reference
Parameters
----------
matrices : tensors
format : str (default 'csr')
must be one of: 'csr', 'csc'
sparse : bool (default False)
if True return sparse format
Returns
-------
matrix
"""
if len(matrices) == 1: # graph optimization
return matrices[0]
return BlockDiagonalMatrix(sparse=sparse, format=format)(*matrices)
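# Illustrative sketch: combining two small matrices with the Op above (dense
# variant); the result is a 5x5 matrix with `a` and `b` as diagonal blocks.
def _block_diagonal_demo():
    a = tt.as_tensor(floatX(np.eye(2)))
    b = tt.as_tensor(floatX(2 * np.eye(3)))
    return block_diagonal([a, b]).eval()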
|
flychensc/orange
|
refs/heads/master
|
backend/apps.py
|
16
|
from django.apps import AppConfig
class BackendConfig(AppConfig):
name = 'backend'
|
aaronzink/tensorflow-visual-inspection
|
refs/heads/master
|
models/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_test.py
|
21
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor."""
import tensorflow as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
if __name__ == '__main__':
tf.test.main()
|
ocwc/ocwc-members
|
refs/heads/master
|
members/elections/api.py
|
1
|
# -*- coding: utf-8 -*-
from rest_framework import generics
from .models import Candidate, Election
from .serializers import CandidatePublicSerializer
class ElectionCandidatesListAPIView(generics.ListAPIView):
model = Candidate
serializer_class = CandidatePublicSerializer
def get_queryset(self):
view_nominations_key = self.kwargs.pop('key')
return Election.objects.get(view_nominations_key=view_nominations_key).candidate_set.filter(
vetted=True).order_by('seat_type', 'order')
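# Illustrative sketch (not part of this app): wiring the view into a urls.py
# so that the 'key' kwarg it pops arrives from the URL, e.g.:
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^elections/(?P<key>[\w-]+)/candidates/$',
#           ElectionCandidatesListAPIView.as_view()),
#   ]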
|
manipopopo/tensorflow
|
refs/heads/master
|
tensorflow/contrib/kernel_methods/python/losses.py
|
30
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of kernel-methods-related loss operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.losses import losses
def sparse_multiclass_hinge_loss(
labels,
logits,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds Ops for computing the multiclass hinge loss.
The implementation is based on the following paper:
On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
by Crammer and Singer.
link: http://jmlr.csail.mit.edu/papers/volume2/crammer01a/crammer01a.pdf
This is a generalization of standard (binary) hinge loss. For a given instance
with correct label c*, the loss is given by:
$$loss = max_{c != c*} logits_c - logits_{c*} + 1.$$
or equivalently
$$loss = max_c { logits_c - logits_{c*} + I_{c != c*} }$$
where \\(I_{c != c*} = 1\ \text{if}\ c != c*\\) and 0 otherwise.
Args:
labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
the ground truth. Each entry must be an index in `[0, num_classes)`.
logits: `Tensor` of shape [batch_size, num_classes] corresponding to the
unscaled logits. Its dtype should be either `float32` or `float64`.
weights: Optional (python) scalar or `Tensor`. If a non-scalar `Tensor`, its
rank should be either 1 ([batch_size]) or 2 ([batch_size, 1]).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is a scalar.
Raises:
ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent
shapes.
ValueError: If `labels` tensor has invalid dtype.
"""
with ops.name_scope(scope, 'sparse_multiclass_hinge_loss', (logits,
labels)) as scope:
# Check logits Tensor has valid rank.
logits_rank = logits.get_shape().ndims
if logits_rank != 2:
raise ValueError(
'logits should have rank 2 ([batch_size, num_classes]). Given rank is'
' {}'.format(logits_rank))
logits_shape = array_ops.shape(logits)
batch_size, num_classes = logits_shape[0], logits_shape[1]
logits = math_ops.to_float(logits)
# Check labels have valid type.
if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:
raise ValueError(
'Invalid dtype for labels: {}. Acceptable dtypes: int32 and int64'.
format(labels.dtype))
# Check labels and weights have valid ranks and are consistent.
labels_rank = labels.get_shape().ndims
if labels_rank not in [1, 2]:
raise ValueError(
'labels should have rank 1 ([batch_size]) or 2 ([batch_size, 1]). '
'Given rank is {}'.format(labels_rank))
with ops.control_dependencies([
check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
]):
labels = array_ops.reshape(labels, shape=[-1])
weights = ops.convert_to_tensor(weights)
weights_rank = weights.get_shape().ndims
if weights_rank not in [0, 1, 2]:
raise ValueError(
'non-scalar weights should have rank 1 ([batch_size]) or 2 '
'([batch_size, 1]). Given rank is {}'.format(weights_rank))
if weights_rank > 0:
weights = array_ops.reshape(weights, shape=[-1])
# Check weights and labels have the same number of elements.
weights.get_shape().assert_is_compatible_with(labels.get_shape())
# Compute the logits tensor corresponding to the correct class per instance.
example_indices = array_ops.reshape(
math_ops.range(batch_size), shape=[batch_size, 1])
indices = array_ops.concat(
[
example_indices,
array_ops.reshape(
math_ops.cast(labels, example_indices.dtype),
shape=[batch_size, 1])
],
axis=1)
label_logits = array_ops.reshape(
array_ops.gather_nd(params=logits, indices=indices),
shape=[batch_size, 1])
one_cold_labels = array_ops.one_hot(
indices=labels, depth=num_classes, on_value=0.0, off_value=1.0)
margin = logits - label_logits + one_cold_labels
margin = nn_ops.relu(margin)
loss = math_ops.reduce_max(margin, axis=1)
return losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction=reduction)
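# Illustrative sketch (TF1-style, matching this contrib-era module): the loss
# for a made-up batch of two examples over three classes.
def _sparse_hinge_loss_demo():
    import tensorflow as tf
    labels = tf.constant([0, 2], dtype=tf.int64)
    logits = tf.constant([[1.5, 0.2, -0.3],
                          [0.1, 0.9, 2.0]], dtype=tf.float32)
    loss = sparse_multiclass_hinge_loss(labels, logits)
    with tf.Session() as sess:
        return sess.run(loss)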
|
Chilledheart/chromium
|
refs/heads/master
|
tools/telemetry/third_party/gsutilz/third_party/boto/boto/ecs/__init__.py
|
153
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
import time
import urllib
import xml.sax
from boto.ecs.item import ItemSet
from boto import handler
class ECSConnection(AWSQueryConnection):
"""
ECommerce Connection
For more information on how to use this module see:
http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html
"""
APIVersion = '2010-11-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
debug=0, https_connection_factory=None, path='/',
security_token=None, profile_name=None):
super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, path,
security_token=security_token,
profile_name=profile_name)
def _required_auth_capability(self):
return ['ecs']
def get_response(self, action, params, page=0, itemSet=None):
"""
Utility method to handle calls to ECS and parsing of responses.
"""
params['Service'] = "AWSECommerceService"
params['Operation'] = action
if page:
params['ItemPage'] = page
response = self.make_request(None, params, "/onca/xml")
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise BotoServerError(response.status, response.reason, body)
if itemSet is None:
rs = ItemSet(self, action, params, page)
else:
rs = itemSet
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body.encode('utf-8'), h)
if not rs.is_valid:
raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0]))
return rs
#
# Group methods
#
def item_search(self, search_index, **params):
"""
Returns items that satisfy the search criteria, including one or more search
indices.
For a full list of search terms,
:see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
"""
params['SearchIndex'] = search_index
return self.get_response('ItemSearch', params)
def item_lookup(self, **params):
"""
Returns items that satisfy the lookup query.
For a full list of parameters, see:
http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf
"""
return self.get_response('ItemLookup', params)
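# Illustrative sketch (not part of boto itself): a book search. The
# credentials are placeholders, and the attributes available on each item
# depend on the ResponseGroup requested.
def _example_book_search():
    conn = ECSConnection(aws_access_key_id='<access-key>',
                         aws_secret_access_key='<secret-key>')
    items = conn.item_search('Books', Keywords='python', ResponseGroup='Small')
    for item in items:
        print(item.Title)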
|
sserrot/champion_relationships
|
refs/heads/master
|
venv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py
|
7
|
#-*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ["average_neighbor_degree"]
def _average_nbr_deg(G, source_degree, target_degree, nodes=None, weight=None):
# average degree of neighbors
avg = {}
for n, deg in source_degree(nodes, weight=weight):
# normalize but not by zero degree
if deg == 0:
deg = 1
nbrdeg = target_degree(G[n])
if weight is None:
avg[n] = sum(d for n, d in nbrdeg) / float(deg)
else:
avg[n] = sum((G[n][nbr].get(weight, 1) * d
for nbr, d in nbrdeg)) / float(deg)
return avg
def average_neighbor_degree(G, source='out', target='out',
nodes=None, weight=None):
r"""Returns the average degree of the neighborhood of each node.
The average neighborhood degree of a node `i` is
.. math::
k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j
where `N(i)` are the neighbors of node `i` and `k_j` is
the degree of node `j` which belongs to `N(i)`. For weighted
graphs, an analogous measure can be defined [1]_,
.. math::
k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
where `s_i` is the weighted degree of node `i`, `w_{ij}`
is the weight of the edge that links `i` and `j` and
`N(i)` are the neighbors of node `i`.
Parameters
----------
G : NetworkX graph
source : string ("in"|"out")
Directed graphs only.
Use "in"- or "out"-degree for source node.
target : string ("in"|"out")
Directed graphs only.
Use "in"- or "out"-degree for target node.
nodes : list or iterable, optional
Compute neighbor degree for specified nodes. The default is
all nodes in the graph.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
Returns
-------
d: dict
A dictionary keyed by node with average neighbors degree value.
Examples
--------
>>> G=nx.path_graph(4)
>>> G.edges[0, 1]['weight'] = 5
>>> G.edges[2, 3]['weight'] = 3
>>> nx.average_neighbor_degree(G)
{0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0}
>>> nx.average_neighbor_degree(G, weight='weight')
{0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0}
>>> G=nx.DiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> nx.average_neighbor_degree(G, source='in', target='in')
{0: 1.0, 1: 1.0, 2: 1.0, 3: 0.0}
>>> nx.average_neighbor_degree(G, source='out', target='out')
{0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0}
Notes
-----
For directed graphs you can also specify in-degree or out-degree
by passing keyword arguments.
See Also
--------
average_degree_connectivity
References
----------
.. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
"The architecture of complex weighted networks".
PNAS 101 (11): 3747–3752 (2004).
"""
source_degree = G.degree
target_degree = G.degree
if G.is_directed():
direction = {'out': G.out_degree,
'in': G.in_degree}
source_degree = direction[source]
target_degree = direction[target]
return _average_nbr_deg(G, source_degree, target_degree,
nodes=nodes, weight=weight)
# obsolete
# def average_neighbor_in_degree(G, nodes=None, weight=None):
# if not G.is_directed():
# raise nx.NetworkXError("Not defined for undirected graphs.")
# return _average_nbr_deg(G, G.in_degree, G.in_degree, nodes, weight)
# average_neighbor_in_degree.__doc__=average_neighbor_degree.__doc__
# def average_neighbor_out_degree(G, nodes=None, weight=None):
# if not G.is_directed():
# raise nx.NetworkXError("Not defined for undirected graphs.")
# return _average_nbr_deg(G, G.out_degree, G.out_degree, nodes, weight)
# average_neighbor_out_degree.__doc__=average_neighbor_degree.__doc__
|
deepesch/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_base.py
|
284
|
"""
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
|
contentful/contentful.py
|
refs/heads/master
|
contentful/sync_page.py
|
1
|
from six.moves.urllib.parse import urlsplit, parse_qs
from .resource import Resource
"""
contentful.sync_page
~~~~~~~~~~~~~~~~~~~~
This module implements the SyncPage class.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/synchronization
:copyright: (c) 2016 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class SyncPage(Resource):
"""
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/synchronization
"""
def __init__(self, item, **kwargs):
super(SyncPage, self).__init__(item, **kwargs)
self.next_sync_url = item.get('nextSyncUrl', '')
self.next_page_url = item.get('nextPageUrl', '')
self.next_sync_token = self._get_sync_token()
self.items = self._hydrate_items()
def next(self, client):
"""Fetches next SyncPage
:param client: CDA Client.
:return: :class:`SyncPage <SyncPage>`
:rtype: contentful.sync_page.SyncPage
"""
return client.sync({'sync_token': self.next_sync_token})
def _get_sync_token(self):
url_parts = urlsplit(self.next_sync_url or self.next_page_url)
querystring = parse_qs(url_parts.query)
return querystring['sync_token'][0]
def _hydrate_items(self):
from .resource_builder import ResourceBuilder
items = []
for item in self.raw.get('items', []):
items.append(
ResourceBuilder(
self.default_locale,
True,
item
).build()
)
return items
def __repr__(self):
return "<SyncPage next_sync_token='{0}'>".format(
self.next_sync_token
)
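# Illustrative sketch (not part of the SDK module itself): a full sync loop
# built on Client.sync, which this class wraps.
def _example_sync(client):
    page = client.sync({'initial': True})  # first page of the initial sync
    while page.next_page_url:              # follow nextPageUrl while present
        page = page.next(client)
    return page.next_sync_token            # persist for the next delta sync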
|
qenter/vlc-android
|
refs/heads/master
|
toolchains/arm/lib/python2.7/tabnanny.py
|
394
|
#! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
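    # Illustrative witness for the sharpness claim above: M = "\t" and
    # N = " \t" satisfy M.indent_level(1) == 1 < 2 == N.indent_level(1),
    # but both indent to column 2 at tab size 2, so M.less(N) is False and
    # ts == 2 (== n) is the witnessing tab size.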
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
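            # illustrative token stream:  NEWLINE  NL  COMMENT  INDENT  NAME
            # the INDENT resets check_equal; had a NAME followed the comment
            # directly, the equality check below would have fired instead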
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
|
syphar/django
|
refs/heads/master
|
tests/db_functions/test_datetime.py
|
6
|
from __future__ import unicode_literals
from datetime import datetime
import pytz
from django.conf import settings
from django.db import connection
from django.db.models import DateField, DateTimeField, IntegerField, TimeField
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay,
TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime, TruncYear,
)
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import DTModel
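# Some database backends cannot store microseconds on datetime columns; when
# the connection reports no microsecond precision, expected values must have
# their microseconds dropped to match what the database actually stores.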
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
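# Illustrative usage of the helper above (not from the original file):
#   truncate_to(datetime(2015, 6, 15, 14, 30, 50), 'month')
#   -> datetime(2015, 6, 1, 0, 0)
# When tzinfo is given the value is first converted into that zone, truncated
# as a naive datetime, then re-localized, so a truncation that crosses a DST
# boundary picks up the offset that is correct for the truncated value.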
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_extract_year_exact_lookup(self):
"""
Extract year uses a BETWEEN filter to compare the year to allow indexes
to be used.
"""
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__exact=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# exact is implied and should be the same
qs = DTModel.objects.filter(start_datetime__year=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# date and datetime fields should behave the same
qs = DTModel.objects.filter(start_date__year=2015)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
def test_extract_year_greaterthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__gt=2015)
self.assertEqual(qs.count(), 1)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.filter(start_datetime__year__gte=2015)
self.assertEqual(qs.count(), 2)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
def test_extract_year_lessthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
qs = DTModel.objects.filter(start_datetime__year__lt=2016)
self.assertEqual(qs.count(), 1)
        self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.filter(start_datetime__year__lte=2016)
self.assertEqual(qs.count(), 2)
        self.assertEqual(str(qs.query).lower().count('extract'), 0)
def test_extract_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
with self.assertRaisesMessage(ValueError, 'lookup_name must be provided'):
Extract('start_datetime')
msg = 'Extract input expression must be DateField, DateTimeField, or TimeField.'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(extracted=Extract('name', 'hour')))
with self.assertRaisesMessage(
ValueError, "Cannot extract time component 'second' from DateField 'start_date'."):
list(DTModel.objects.annotate(extracted=Extract('start_date', 'second')))
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'year')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'month')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'day')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'week_day')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1)
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'hour')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'minute')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'second')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=Extract('start_datetime', 'year')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=Extract('start_datetime', 'hour')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_date__month=Extract('start_date', 'month')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_time__hour=Extract('start_time', 'hour')).count(), 2)
def test_extract_year_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=ExtractYear('start_datetime')).count(), 2)
def test_extract_month_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__month=ExtractMonth('start_datetime')).count(), 2)
def test_extract_day_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__day=ExtractDay('start_datetime')).count(), 2)
def test_extract_weekday_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_date')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__week_day=ExtractWeekDay('start_datetime')).count(), 2)
def test_extract_hour_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=ExtractHour('start_datetime')).count(), 2)
def test_extract_minute_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__minute=ExtractMinute('start_datetime')).count(), 2)
def test_extract_second_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__second=ExtractSecond('start_datetime')).count(), 2)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, kind)),
(end_datetime, truncate_to(end_datetime, kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_year_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'year')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'year')),
(end_datetime, truncate_to(end_datetime, 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'year')),
(end_datetime, truncate_to(end_datetime.date(), 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField())))
def test_trunc_month_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'month')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'month')),
(end_datetime, truncate_to(end_datetime, 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'month')),
(end_datetime, truncate_to(end_datetime.date(), 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMonth('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time', output_field=TimeField())))
def test_trunc_date_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDate('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.date()),
(end_datetime, end_datetime.date()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__date=TruncDate('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time', output_field=TimeField())))
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.time()),
(end_datetime, end_datetime.time()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
def test_trunc_day_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'day')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'day')),
(end_datetime, truncate_to(end_datetime, 'day')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time', output_field=TimeField())))
def test_trunc_hour_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'hour')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'hour')),
(end_datetime, truncate_to(end_datetime, 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'hour')),
(end_datetime, truncate_to(end_datetime.time(), 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncHour('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date', output_field=DateField())))
def test_trunc_minute_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'minute')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'minute')),
(end_datetime, truncate_to(end_datetime, 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'minute')),
(end_datetime, truncate_to(end_datetime.time(), 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMinute('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date', output_field=DateField())))
def test_trunc_second_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'second')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'second')),
(end_datetime, truncate_to(end_datetime, 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'second')),
(end_datetime, truncate_to(end_datetime.time(), 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
result = 1 if connection.features.supports_microsecond_precision else 2
self.assertEqual(DTModel.objects.filter(start_datetime=TruncSecond('start_datetime')).count(), result)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date', output_field=DateField())))
@override_settings(USE_TZ=True, TIME_ZONE='UTC')
class DateFunctionWithTimeZoneTests(DateFunctionTests):
def test_extract_func_with_timezone(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 23, 30, 1, 321))
end_datetime = microsecond_support(datetime(2015, 6, 16, 13, 11, 27, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
qs = DTModel.objects.annotate(
day=Extract('start_datetime', 'day'),
day_melb=Extract('start_datetime', 'day', tzinfo=melb),
weekday=ExtractWeekDay('start_datetime'),
weekday_melb=ExtractWeekDay('start_datetime', tzinfo=melb),
hour=ExtractHour('start_datetime'),
hour_melb=ExtractHour('start_datetime', tzinfo=melb),
).order_by('start_datetime')
utc_model = qs.get()
self.assertEqual(utc_model.day, 15)
self.assertEqual(utc_model.day_melb, 16)
self.assertEqual(utc_model.weekday, 2)
self.assertEqual(utc_model.weekday_melb, 3)
self.assertEqual(utc_model.hour, 23)
self.assertEqual(utc_model.hour_melb, 9)
with timezone.override(melb):
melb_model = qs.get()
self.assertEqual(melb_model.day, 16)
self.assertEqual(melb_model.day_melb, 16)
self.assertEqual(melb_model.weekday, 3)
self.assertEqual(melb_model.weekday_melb, 3)
self.assertEqual(melb_model.hour, 9)
self.assertEqual(melb_model.hour_melb, 9)
def test_extract_func_explicit_timezone_priority(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 23, 30, 1, 321))
end_datetime = microsecond_support(datetime(2015, 6, 16, 13, 11, 27, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
with timezone.override(melb):
model = DTModel.objects.annotate(
day_melb=Extract('start_datetime', 'day'),
day_utc=Extract('start_datetime', 'day', tzinfo=timezone.utc),
).order_by('start_datetime').get()
self.assertEqual(model.day_melb, 16)
self.assertEqual(model.day_utc, 15)
def test_trunc_timezone_applied_before_truncation(self):
start_datetime = microsecond_support(datetime(2016, 1, 1, 1, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
pacific = pytz.timezone('US/Pacific')
model = DTModel.objects.annotate(
melb_year=TruncYear('start_datetime', tzinfo=melb),
pacific_year=TruncYear('start_datetime', tzinfo=pacific),
).order_by('start_datetime').get()
self.assertEqual(model.start_datetime, start_datetime)
self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb))
self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific))
self.assertEqual(model.start_datetime.year, 2016)
self.assertEqual(model.melb_year.year, 2016)
self.assertEqual(model.pacific_year.year, 2015)
def test_trunc_func_with_timezone(self):
"""
If the truncated datetime transitions to a different offset (daylight
saving) then the returned value will have that new timezone/offset.
"""
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
melb = pytz.timezone('Australia/Melbourne')
        def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.astimezone(melb), kind, melb)),
(end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb))
],
lambda m: (m.start_datetime, m.truncated)
)
        def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
        def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
|
Ruide/angr-dev
|
refs/heads/master
|
angr/angr/type_backend.py
|
5
|
import claripy
# reduce is a builtin on Python 2 only; importing it from functools keeps
# this module working on Python 3 as well
from functools import reduce
from .sim_type import SimTypePointer as Ptr, SimTypeTop as Top
class TypedValue(claripy.BackendObject):
def __init__(self, ty, value):
self.ty = ty
self.value = value
def __repr__(self):
return 'TypedValue(%s, %s)' % (repr(self.ty), repr(self.value))
class TypeBackend(claripy.Backend):
def __init__(self):
super(TypeBackend, self).__init__(solver_required=False)
self._op_expr['BVS'] = self._make_top
self._op_expr['BVV'] = self._make_top
self._op_raw['__add__'] = self._do_add
self._op_raw['__sub__'] = self._do_sub
self._op_raw['__and__'] = self._do_and
self._op_raw['__or__'] = self._do_or
self._op_raw['__xor__'] = self._do_xor
@staticmethod
def _make_top(ast, **kwargs): # pylint: disable=unused-argument
return TypedValue(Top(label=[]), ast)
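    # Typing rule shared by the binary handlers below: when exactly one
    # operand is a pointer, the result keeps that pointer type with the
    # offset folded in (for subtraction only a pointer on the left side
    # survives); any other combination degrades to Top. The label lists of
    # both operand types are concatenated either way.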
def _do_add(self, *args):
if len(args) != 2:
return reduce(self._do_add, args)
a, b = args
good_a = type(a.ty) is Ptr
good_b = type(b.ty) is Ptr
val = a.value + b.value
out = TypedValue(Top(), val)
if good_a:
if not good_b:
out.ty = Ptr(a.ty.pts_to, offset=a.ty.offset + b.value)
elif good_b:
out.ty = Ptr(b.ty.pts_to, offset=b.ty.offset + a.value)
else:
out.ty = Top()
out.ty.label = a.ty.label + b.ty.label
return out
def _do_and(self, *args):
if len(args) != 2:
return reduce(self._do_and, args)
a, b = args
good_a = type(a.ty) is Ptr
good_b = type(b.ty) is Ptr
val = a.value & b.value
out = TypedValue(Top(), val)
if good_a:
if not good_b:
out.ty = Ptr(a.ty.pts_to, offset=a.ty.offset & b.value)
elif good_b:
out.ty = Ptr(b.ty.pts_to, offset=b.ty.offset & a.value)
else:
out.ty = Top()
out.ty.label = a.ty.label + b.ty.label
return out
def _do_or(self, *args):
if len(args) != 2:
return reduce(self._do_or, args)
a, b = args
good_a = type(a.ty) is Ptr
good_b = type(b.ty) is Ptr
val = a.value | b.value
out = TypedValue(Top(), val)
if good_a:
if not good_b:
out.ty = Ptr(a.ty.pts_to, offset=a.ty.offset | b.value)
elif good_b:
out.ty = Ptr(b.ty.pts_to, offset=b.ty.offset | a.value)
else:
out.ty = Top(label=[])
out.ty.label = a.ty.label + b.ty.label
return out
def _do_xor(self, *args):
if len(args) != 2:
return reduce(self._do_xor, args)
a, b = args
good_a = type(a.ty) is Ptr
good_b = type(b.ty) is Ptr
val = a.value ^ b.value
out = TypedValue(Top(), val)
if good_a:
if not good_b:
out.ty = Ptr(a.ty.pts_to, offset=a.ty.offset ^ b.value)
elif good_b:
out.ty = Ptr(b.ty.pts_to, offset=b.ty.offset ^ a.value)
else:
out.ty = Top()
out.ty.label = a.ty.label + b.ty.label
return out
def _do_sub(self, *args):
if len(args) != 2:
return reduce(self._do_sub, args)
a, b = args
good_a = type(a.ty) is Ptr
good_b = type(b.ty) is Ptr
val = a.value - b.value
        out = TypedValue(Top(), val)
if good_a and not good_b:
out.ty = Ptr(a.ty.pts_to, offset=a.ty.offset - b.value)
else:
out.ty = Top()
out.ty.label = a.ty.label + b.ty.label
return out
def apply_annotation(self, obj, a):
if type(a) is TypeAnnotation:
return TypedValue(a.ty, obj.value)
return obj
@staticmethod
def default_op(expr):
return TypedValue(Top(label=[]), expr)
#def convert(self, *args, **kwargs):
# import ipdb; ipdb.set_trace()
# return super(TypeBackend, self).convert(*args, **kwargs)
class TypeAnnotation(claripy.Annotation):
def __init__(self, ty):
self.ty = ty
@property
def eliminatable(self): #pylint:disable=no-self-use
return False
@property
def relocatable(self): #pylint:disable=no-self-use
return False
|
manahl/mockextras
|
refs/heads/master
|
mockextras/_matchers.py
|
1
|
# mockextras.matchers
# Matchers and Stubs for mock.
# Copyright (C) 2012-2015 Man AHL
# E-mail: ManAHLTech AT ahl DOT com
# mockextras 1.0.0
# https://github.com/manahl/mockextras
# Released subject to the BSD License
# Please see https://github.com/manahl/mockextras/blob/master/LICENSE.txt
__all__ = ['Any', 'Contains', 'AnyOf']
class Any(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
The Any matcher will match any object.
>>> whatever = Any()
>>> assert whatever == 'hello'
>>> assert whatever == 100
>>> assert whatever == range(10)
You can optionally specify a type so that Any only matches objects of that type.
>>> anystring = Any(basestring)
>>> assert anystring == 'hello'
>>> assert anystring == 'monkey'
>>> assert anystring == u'bonjour'
>>> assert anystring != ['hello', 'world']
Test additional predicates as needed:
>>> loud_short_string = (
... Any(str)
... .such_that(lambda s: len(s) < 6)
... .such_that(lambda s: s.upper() == s)
... )
>>> assert loud_short_string == "HI"
>>> assert loud_short_string != "TOO LONG"
>>> assert loud_short_string != "quiet"
Any can be used when specifying stubs:
    >>> try:
    ...     from unittest.mock import Mock, call
    ... except ImportError:
    ...     from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello", "world"), 100),
... (call("bye bye", Any()), 200))
>>> mock("bye bye", "world")
200
>>> mock("bye bye", "Fred")
200
>>> mock("bye bye", range(100))
200
>>> mock("bye bye", { 'a' : 1000, 'b' : 2000})
200
or when asserting call arguments:
    >>> try:
    ...     from unittest.mock import Mock
    ... except ImportError:
    ...     from mock import Mock
>>>
>>> mock = Mock()
>>> mock("bye bye", "world")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with("bye bye", Any())
>>> mock("bye bye", "Fred")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("bye bye", "world"),
... call("bye bye", Any())]
"""
def __init__(self, cls=object, predicates=None):
self._cls = cls
self._predicates = predicates or []
def __eq__(self, other):
return (
isinstance(other, self._cls) and
all(predicate(other) for predicate in self._predicates)
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
base = 'Any(%s)' % ('' if self._cls is object else self._cls)
such_thats = (
'.' +
'.'.join('such_that(%s)' % getattr(p, "__name__", p) for p in self._predicates)
) if self._predicates else ''
return base + such_thats
def such_that(self, predicate):
return Any(cls=self._cls, predicates=self._predicates + [predicate])
class Contains(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
The Contains matcher will match objects that contain the given value or substring.
>>> contains_five = Contains(5)
>>> assert contains_five == range(10)
>>> assert contains_five != range(4)
>>> contains_ello = Contains('ello')
>>> assert contains_ello == "hello"
>>> assert contains_ello != "bye bye"
Contains can be used when specifying stubs:
    >>> try:
    ...     from unittest.mock import Mock, call
    ... except ImportError:
    ...     from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello", "world"), 100),
... (call("bye bye", Contains('monkey')), 200))
>>> mock("bye bye", "uncle monkey")
200
or when asserting call arguments:
    >>> try:
    ...     from unittest.mock import Mock
    ... except ImportError:
    ...     from mock import Mock
>>>
>>> mock = Mock()
>>> mock("bye bye", "world")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with("bye bye", Contains('or'))
>>> mock("bye bye", "Fred")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("bye bye", "world"),
... call("bye bye", Contains('red'))]
"""
def __init__(self, value):
self._value = value
def __eq__(self, other):
return self._value in other
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Contains(%r)' % self._value
class AnyOf(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
    The AnyOf matcher will match any object that is equal to one of the given values.
>>> is_a_small_prime = AnyOf(2,3,5,7,11,13)
>>> assert is_a_small_prime == 3
>>> assert is_a_small_prime != 4
AnyOf can be used when specifying stubs:
    >>> try:
    ...     from unittest.mock import Mock, call
    ... except ImportError:
    ...     from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello"), 100),
... (call(AnyOf('monkey', 'donkey', 'badger')), 200))
>>> mock("monkey")
200
or when asserting call arguments:
    >>> try:
    ...     from unittest.mock import Mock
    ... except ImportError:
    ...     from mock import Mock
>>>
>>> mock = Mock()
>>> mock("donkey")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with(AnyOf('monkey', 'donkey', 'badger'))
>>> mock("monkey")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("donkey"),
... call(AnyOf('monkey', 'donkey', 'badger'))]
"""
def __init__(self, *args):
self._set = set(args)
def __eq__(self, other):
return other in self._set
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'AnyOf(%s)' % ', '.join(map(repr, self._set))
|
joequery/lesscpy
|
refs/heads/master
|
lesscpy/plib/variable.py
|
3
|
# -*- coding: utf8 -*-
"""
.. module:: lesscpy.plib.variable
:synopsis: Variable declaration
Copyright (c)
See LICENSE for details.
.. moduleauthor:: Johann T. Mariusson <jtm@robot.is>
"""
from .node import Node
class Variable(Node):
def parse(self, scope):
""" Parse function
args:
scope (Scope): Scope object
returns:
self
"""
self.name, _, self.value = self.tokens
if isinstance(self.name, tuple):
if len(self.name) > 1:
self.name, pad = self.name
self.value.append(pad)
else:
self.name = self.name[0]
scope.add_variable(self)
return self
def copy(self):
""" Return a copy of self
Returns:
Variable object
"""
return Variable([t for t in self.tokens])
def fmt(self, fills):
return ''
|
voxy/bluecanary
|
refs/heads/master
|
bluecanary/utilities/load_yaml.py
|
1
|
import os
from os.path import isfile, isdir
import click
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from bluecanary.managers import (
AlarmsManager,
AWSCredentialsManager,
ConfigurationManager,
TagsManager,
)
def load_path(path):
if not path:
click.echo('Please provide either a file or directory path')
exit(1)
else:
paths = path
for path in paths:
if isfile(path):
load_yaml_file(path)
elif isdir(path):
load_yaml_directory(path)
def load_yaml_file(filepath):
with open(filepath) as fp:
parse_data(load(fp, Loader=Loader))
def load_yaml_directory(path=None):
if not path:
path = os.path.dirname(os.path.realpath(__file__))
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
if filename.lower().endswith('.yaml') or filename.lower().endswith('.yml'):
load_yaml_file(os.path.join(dirpath, filename))
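# Expected YAML layout, inferred from the keys consumed below (the field
# values shown are placeholders, not part of the original file):
#
#   AWSProfiles:
#     - AWS_PROFILE: default
#       AWS_ACCESS_KEY_ID: ...
#       AWS_SECRET_ACCESS_KEY: ...
#       AWS_DEFAULT_REGION: us-east-1
#   AlarmGroups:
#     - ...
#   TagGroups:
#     - ...
#   Configuration:
#     Plugins:
#       - /path/to/plugin/directory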
def parse_data(data):
for profile in data.get('AWSProfiles', []):
AWSCredentialsManager.add_aws_profile(
aws_profile=profile['AWS_PROFILE'],
aws_access_key_id=profile['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=profile['AWS_SECRET_ACCESS_KEY'],
aws_default_region=profile['AWS_DEFAULT_REGION'],
)
for alarm_group in data.get('AlarmGroups', []):
AlarmsManager.add_alarm_group(**alarm_group)
for tag_group in data.get('TagGroups', []):
TagsManager.add_tag_group(**tag_group)
if data.get('Configuration'):
for directory_path in data.get('Configuration').get('Plugins', []):
ConfigurationManager.add_plugins_directory(directory_path)
|
vLj2/docman
|
refs/heads/master
|
maintenance/tests.py
|
3
|
from django.test import TestCase
from django.http import HttpRequest
from django.conf import settings
from datetime import datetime, timedelta
from middleware import MaintenanceMiddleware
from models import Maintenance, MaintenanceFilter
class MaintenanceMiddlewareTest(TestCase):
def _create_request(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80
}
request.path = request.path_info = "/%s" % path
return request
    def _create_maintenance_now(self):
        maintenance = Maintenance()
        # a maintenance window that is active right now: one hour either side
        maintenance.start_time = datetime.now() - timedelta(hours=1)
        maintenance.end_time = datetime.now() + timedelta(hours=1)
        return maintenance
    def _create_maintenance_yesterday(self):
        maintenance = Maintenance()
        # a window that ended a day ago: from 25 hours ago until 24 hours ago
        maintenance.start_time = datetime.now() - timedelta(days=1, hours=1)
        maintenance.end_time = datetime.now() - timedelta(days=1)
        return maintenance
def test_should_always_allow_admin(self):
request = self._create_request('admin/')
middleware = MaintenanceMiddleware()
maintenance = self._create_maintenance_now()
maintenance.enabled = True
maintenance.description = "Testing admin request"
maintenance.save()
response = middleware.process_request(request)
self.assertEquals(None, response)
def test_should_return_none_on_empty_database(self):
request = self._create_request('testing/')
middleware = MaintenanceMiddleware()
self.assertEquals(0, Maintenance.objects.all().count())
self.assertEquals(None, middleware.process_request(request))
def test_should_return_none_on_enabled_and_inactive_object(self):
request = self._create_request('testing/')
middleware = MaintenanceMiddleware()
maintenance = self._create_maintenance_yesterday()
maintenance.enabled = True
maintenance.description = "Testing inactive object"
maintenance.save()
response = middleware.process_request(request)
self.assertEquals(1, Maintenance.objects.all().count())
self.assertEquals(None, response)
def test_should_return_none_on_enable_and_active_if_filter_misses(self):
request = self._create_request('testing/')
middleware = MaintenanceMiddleware()
maintenance = self._create_maintenance_now()
maintenance.enabled = True
maintenance.description = "Testing enabled and filtered object"
maintenance.save()
filter = MaintenanceFilter()
filter.maintenance = maintenance
filter.path = '/blog/'
filter.save()
response = middleware.process_request(request)
self.assertEquals(1, Maintenance.objects.all()[0].maintenancefilter_set.count())
self.assertEquals(1, Maintenance.objects.all().count())
self.assertEquals(None, response)
def test_should_return_none_on_disabled_and_active_object(self):
request = self._create_request('testing/')
middleware = MaintenanceMiddleware()
maintenance = self._create_maintenance_now()
maintenance.enabled = False
maintenance.description = "Testing disabled object"
maintenance.save()
response = middleware.process_request(request)
self.assertEquals(1, Maintenance.objects.all().count())
self.assertEquals(None, response)
def test_should_interrupt_on_enabled_and_active_object(self):
request = self._create_request('testing/')
middleware = MaintenanceMiddleware()
maintenance = self._create_maintenance_now()
maintenance.enabled = True
maintenance.description = "Testing enabled object"
maintenance.save()
response = middleware.process_request(request)
self.assertEquals(1, Maintenance.objects.all().count())
self.assertNotEquals(None, response)
|
wjkohnen/antlr4
|
refs/heads/master
|
runtime/Python3/src/antlr4/dfa/DFAState.py
|
4
|
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# Map a predicate to a predicted alternative.#/
from io import StringIO
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.SemanticContext import SemanticContext
class PredPrediction(object):
def __init__(self, pred:SemanticContext, alt:int):
self.alt = alt
self.pred = pred
def __str__(self):
return "(" + str(self.pred) + ", " + str(self.alt) + ")"
# A DFA state represents a set of possible ATN configurations.
# As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
# to keep track of all possible states the ATN can be in after
# reading each input symbol. That is to say, after reading
# input a1a2..an, the DFA is in a state that represents the
# subset T of the states of the ATN that are reachable from the
# ATN's start state along some path labeled a1a2..an."
# In conventional NFA→DFA conversion, therefore, the subset T
# would be a bitset representing the set of states the
# ATN could be in. We need to track the alt predicted by each
# state as well, however. More importantly, we need to maintain
# a stack of states, tracking the closure operations as they
# jump from rule to rule, emulating rule invocations (method calls).
# I have to add a stack to simulate the proper lookahead sequences for
# the underlying LL grammar from which the ATN was derived.
#
# <p>I use a set of ATNConfig objects not simple states. An ATNConfig
# is both a state (ala normal conversion) and a RuleContext describing
# the chain of rules (if any) followed to arrive at that state.</p>
#
# <p>A DFA state may have multiple references to a particular state,
# but with different ATN contexts (with same or different alts)
# meaning that state was reached via a different set of rule invocations.</p>
#/
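# Illustrative example (not part of the original comments): after reading the
# input "ab", one DFA state might stand for the configuration set
#   { (ATN state 5, alt 1, ctx []), (ATN state 9, alt 2, ctx [expr]) }
# i.e. the same input prefix is still compatible with two alternatives, each
# carrying its own chain of rule invocations.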
class DFAState(object):
    def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=None):
        self.stateNumber = stateNumber
        # create a fresh ATNConfigSet per state; a mutable default argument
        # would be shared between every DFAState constructed without configs
        self.configs = configs if configs is not None else ATNConfigSet()
        # {@code edges[symbol]} points to the target state of that symbol.
        # Indices are shifted up by 1 so that {@link Token#EOF} (-1) maps to
        # {@code edges[0]}.
self.edges = None
self.isAcceptState = False
# if accept state, what ttype do we match or alt do we predict?
# This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
# {@link #requiresFullContext}.
self.prediction = 0
self.lexerActionExecutor = None
# Indicates that this state was created during SLL prediction that
# discovered a conflict between the configurations in the state. Future
        # {@link ParserATNSimulator#execATN} invocations immediately jump to doing
# full context prediction if this field is true.
self.requiresFullContext = False
# During SLL parsing, this is a list of predicates associated with the
# ATN configurations of the DFA state. When we have predicates,
# {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
# on-the-fly. If this is not null, then {@link #prediction} is
# {@link ATN#INVALID_ALT_NUMBER}.
#
# <p>We only use these for non-{@link #requiresFullContext} but conflicting states. That
# means we know from the context (it's $ or we don't dip into outer
# context) that it's an ambiguity not a conflict.</p>
#
# <p>This list is computed by {@link ParserATNSimulator#predicateDFAState}.</p>
self.predicates = None
# Get the set of all alts mentioned by all ATN configurations in this
# DFA state.
def getAltSet(self):
if self.configs is not None:
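            # the `or None` coerces an empty (falsy) alt set to None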
return set(cfg.alt for cfg in self.configs) or None
return None
def __hash__(self):
return hash(self.configs)
# Two {@link DFAState} instances are equal if their ATN configuration sets
# are the same. This method is used to see if a state already exists.
#
# <p>Because the number of alternatives and number of ATN configurations are
# finite, there is a finite number of DFA states that can be processed.
# This is necessary to show that the algorithm terminates.</p>
#
# <p>Cannot test the DFA state numbers here because in
# {@link ParserATNSimulator#addDFAState} we need to know if any other state
# exists that has this exact set of ATN configurations. The
# {@link #stateNumber} is irrelevant.</p>
def __eq__(self, other):
# compare set of ATN configurations in this set with other
if self is other:
return True
elif not isinstance(other, DFAState):
return False
else:
return self.configs==other.configs
def __str__(self):
with StringIO() as buf:
buf.write(str(self.stateNumber))
buf.write(":")
buf.write(str(self.configs))
if self.isAcceptState:
buf.write("=>")
if self.predicates is not None:
buf.write(str(self.predicates))
else:
buf.write(str(self.prediction))
return buf.getvalue()
|
marco-lancini/Showcase
|
refs/heads/master
|
django/contrib/auth/tests/auth_backends.py
|
104
|
import warnings
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
from django.test import TestCase
from django.utils import unittest
class BackendTest(TestCase):
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
User.objects.create_user('test', 'test@example.com', 'test')
User.objects.create_superuser('test2', 'test2@example.com', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = User.objects.get(username='test')
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = User.objects.get(username='test')
        content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = User.objects.get(username='test')
        self.assertEqual(user.get_all_permissions(), set([u'auth.test']))
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = User.objects.get(username='test')
exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set([u'auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = User.objects.get(username='test')
        content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
def test_get_all_superuser_permissions(self):
"A superuser has all permissions. Refs #14795"
user = User.objects.get(username='test2')
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
BackendTest = unittest.skipIf(not connection.features.supports_joins,
'Requires JOIN support')(BackendTest)
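# (Applied by manual wrapping rather than decorator syntax because class
# decorators are unavailable on the oldest Python versions Django supports.)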
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
supports_object_permissions = True
supports_inactive_user = False
    # This class also supports tests for anonymous and inactive user
    # permissions via subclasses, which just set the
    # 'supports_anonymous_user' or 'supports_inactive_user' attribute.
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
# not reached due to supports_anonymous_user = False
return True
elif not user.is_active and perm == 'inactive':
return True
return False
def has_module_perms(self, user, app_label):
if not user.is_anonymous() and not user.is_active:
return False
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
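# Minimal sketch (illustrative only, not Django internals): roughly how the
# auth machinery consults a row-level backend such as the one above. The
# fake user class below is a local stand-in, not a real User instance.
def _demo_rowlevel_backend():
    class _FakeUser(object):
        username = 'test2'
        is_active = True
        def is_anonymous(self):
            return False
    backend = SimpleRowlevelBackend()
    assert backend.has_perm(_FakeUser(), 'perm', TestObj()) is True
    assert backend.has_perm(_FakeUser(), 'perm') is None  # no obj: no opinion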
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test')
self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test')
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
self.restore_warnings_state()
# The get_group_permissions test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
        content_type = ContentType.objects.get_for_model(Group)
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
RowlevelBackendTest = unittest.skipIf(not connection.features.supports_joins,
'Requires JOIN support')(RowlevelBackendTest)
class AnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = True
supports_inactive_user = False
class NoAnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
supports_inactive_user = False
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True
"""
backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
class NoAnonymousUserBackendTest(TestCase):
"""
Tests that AnonymousUser does not delegate to backend if it has 'supports_anonymous_user' = False
"""
backend = 'django.contrib.auth.tests.auth_backends.NoAnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), False)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set())
class NoBackendsTest(TestCase):
"""
Tests that an appropriate error is raised if no auth backends are provided.
"""
def setUp(self):
self.old_AUTHENTICATION_BACKENDS = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = []
self.user = User.objects.create_user('test', 'test@example.com', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.old_AUTHENTICATION_BACKENDS
def test_raises_exception(self):
self.assertRaises(ImproperlyConfigured, self.user.has_perm, ('perm', TestObj(),))
class InActiveUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
supports_inactive_user = True
class NoInActiveUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
supports_inactive_user = False
class InActiveUserBackendTest(TestCase):
"""
    Tests for an inactive user delegating to the backend if it has 'supports_inactive_user' = True
"""
backend = 'django.contrib.auth.tests.auth_backends.InActiveUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user1.is_active = False
self.user1.save()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
class NoInActiveUserBackendTest(TestCase):
"""
Tests that an inactive user does not delegate to backend if it has 'supports_inactive_user' = False
"""
backend = 'django.contrib.auth.tests.auth_backends.NoInActiveUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user1.is_active = False
self.user1.save()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
|
AlexanderFabisch/scikit-learn
|
refs/heads/master
|
examples/ensemble/plot_forest_importances_faces.py
|
403
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the
pixel, the more important it is.
The code below also illustrates how the construction of the forest and the
computation of the predictions can be parallelized across multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
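# (Setting n_jobs = -1 would use all available cores, per joblib convention.)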
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
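# Optional follow-up (an added illustration, not part of the original
# example): report the coordinates of the ten most important pixels
# instead of only plotting the heat map.
import numpy as np
flat = importances.ravel()
top = np.argsort(flat)[::-1][:10]
rows, cols = np.unravel_index(top, importances.shape)
for r, c, v in zip(rows, cols, flat[top]):
    print("pixel (%d, %d): importance %.5f" % (r, c, v))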
|