repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
ariabuckles/pyobjc-framework-Cocoa | refs/heads/master | PyObjCTest/test_nscachedimagerep.py | 3 | from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSCachedImageRep (TestCase):
def testMethods(self):
self.assertArgIsBOOL(NSCachedImageRep.initWithSize_depth_separate_alpha_, 2)
self.assertArgIsBOOL(NSCachedImageRep.initWithSize_depth_separate_alpha_, 3)
if __name__ == "__main__":
main()
|
andresriancho/django-axes | refs/heads/master | axes/views.py | 6027 | # Create your views here.
|
Einsteinish/PyTune3 | refs/heads/master | vendor/paypal/standard/ipn/tests/test_forms.py | 19 | from django.test import TestCase
from paypal.standard.forms import PayPalPaymentsForm
class PaymentsFormTest(TestCase):
def test_form_render(self):
f = PayPalPaymentsForm(initial={'business':'me@mybusiness.com',
'amount': '10.50',
'shipping': '2.00',
})
rendered = f.render()
self.assertIn('''action="https://www.sandbox.paypal.com/cgi-bin/webscr"''', rendered)
self.assertIn('''value="me@mybusiness.com"''', rendered)
self.assertIn('''value="2.00"''', rendered)
self.assertIn('''value="10.50"''', rendered)
self.assertIn('''buynowCC''', rendered)
def test_form_endpont(self):
with self.settings(PAYPAL_TEST=False):
f = PayPalPaymentsForm(initial={})
self.assertNotIn('sandbox', f.render())
|
sktjdgns1189/android_kernel_samsung_jalteskt | refs/heads/cm-13.0 | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append an obj-$(CONFIG_...) entry for the new fabric module to the
    kernel tree's drivers/target/Makefile.

    :param tcm_dir: Root of the kernel source tree.
    :param fabric_mod_name: Name of the new fabric module.
    """
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    makefile = open(tcm_dir + "/drivers/target/Makefile", 'a')
    makefile.write(entry)
    makefile.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source directive for the new fabric module's Kconfig to
    the kernel tree's drivers/target/Kconfig.

    :param tcm_dir: Root of the kernel source tree.
    :param fabric_mod_name: Name of the new fabric module.
    """
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig_file = open(tcm_dir + "/drivers/target/Kconfig", 'a')
    kconfig_file.write(entry)
    kconfig_file.close()
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line interface: both options are required and take one string
# argument each.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
        action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
        action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# optparse has no native "required option" support, so verify by hand that
# both mandatory options were supplied before running.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
|
IvIePhisto/EnhAtts | refs/heads/master | enhatts/__init__.py | 1 | # -*- coding: UTF-8 -*-
'''\
EnhAtts – Enhanced Attributes
=============================
This project implements *properties on steroids* called "fields" for Python 2
and 3 (tested with CPython 2.7 and 3.3 as well as PyPy 2). It is based on the
"Felder" (german for *fields*) concept of the doctoral thesis of Patrick Lay
"Entwurf eines Objektmodells für semistrukturierte Daten im Kontext von XML
Content Management Systemen" (Rheinische Friedrich-Wilhelms Universität Bonn,
2006) and is developed as part of the diploma thesis of Michael Pohl
"Architektur und Implementierung des Objektmodells für ein Web Application
Framework" (Rheinische Friedrich-Wilhelms Universität Bonn, 2013-2014).
What is a Field?
----------------
A *field* is an attribute on a new-style class called *field container*. A
field has a name and a *field class*, which implements the access to the
attribute and is instantiated once per field container instance with the
field's name. On field containers the attribute ``FIELDS`` is defined allowing
access to the *fields mapping* – an ordered mapping which allows access to the
fields by their name.
The class :class:`Field` defines the *field protocol* and serves as a base
class. :class:`DataField` extends this class and adds basic capabilities for
storing data in the field container as an attribute with the name ``_``
appended by the field's name. :class:`ValueField` in contrast stores the value
in its own instance.
In addition to the usual setting, deleting and getting on container instances,
the field protocol also defines a default value, which is returned on getting
on a class, and preparation, which is called before setting to aquire the
value to set and which may raise an exception if the value is invalid.
Getting an item from the fields mapping on a field container instance returns
the field instance. Setting and deleting items in this mapping is the same as
the according access on the attribute. The fields mapping on a container class
allows only getting, which returns the field instance. Iterating over the
mapping is done in the order of the fields, the other mapping methods
:meth:`keys`, :meth:`values` and :meth:`items` also use this order.
Setting the ``FIELDS`` attribute on a field container instance with a
dictionary allows to set multiple fields at once with the values from the
dictionary. First all given values are prepared; if at least one preparation
raises an exception, :class:`FieldPreparationErrors` is raised containing the
exceptions thrown by the preparations. If no preparation fails, all fields are
set to their values.
Defining a Field
----------------
A field is defined on a field container class by using the result of calling
:func:`field` as a class decorator. When the decorator is applied, it creates
the ``FIELDS`` class attribute containing a descriptor for the fields mapping,
if this does not yet exist. The decorator also registers the field on the
mapping by its name and creates another class attribute with the name being
the field's name and containing a descriptor for access to the field. The
order of the fields in the mapping is the order in which the field decorators
appear in the source code (i.e. in the reverse order of definition).
If the supplied field class is not a type object, the argument is used as a
string and the field class to use is determined from the module containing the
implementing class. This way a class extending a field container will use
another field class than the container, if there is an attribute on the
containing module with that name.
:func:`field` allows to specify attributes for the field class, in this case
:func:`tinkerpy.anonymous_class` is used to create an anonymous class based on
the given field class. If no field class is specified, :class:`DataField` is
used as the base class.
To omit creating a string for the field name, you can also retrieve an
attribute from :func:`field`, which is a function creating a field with
the name of the attribute becoming the field's name.
Examples
--------
Field Definition
^^^^^^^^^^^^^^^^
First we define some fields as an anonymous classes and a lazy looked up class
based on :class:`DataField`:
>>> class Data(DataField):
... description='This is data.'
...
... def show(self):
... return str(self.get())
...
>>> @field.number(
... prepare=lambda self, value, field_values: int(value),
... DEFAULT=None)
... @field('data', 'Data')
... class Test(object):
... pass
To retrieve the default values, get the attribute values on the container
class:
>>> Test.number is None
True
>>> Test.data
Traceback (most recent call last):
AttributeError: type object 'Data' has no attribute 'DEFAULT'
To access the field classes, use the fields mapping:
>>> print(Test.FIELDS['data'].description)
This is data.
Field Container Instances
^^^^^^^^^^^^^^^^^^^^^^^^^
On a container instance you can set and get the field values:
>>> test = Test()
>>> test.number
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute '_FIELD_number'
>>> test.number = '1'
>>> test.data = None
>>> test.number == 1 and test.data is None
True
If preparation fails, the value is not set:
>>> test.number = 'a'
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'a'
>>> test.number == 1
True
You can also delete field values:
>>> del test.number
>>> del test.FIELDS['data']
>>> test.number
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute '_FIELD_number'
>>> test.data
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute '_FIELD_data'
Setting :attr:`DeleteField` as the value of a field also deletes the value:
>>> test.number = 1
>>> test.data = 'data'
>>> test.number = DeleteField
>>> test.FIELDS['data'] = DeleteField
>>> test.number
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute '_FIELD_number'
>>> test.data
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute '_FIELD_data'
Existence of a field is different from existence of the field value:
>>> hasattr(test, 'number')
False
>>> 'number' in test.FIELDS
True
>>> test.FIELDS['number']
<UNSET enhatts.<anonymous:enhatts.DataField> object>
The field instances are available on the field container instance's field
mapping:
>>> test.data = 1
>>> test.FIELDS['data'].show()
'1'
Setting Multiple Fields
^^^^^^^^^^^^^^^^^^^^^^^
By assigning a mapping to the container instance's fields mapping, you can set
multiple fields at once if no preparation fails:
>>> test.FIELDS = dict(number='2', data=3)
>>> test.number == 2 and test.data == 3
True
>>> try:
... test.FIELDS = dict(number='a', data=4)
... except FieldPreparationErrors as e:
... for field_name, error in e.items():
... print('{}: {}'.format(field_name, error))
number: invalid literal for int() with base 10: 'a'
>>> test.number == 2 and test.data == 3
True
Using :attr:`DeleteField` field values can also be deleted while setting
multiple fields:
>>> test.FIELDS = dict(number=DeleteField, data=0)
>>> not hasattr(test, 'number') and test.data == 0
True
Field Container Callbacks
^^^^^^^^^^^^^^^^^^^^^^^^^
A field container may define callable attributes (e.g. methods), which are
called while changing fields. :func:`FIELDS_before_prepare` is called before
the fields are prepared with the mapping of field values to set.
:func:`FIELDS_before_modifications` is called just before the fields are set
with a mutable mapping being a view on the field values, which keeps track of
the changes to apply. After the fields have been set
:func:`FIELDS_after_modifications` is called with an immutable mapping being
a view on the field values.
.. function:: .FIELDS_before_prepare(field_values)
Called before preparing the field values.
:param field_values: The mutable mapping from field name to field value
containing an entry for each field to set. Field values being
:attr:`DeleteField` denote the field to be deleted.
.. function:: .FIELDS_before_modifications(fields_proxy)
Called before modifying the fields.
:param fields_proxy: A mutable mapping from field name to field value for
all fields of the container, but with values being as they will be
after applying the modifications. Changes (setting or deleting items)
are not applied to the underlying fields mapping, but are executed
when the modifications are applied. The attributes ``changed`` and
``deleted`` contain iterators over the names of changed or deleted
fields.
.. function:: .FIELDS_after_modifications(fields_proxy)
Called after setting the fields with the prepared values.
:param fields_proxy: An immutable mapping from field name to field value
for all fields of the container. The attributes ``changed`` and
``deleted`` contain iterators over the names of changed or deleted
fields.
Here's an example field container which prints out information and sets the
field revision on changes:
>>> @field('revision')
... class CallbackTest(Test):
... def __init__(self, **fields):
... self.FIELDS = fields
...
... def FIELDS_before_prepare(self, field_values):
... print('Before preparation of:')
... for name in sorted(field_values.keys()):
... print(' {} = {}'.format(name, repr(field_values[name])))
...
... def FIELDS_before_modifications(self, fields_proxy):
... print('Changes:')
... for name in fields_proxy.changed:
... print(' {} = {}'.format(name, repr(fields_proxy[name])))
... print('To delete: {}'.format(', '.join(fields_proxy.deleted)))
... try:
... revision = self.revision + 1
... except AttributeError:
... revision = 0
... fields_proxy['revision'] = revision
...
... def FIELDS_after_modifications(self, fields_proxy):
... print('Revision: {}'.format(self.revision))
>>> callback_test = CallbackTest(number='1', data=None)
Before preparation of:
data = None
number = '1'
Changes:
number = 1
data = None
To delete:\x20
Revision: 0
>>> callback_test.FIELDS = dict(number=DeleteField, data='data')
Before preparation of:
data = 'data'
number = <enhatts.DeleteField>
Changes:
data = 'data'
To delete: number
Revision: 1
The callbacks are also executed if only a single field is modified:
>>> try:
... callback_test.number = None
... except TypeError as e:
... print('ERROR: Value cannot be converted by int().')
Before preparation of:
number = None
ERROR: Value cannot be converted by int().
>>> callback_test.number = '2'
Before preparation of:
number = '2'
Changes:
number = 2
To delete:\x20
Revision: 2
>>> del callback_test.number
Before preparation of:
number = <enhatts.DeleteField>
Changes:
To delete: number
Revision: 3
Inheritance
^^^^^^^^^^^
The fields on classes extending field containers are appended to the existing
fields. Fields can also be redefined, which doesn't change the position:
>>> class Data(Data):
... DEFAULT = False
...
>>> @field('attributes', ValueField, 'data')
... @field('number', DEFAULT=True)
... class Extending(Test):
... pass
...
>>> len(Extending.FIELDS)
3
>>> for name, field_obj in Extending.FIELDS.items():
... print('{}: {}'.format(name, field_obj))
...
number: <class 'enhatts.<anonymous:enhatts.DataField>'>
attributes: <class 'enhatts.ValueField'>
data: <class 'enhatts.Data'>
>>> print(repr(Extending.FIELDS))
FIELDS on <class 'enhatts.Extending'>: {number: <class 'enhatts.<anonymous:enhatts.DataField>'>, attributes: <class 'enhatts.ValueField'>, data: <class 'enhatts.Data'>}
>>> Extending.data is False
True
>>> Extending.number is True
True
>>> extending = Extending()
>>> extending.FIELDS = {'attributes': 2, 'data': 3}
>>> print(repr(extending.FIELDS)) #doctest: +ELLIPSIS
FIELDS on <enhatts.Extending object at 0x...>: {number: <UNSET enhatts.<anonymous:enhatts.DataField> object>, attributes: <enhatts.ValueField object: 2>, data: <enhatts.Data object: 3>}
Multiple inheritance works the same. We define a diamond inheritance:
>>> @field('a')
... @field('b')
... class A(object):
... pass
...
>>> @field('a')
... @field('c')
... class B(A): pass
...
>>> @field('d')
... @field('b')
... class C(A): pass
...
>>> @field('e')
... class D(B, C): pass
This leads to the following field orders:
>>> list(A.FIELDS.keys())
['a', 'b']
>>> list(B.FIELDS.keys())
['a', 'b', 'c']
>>> list(C.FIELDS.keys())
['a', 'b', 'd']
>>> list(D.FIELDS.keys())
['a', 'b', 'c', 'd', 'e']
API
---
.. function:: field(name, _field_class=DataField, _before=None, **attributes)
Creates a class decorator, which does the following on the class it is
applied to:
1. If it does not yet exist, it creates the attribute ``FIELDS`` on the
class containing a descriptor for access to the fields mapping.
2. It registers a field with name ``name`` on the fields mapping. If
``attributes`` are given an :func:`tinkerpy.anonymous_class` based on
``_field_class`` is used as the field class, otherwise
``_field_class`` is used as the field class.
If ``_field_class`` is not a class object (i.e. not of type
:class:`type`), it is interpreted as a string. This triggers lazy field
class lookup, meaning the class to use is taken from the module the
field container class is defined in.
:param name: The name of the field.
:type name: :class:`str`
:param _field_class: The class to use as a field class or as the base of
an anonymous field class. If this is not a new-style class, it is used
as a string value, this triggers lazy field class lookup.
:param _before: The field the newly defined field should be inserted
before. If this is :const:`None`, the field will be inserted as the
first.
:type _before: :class:`str`
:param attributes: If values are given, an
:func:`tinkerpy.anonymous_class` is created with these attributes and
`_field_class` as the base and used as the field class.
:returns: A class decorator which creates a field on the class it is
applied to.
You can also retrieve attributes from this object (except those from
:class:`object`) which returns functions calling :func:`field` with
``name`` being the retrieved attribute name.
.. function:: .name(_field_class=DataField, _before=None, **attributes)
Calls :func:`field` with the function name as the first argument
``name`` and the function arguments as the appropriate arguments to
:func:`field`.
.. autoclass:: Field
.. autoclass:: DataField
.. autoclass:: ValueField
.. autoclass:: FieldPreparationErrors
.. attribute:: DeleteField
A static value indicating to delete a field value when setting a single or
multiple fields.
'''
import collections
import sys
class Field(object):
    '''\
    This class defines the field protocol.

    :param container: The field container, this becomes the :attr:`container`
        value.
    :param name: The name of the field, it becomes the :attr:`name` value.

    A field class is instantiated once for each field container instance with
    the field's name and the container as the argument. On access to the field
    the methods defined here are called.

    Getting
        The return value of calling :meth:`default` is returned on getting a
        field's value on a field container class.

        On reading a field's value on a field container instance, the result
        of calling :meth:`get` is returned.

    Setting
        Before setting a field's value, :meth:`prepare` is called. If this
        does not raise an exception, :meth:`set` is called to write the
        field's value.

    Deleting
        :meth:`delete` is called on deletion of a field's value.

    The byte and Unicode string values returned by instances of this class
    are the respective string values of the field value.

    Comparisons compare field values. If there is no field value set, all
    comparisons except ``!=`` return :const:`False`. If the compared value
    also has a :meth:`get` method, the return value is used for comparison,
    otherwise the compared value itself.
    '''
    def __init__(self, container, name):
        self._container = container
        self._name = name

    @property
    def container(self):
        '''\
        The field container instance.
        '''
        return self._container

    @property
    def name(self):
        '''\
        The name of the field. The field is accessible through an attribute of
        this name and under this name in the ``FIELDS`` mapping on a field
        container class and instance.
        '''
        return self._name

    @classmethod
    def default(cls, container_cls, name):
        '''\
        The default value of the field. This implementation returns the value
        of the attribute ``DEFAULT`` on ``cls`` and thus will raise an
        :class:`AttributeError` if this does not exist.

        :param container_cls: The field container class.
        :param name: The field name.
        :raises AttributeError: if there is no attribute ``DEFAULT`` on
            ``cls``.
        :returns: the value of the attribute ``DEFAULT`` on ``cls``.
        '''
        return cls.DEFAULT

    def prepare(self, value, field_values):
        '''\
        Prepares the ``value`` to set on the field container instance
        and should raise an exception, if ``value`` is not valid.

        :param value: The value to prepare.
        :param field_values: A read-only proxy mapping to the field values,
            returning the current field values shadowed by all yet prepared
            field values. The attributes ``changed`` and ``deleted`` contain
            iterators over the names of changed or deleted fields.
        :raises Exception: if ``value`` is not valid.
        :returns: The prepared value, this implementation returns ``value``
            unchanged.
        '''
        return value

    def set(self, value):
        '''\
        Should set the field's value to ``value`` on the field container
        instance or throw an exception, if the field should not be writeable.

        This implementation raises an :class:`AttributeError` and does nothing
        else.

        :param value: The value to write.
        :raises AttributeError: in this implementation.
        '''
        raise AttributeError('Writing not allowed.')

    def get(self):
        '''\
        Should return the field's value on the field container instance or
        throw an exception, if the field should not be readable.

        This implementation raises an :class:`AttributeError` and does nothing
        else.

        :raises AttributeError: in this implementation.
        :returns: should return the field's value.
        '''
        raise AttributeError('Reading not allowed.')

    def delete(self):
        '''\
        Should delete the field's value on the field container instance or
        throw an exception, if the field should not be deleteable.

        This implementation raises an :class:`AttributeError` and does nothing
        else.

        :raises AttributeError: in this implementation.
        '''
        raise AttributeError('Deletion not allowed.')

    def __repr__(self):
        # ``object.__repr__`` yields '<module.Class object at 0x...>'; strip
        # the address part so the representation stays stable.
        object_repr = object.__repr__(self)
        object_repr = object_repr[:object_repr.rfind(' at ')] + '>'
        try:
            value = self.get()
        except AttributeError:
            # No value is set on this field: mark it as UNSET.
            return '<UNSET {}'.format(object_repr[1:])
        else:
            return '{}: {}>'.format(object_repr[:-1], repr(value))

    def _get_cmp_values(self, other):
        # Helper for the rich comparisons: unwrap ``other`` if it is
        # field-like (has a ``get`` method), otherwise compare against the
        # raw value.  Propagates AttributeError if this field is unset.
        value = self.get()
        if hasattr(other, 'get'):
            other_value = other.get()
        else:
            other_value = other
        return value, other_value

    def __eq__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            # Unset fields compare unequal to everything.
            return False
        return value == other_value

    def __ne__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            # Unset fields compare unequal to everything, so ``!=`` is True.
            return True
        return value != other_value

    def __lt__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            return False
        return value < other_value

    def __le__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            return False
        return value <= other_value

    def __gt__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            return False
        return value > other_value

    def __ge__(self, other):
        try:
            value, other_value = self._get_cmp_values(other)
        except AttributeError:
            return False
        return value >= other_value

    def __str__(self):
        return str(self.get())

    # Version-dependent string conversion: ``bytes(field)`` on Python 3,
    # ``unicode(field)`` on Python 2 -- both delegate to the field value.
    if sys.version_info[0] > 2:
        def __bytes__(self):
            return bytes(self.get())
    else:
        def __unicode__(self):
            return unicode(self.get())
class DataField(Field):
    '''\
    A readable, writeable and deleteable :class:`Field` implementation which
    stores the field's value on the field container instance, in an attribute
    named ``_FIELD_`` followed by the field's name.
    '''
    def __init__(self, container, name):
        Field.__init__(self, container, name)
        self._attribute_name = '_FIELD_' + name

    def set(self, value):
        '''\
        Writes ``value`` to the backing attribute on the field container
        instance.

        :param value: The value to write.
        '''
        setattr(self._container, self._attribute_name, value)

    def get(self):
        '''\
        Reads the field's value from the backing attribute on the field
        container instance.

        :raises AttributeError: if the field's value is not set.
        :returns: the field's value.
        '''
        return getattr(self._container, self._attribute_name)

    def delete(self):
        '''\
        Removes the backing attribute from the field container instance.

        :raises AttributeError: if the field's value is not set.
        '''
        delattr(self._container, self._attribute_name)
class ValueField(Field):
    '''\
    A readable, writable and deletable :class:`Field` implementation which
    keeps the field's data directly on the field instance, exposed through
    the read-only property :attr:`value`.
    '''
    def set(self, value):
        '''\
        Stores ``value`` on this field instance.

        :param value: The value to store.
        '''
        self._value = value

    def get(self):
        '''\
        Returns the value stored on this field instance.

        :raises AttributeError: if no value has been stored yet.
        :returns: the stored field value.
        '''
        return self._value

    def delete(self):
        '''\
        Discards the value stored on this field instance.

        :raises AttributeError: if no value has been stored yet.
        '''
        del self._value

    @property
    def value(self):
        '''\
        The value stored in the field instance.
        '''
        return self._value
class field(object):
    '''\
    Factory for field-defining class decorators.  The class name is rebound
    below to a singleton instance, which is used either by calling it
    directly (``field(name, ...)``) or through the attribute shorthand
    (``field.name(...)``).
    '''
    def __call__(self, name, _field_class=DataField, _before=None,
            **attributes):
        name = str(name)
        if _before is not None:
            _before = str(_before)
        def decorator(cls):
            # Imported lazily inside the decorator; presumably to avoid a
            # circular import with enhatts._descriptors -- confirm before
            # hoisting to module level.
            from enhatts._descriptors import FieldDescriptor, fields_descriptor
            fields = fields_descriptor(cls)
            field_class = _field_class
            if not isinstance(field_class, type):
                # Not a class object: treat it as a field-class name, which
                # triggers lazy lookup in the container class's module.
                field_class = str(field_class)
            fields._register(name, field_class, _before, attributes)
            # Install the descriptor that exposes the field as an attribute
            # of the same name on the container class.
            FieldDescriptor(cls, name)
            return cls
        return decorator

    def __getattr__(self, name):
        # Attribute shorthand: ``field.foo(...)`` == ``field('foo', ...)``.
        return lambda _field_class=DataField, _before=None, **attributes: (
            self(name, _field_class, _before, **attributes))

field = field()  # replace the class with its module-level singleton
class DeleteField(object):
    '''\
    A static value indicating to delete a field value when setting a single or
    multiple fields.  The class name is rebound below to a singleton instance.
    '''
    def __init__(self):
        # Compute the representation once at construction time.
        self._repr = self._create_repr()

    @classmethod
    def _create_repr(cls):
        return '<%s.%s>' % (cls.__module__, cls.__name__)

    def __str__(self):
        return self._repr

    __repr__ = __str__

DeleteField = DeleteField()
# Compatibility shim: the mapping ABCs moved to ``collections.abc`` in
# Python 3.3 and the aliases in ``collections`` itself were removed in
# Python 3.10, so resolve ``Mapping`` in a way that works everywhere.
try:
    from collections import abc as _collections_abc
    _Mapping = _collections_abc.Mapping
except ImportError:  # Python 2: the ABCs live directly in ``collections``
    _Mapping = collections.Mapping


class FieldPreparationErrors(Exception, _Mapping):
    '''\
    This exception is thrown if preparation of at least one field fails, when
    setting multiple fields at once by assigning a mapping to the ``FIELDS``
    attribute on a field container instance.

    This exception is a mapping from field names to the appropriate exception
    objects thrown by the :meth:`Field.prepare` calls.
    '''
    def __init__(self, exceptions):
        '''\
        :param exceptions: A mapping from field name to the exception raised
            by that field's :meth:`Field.prepare` call.
        '''
        message = 'Setting the field{} {} failed.'.format(
            's' if len(exceptions) > 1 else '',
            ', '.join('"{}"'.format(name) for name in exceptions)
        )
        Exception.__init__(self, message)
        self._exceptions = exceptions

    def __getitem__(self, name):
        return self._exceptions[name]

    def __iter__(self):
        return iter(self._exceptions)

    def __len__(self):
        return len(self._exceptions)

    def __hash__(self):
        # ``Mapping`` sets ``__hash__`` to None, so an explicit definition
        # is required; combine the exception's own hash with each entry.
        hash_value = Exception.__hash__(self)
        for name in self._exceptions:
            current_hash = hash(name) ^ hash(self._exceptions[name])
            hash_value = hash_value ^ current_hash
        return hash_value

    def __eq__(self, other):
        # BUG FIX: the original compared ``self.message``, an attribute that
        # no longer exists on exceptions in Python 3 (removed with PEP 352)
        # and therefore raised AttributeError there; ``args`` carries the
        # constructor message portably on both Python 2 and 3.
        return (isinstance(other, FieldPreparationErrors)
            and self.args == other.args
            and self._exceptions == other._exceptions)
# Remove imported helper modules from the module namespace so they are not
# re-exported as part of the public API.
del collections, sys
jonashaag/django-nonrel-nohistory | refs/heads/master | tests/regressiontests/admin_scripts/models.py | 119 | from django.db import models
class Article(models.Model):
    # Minimal fixture model used by the admin_scripts regression tests.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    def __unicode__(self):
        # Python 2 string representation (shown in the admin and shell).
        return self.headline
    class Meta:
        # Newest articles first; ties broken alphabetically by headline.
        ordering = ('-pub_date', 'headline')
|
kristavan/helpqueue | refs/heads/master | hq/hq/settings.py | 1 | """
Django settings for hq project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (os.environ) before any public deployment.
SECRET_KEY = '@a&7x7yw@9s)0ippo7&9byt9bjuzl&bnvc!9nd+me4*nkw5$8!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'usermain',
    'homepage',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hq.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hq.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# NOTE: SQLite is fine for development; swap for Postgres/MySQL in production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
crafty78/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py | 6 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by ansible-doc and Ansible's CI tooling.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = '''
module: ec2_vpc_peer
short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
description:
- Read the AWS documentation for VPC Peering Connections
U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html)
version_added: "2.2"
options:
vpc_id:
description:
- VPC id of the requesting VPC.
required: false
peering_id:
description:
- Peering connection id.
required: false
peer_vpc_id:
description:
- VPC id of the accepting VPC.
required: false
peer_owner_id:
description:
- The AWS account number for cross account peering.
required: false
tags:
description:
- Dictionary of tags to look for and apply when creating a Peering Connection.
required: false
state:
description:
- Create, delete, accept, reject a peering connection.
required: false
default: present
choices: ['present', 'absent', 'accept', 'reject']
author: Mike Mochan(@mmochan)
extends_documentation_fragment: aws
requirements: [ botocore, boto3, json ]
'''
EXAMPLES = '''
# Complete example to create and accept a local peering connection.
- name: Create local account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept local VPC peering request
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
state: accept
register: action_peer
# Complete example to delete a local peering connection.
- name: Create local account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: delete a local VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
state: absent
register: vpc_peer
# Complete example to create and accept a cross account peering connection.
- name: Create cross account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-12345678
peer_owner_id: 123456789102
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept peering connection from remote account
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
profile: bot03_profile_for_cross_account
state: accept
register: vpc_peer
# Complete example to create and reject a local peering connection.
- name: Create local account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Reject a local VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
state: reject
# Complete example to create and accept a cross account peering connection.
- name: Create cross account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-12345678
peer_owner_id: 123456789102
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept a cross account VPC peering connection request
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
profile: bot03_profile_for_cross_account
state: accept
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
# Complete example to create and reject a cross account peering connection.
- name: Create cross account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-12345678
peer_owner_id: 123456789102
state: present
tags:
      Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Reject a cross account VPC peering Connection
ec2_vpc_peer:
region: ap-southeast-2
peering_id: "{{ vpc_peer.peering_id }}"
profile: bot03_profile_for_cross_account
state: reject
'''
RETURN = '''
task:
description: The result of the create, accept, reject or delete action.
returned: success
type: dictionary
'''
try:
    import json
    import botocore
    import boto3
    HAS_BOTO3 = True
except ImportError:
    # Missing dependencies are reported via module.fail_json() in main(),
    # not at import time.
    HAS_BOTO3 = False
def tags_changed(pcx_id, client, module):
    """Return True if the connection's tags differ from the module's ``tags``.

    When they differ, the remote tags are replaced (delete + re-create) as a
    side effect before returning True.
    """
    changed = False
    tags = dict()
    if module.params.get('tags'):
        tags = module.params.get('tags')
    pcx = find_pcx_by_id(pcx_id, client, module)
    if pcx['VpcPeeringConnections']:
        # Flatten [{'Key': k, 'Value': v}, ...] into [k, v, k, v, ...] so the
        # two tag sets can be compared order-insensitively via sorted().
        pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']]
        pcx_tags = [item for sublist in pcx_values for item in sublist]
        tag_values = [[key, str(value)] for key, value in tags.items()]
        tags = [item for sublist in tag_values for item in sublist]
        if sorted(pcx_tags) == sorted(tags):
            changed = False
            return changed
        else:
            # Out of sync: replace the remote tags with the requested set.
            delete_tags(pcx_id, client, module)
            create_tags(pcx_id, client, module)
            changed = True
            return changed
    return changed
def describe_peering_connections(params, client):
    """Look up an existing peering connection between the two VPCs.

    Queries requester=VpcId / accepter=PeerVpcId first and, when nothing is
    found, retries with the two VPC ids swapped (the connection may have been
    created from the other side).
    """
    def lookup(requester, accepter):
        return client.describe_vpc_peering_connections(Filters=[
            {'Name': 'requester-vpc-info.vpc-id', 'Values': [requester]},
            {'Name': 'accepter-vpc-info.vpc-id', 'Values': [accepter]}
        ])
    result = lookup(params['VpcId'], params['PeerVpcId'])
    if not result['VpcPeeringConnections']:
        result = lookup(params['PeerVpcId'], params['VpcId'])
    return result
def is_active(peering_conn):
    """Return True when the peering connection has reached the ``active`` state."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'active'
def is_pending(peering_conn):
    """Return True when the peering connection is still awaiting acceptance."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'pending-acceptance'
def create_peer_connection(client, module):
    """Create (or reuse) the peering connection between the two VPCs.

    Idempotent: an existing active or pending-acceptance connection is
    returned (with its tags reconciled) instead of creating a duplicate.
    Returns a ``(changed, peering_connection_id)`` tuple.
    """
    changed = False
    params = dict()
    params['VpcId'] = module.params.get('vpc_id')
    params['PeerVpcId'] = module.params.get('peer_vpc_id')
    if module.params.get('peer_owner_id'):
        # Cross-account peering: the accepter lives in another AWS account.
        params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
    params['DryRun'] = module.check_mode
    peering_conns = describe_peering_connections(params, client)
    for peering_conn in peering_conns['VpcPeeringConnections']:
        pcx_id = peering_conn['VpcPeeringConnectionId']
        if tags_changed(pcx_id, client, module):
            changed = True
        if is_active(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
        if is_pending(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
    try:
        peering_conn = client.create_vpc_peering_connection(**params)
        pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
        if module.params.get('tags'):
            create_tags(pcx_id, client, module)
        changed = True
        return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
def peer_status(client, module):
    """Return the status code string of the connection named by ``peering_id``."""
    peering_id = module.params.get('peering_id')
    response = client.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[peering_id])
    return response['VpcPeeringConnections'][0]['Status']['Code']
def accept_reject_delete(state, client, module):
    """Accept, reject or delete the connection named by ``peering_id``.

    Returns a ``(changed, peering_connection_id)`` tuple.
    """
    changed = False
    params = dict()
    params['VpcPeeringConnectionId'] = module.params.get('peering_id')
    params['DryRun'] = module.check_mode
    # Dispatch table: module state -> boto3 client call.
    invocations = {
        'accept': client.accept_vpc_peering_connection,
        'reject': client.reject_vpc_peering_connection,
        'absent': client.delete_vpc_peering_connection
    }
    # Deletion is always attempted; accept/reject only while not yet active.
    if state == 'absent' or peer_status(client, module) != 'active':
        try:
            invocations[state](**params)
            if module.params.get('tags'):
                create_tags(params['VpcPeeringConnectionId'], client, module)
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
    if tags_changed(params['VpcPeeringConnectionId'], client, module):
        changed = True
    return changed, params['VpcPeeringConnectionId']
def load_tags(module):
    """Convert the module's ``tags`` dict into the boto3 ``Tags`` list format.

    Values are stringified; returns an empty list when no tags were given.
    """
    raw_tags = module.params.get('tags')
    if not raw_tags:
        return []
    return [{'Key': name, 'Value': str(value)} for name, value in raw_tags.items()]
def create_tags(pcx_id, client, module):
    """Replace all tags on the peering connection with the module's ``tags``."""
    try:
        # Clear existing tags first so keys removed from the playbook do not
        # linger on the resource.
        delete_tags(pcx_id, client, module)
        client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
def delete_tags(pcx_id, client, module):
    """Remove all tags from the peering connection, failing the module on API errors."""
    try:
        client.delete_tags(Resources=[pcx_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
def find_pcx_by_id(pcx_id, client, module):
    """Describe a single peering connection by id, failing the module on API errors."""
    try:
        return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
def main():
    """Module entry point: parse arguments, connect to EC2 and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        peer_vpc_id=dict(),
        peering_id=dict(),
        peer_owner_id=dict(),
        tags=dict(required=False, type='dict'),
        profile=dict(),
        state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
    )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - "+str(e))
    if state == 'present':
        (changed, results) = create_peer_connection(client, module)
        module.exit_json(changed=changed, peering_id=results)
    else:
        # 'accept', 'reject' and 'absent' all operate on an existing connection.
        (changed, results) = accept_reject_delete(state, client, module)
        module.exit_json(changed=changed, peering_id=results)
# import module snippets
# NOTE: bottom-of-file star imports are the legacy Ansible module convention;
# AnsibleModule, ec2_argument_spec, get_aws_connection_info and boto3_conn
# used above all come from these.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
|
lancezlin/airflow | refs/heads/master | airflow/hooks/__init__.py | 5 | '''
Imports the hooks dynamically while keeping the package API clean,
abstracting the underlying modules
'''
from airflow.utils import import_module_attrs as _import_module_attrs
from airflow.hooks.base_hook import BaseHook as _BaseHook
# Mapping: hook submodule name -> attribute names hoisted into this package's
# namespace, so ``from airflow.hooks import X`` keeps working while the real
# implementations live in per-backend submodules.
_hooks = {
    'hive_hooks': [
        'HiveCliHook',
        'HiveMetastoreHook',
        'HiveServer2Hook',
    ],
    'hdfs_hook': ['HDFSHook'],
    'mysql_hook': ['MySqlHook'],
    'postgres_hook': ['PostgresHook'],
    'presto_hook': ['PrestoHook'],
    'samba_hook': ['SambaHook'],
    'sqlite_hook': ['SqliteHook'],
    'S3_hook': ['S3Hook'],
    'http_hook': ['HttpHook'],
    'druid_hook': ['DruidHook'],
    'jdbc_hook': ['JdbcHook'],
    'dbapi_hook': ['DbApiHook'],
    'mssql_hook': ['MsSqlHook'],
    'oracle_hook': ['OracleHook'],
}
# Perform the dynamic imports into this module's globals.
_import_module_attrs(globals(), _hooks)
def integrate_plugins():
    """Integrate plugins to the context"""
    # Imported lazily: the plugin manager must not load at package import time.
    from airflow.plugins_manager import hooks as plugin_hooks
    globals().update((hook.__name__, hook) for hook in plugin_hooks)
|
grendel513/django-DefectDojo | refs/heads/master | cryptotest.py | 2 | import binascii, os
from Crypto.Cipher import AES
# NOTE(review): hard-coded key in a throwaway test script only -- never commit
# real keys.  Also, this 64-character hex string is passed to AES.new()
# verbatim (64 bytes); AES accepts 16/24/32-byte keys, so verify this script
# actually runs against the installed PyCrypto version.
KEY = 'a0b8c7398c9363b3216ff1d001a1308e5f96a77dbf6bda2367f87519d80995fb'
# A fresh random IV per interpreter run (reused for every message below,
# which is acceptable only for this demo).
IV = os.urandom(16)
def encrypt(key, iv, plaintext):
    """Zero-pad *plaintext*, AES-CBC encrypt it and return the hex encoding."""
    # NOTE(review): ``segment_size`` only applies to CFB mode; CBC
    # implementations ignore it -- confirm it is intentional here.
    aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
    plaintext = _pad_string(plaintext)
    encrypted_text = aes.encrypt(plaintext)
    return binascii.b2a_hex(encrypted_text).rstrip()
def decrypt(key, iv, encrypted_text):
    """Hex-decode *encrypted_text*, AES-CBC decrypt it and strip zero padding."""
    # NOTE(review): ``segment_size`` only applies to CFB mode -- see encrypt().
    aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
    encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
    decrypted_text = aes.decrypt(encrypted_text_bytes)
    decrypted_text = _unpad_string(decrypted_text)
    return decrypted_text
def _pad_string(value):
length = len(value)
pad_size = 16 - (length % 16)
return value.ljust(length + pad_size, '\x00')
def _unpad_string(value):
while value[-1] == '\x00':
value = value[:-1]
return value
def prepare_for_save(IV, encrypted_value):
    """Serialise an encrypted value as "AES.1:<hex-iv>:<ciphertext-hex>".

    The IV is stored alongside the ciphertext so the value can be decrypted
    later; the "AES.1" prefix versions the storage format.
    """
    # BUG FIX: removed a stray ``binascii.b2a_hex(encrypted_text).rstrip()``
    # statement whose result was discarded and which referenced a module-level
    # global instead of the ``encrypted_value`` parameter.
    stored_value = "AES.1:" + binascii.b2a_hex(IV).rstrip() + ":" + encrypted_value
    return stored_value
def prepare_for_view(encrypted_value):
    """Parse a stored "AES.1:<hex-iv>:<ciphertext>" string and decrypt it."""
    encrypted_values = encrypted_value.split(":")
    # encrypted_values[0] is the format tag ("AES.1"); it is not needed here
    # (the original bound it to ``type``, shadowing the builtin).
    # BUG FIX: do not .rstrip() the decoded IV -- an IV whose trailing bytes
    # happen to be whitespace characters would be silently corrupted, making
    # the value undecryptable.
    iv = binascii.a2b_hex(encrypted_values[1])
    value = encrypted_values[2]
    return decrypt(KEY, iv, value)
if __name__ == '__main__':
    # Round-trip smoke test: encrypt, decrypt, then exercise the "AES.1"
    # storage helpers (Python 2 script).
    input_plaintext = 'The answer is no'
    encrypted_text = encrypt(KEY, IV, input_plaintext)
    print encrypted_text
    decrypted_text = decrypt(KEY, IV, encrypted_text)
    print decrypted_text
    print prepare_for_save(IV, encrypted_text)
    print "*****"
    # Fixed vector: a previously stored value with its own embedded IV.
    print prepare_for_view("AES.1:fff2e6659bef045f25f8249d36f58789:178e6f316b680b486e4e6b8cc79f589e")
    assert decrypted_text == input_plaintext
|
odahoda/noisicaa | refs/heads/master | noisicaa/builtin_nodes/control_track/model.py | 1 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import logging
import random
from typing import cast, Any, Dict, Optional, Callable
from noisicaa import core
from noisicaa import audioproc
from noisicaa import node_db
from noisicaa import music
from noisicaa.music import node_connector
from . import node_description
from . import processor_messages
from . import _model
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class ControlTrackConnector(node_connector.NodeConnector):
    """Mirrors a ControlTrack's control points into the audio processor.

    Listens for model changes (point list insert/delete and per-point
    time/value edits) and forwards them to the processor as messages.
    """
    _node = None  # type: ControlTrack
    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.__node_id = self._node.pipeline_node_id
        self.__listeners = core.ListenerMap[str]()
        self.add_cleanup_function(self.__listeners.cleanup)
        # Maps model point id -> random 64-bit id used on the processor side.
        self.__point_ids = {}  # type: Dict[int, int]
    def _init_internal(self) -> None:
        # Publish the points that already exist, then track list changes.
        for point in self._node.points:
            self.__add_point(point)
        self.__listeners['points'] = self._node.points_changed.add(
            self.__points_list_changed)
    def __points_list_changed(self, change: music.PropertyChange) -> None:
        if isinstance(change, music.PropertyListInsert):
            self.__add_point(change.new_value)
        elif isinstance(change, music.PropertyListDelete):
            self.__remove_point(change.old_value)
        else:
            raise TypeError("Unsupported change type %s" % type(change))
    def __add_point(self, point: 'ControlPoint') -> None:
        point_id = self.__point_ids[point.id] = random.getrandbits(64)
        self._emit_message(processor_messages.add_control_point(
            node_id=self.__node_id,
            id=point_id,
            time=point.time,
            value=point.value))
        # Keep the processor in sync whenever the point itself is edited.
        self.__listeners['cp:%s:time' % point.id] = point.time_changed.add(
            lambda _: self.__point_changed(point))
        self.__listeners['cp:%s:value' % point.id] = point.value_changed.add(
            lambda _: self.__point_changed(point))
    def __remove_point(self, point: 'ControlPoint') -> None:
        point_id = self.__point_ids[point.id]
        self._emit_message(processor_messages.remove_control_point(
            node_id=self.__node_id,
            id=point_id))
        del self.__listeners['cp:%s:time' % point.id]
        del self.__listeners['cp:%s:value' % point.id]
    def __point_changed(self, point: 'ControlPoint') -> None:
        # The processor message API has no "update": remove then re-add under
        # the same processor-side id.
        point_id = self.__point_ids[point.id]
        self._emit_message(processor_messages.remove_control_point(
            node_id=self.__node_id,
            id=point_id))
        self._emit_message(processor_messages.add_control_point(
            node_id=self.__node_id,
            id=point_id,
            time=point.time,
            value=point.value))
class ControlPoint(_model.ControlPoint):
    """A single (time, value) automation point on a control track."""
    def create(
            self, *,
            time: Optional[audioproc.MusicalTime] = None, value: Optional[float] = None,
            **kwargs: Any) -> None:
        super().create(**kwargs)
        self.time = time
        self.value = value
    def _set_time(self, value: audioproc.MusicalTime) -> None:
        # Enforce strictly increasing, non-negative point times relative to
        # the neighbouring points -- but only once attached to a track.
        if self.parent is not None:
            if not self.is_first:
                if value <= cast(ControlPoint, self.prev_sibling).time:
                    raise ValueError("Control point out of order.")
            else:
                # First point must not lie before the start of the track.
                if value < audioproc.MusicalTime(0, 4):
                    raise ValueError("Control point out of order.")
            if not self.is_last:
                if value >= cast(ControlPoint, self.next_sibling).time:
                    raise ValueError("Control point out of order.")
        super()._set_time(value)
class ControlTrack(_model.ControlTrack):
    """Track holding an ordered-by-time list of control (automation) points."""
    @property
    def description(self) -> node_db.NodeDescription:
        # Static node description shared by all control tracks.
        return node_description.ControlTrackDescription
    def create_node_connector(
            self,
            message_cb: Callable[[audioproc.ProcessorMessage], None],
            audioproc_client: audioproc.AbstractAudioProcClient,
    ) -> ControlTrackConnector:
        return ControlTrackConnector(
            node=self, message_cb=message_cb, audioproc_client=audioproc_client)
    def create_control_point(self, time: audioproc.MusicalTime, value: float) -> ControlPoint:
        """Insert a new point, keeping the list sorted by time.

        :raises ValueError: if a point already exists at *time*.
        """
        for insert_index, point in enumerate(self.points):
            if point.time == time:
                raise ValueError("Duplicate control point")
            if point.time > time:
                break
        else:
            # Later than every existing point: append at the end.
            insert_index = len(self.points)
        control_point = self._pool.create(ControlPoint, time=time, value=value)
        self.points.insert(insert_index, control_point)
        return control_point
    def delete_control_point(self, point: ControlPoint) -> None:
        """Remove *point* from this track."""
        del self.points[point.index]
|
ojengwa/talk | refs/heads/master | venv/lib/python2.7/site-packages/django/core/mail/message.py | 43 | from __future__ import unicode_literals
import mimetypes
import os
import random
import sys
import time
from email import (charset as Charset, encoders as Encoders,
message_from_string, generator)
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
from django.utils import six
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None  # Python defaults to BASE64
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline (header-injection guard)."""
    pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.
    """
    timeval = time.time()
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
    try:
        pid = os.getpid()
    except AttributeError:
        # No getpid() in Jython, for example.
        pid = 1
    # randrange is sufficient: the id only needs uniqueness, not secrecy.
    randint = random.randrange(100000)
    if idstring is None:
        idstring = ''
    else:
        idstring = '.' + idstring
    # DNS_NAME is a cached, lazily-resolved local FQDN wrapper.
    idhost = DNS_NAME
    msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
    return msgid
# Header names that contain structured address data (RFC #5322)
# (compared case-insensitively in forbid_multi_line_headers below)
ADDRESS_HEADERS = set([
    'from',
    'sender',
    'reply-to',
    'to',
    'cc',
    'bcc',
    'resent-from',
    'resent-sender',
    'resent-to',
    'resent-cc',
    'resent-bcc',
])
def forbid_multi_line_headers(name, val, encoding):
    """Forbids multi-line headers, to prevent header injection.

    Returns a (name, val) pair with non-ASCII values RFC 2047 encoded
    (address headers are sanitized per-address instead).
    """
    encoding = encoding or settings.DEFAULT_CHARSET
    val = force_text(val)
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    try:
        val.encode('ascii')
    except UnicodeEncodeError:
        if name.lower() in ADDRESS_HEADERS:
            val = ', '.join(sanitize_address(addr, encoding)
                            for addr in getaddresses((val,)))
        else:
            val = Header(val, encoding).encode()
    else:
        if name.lower() == 'subject':
            val = Header(val).encode()
    return str(name), val
def sanitize_address(addr, encoding):
    """Encode a single (name, address) pair for safe use in a header.

    Handles RFC 2047 encoding of the display name and IDNA encoding of
    non-ASCII domains.
    """
    if isinstance(addr, six.string_types):
        addr = parseaddr(force_text(addr))
    nm, addr = addr
    # This try-except clause is needed on Python 3 < 3.2.4
    # http://bugs.python.org/issue14291
    try:
        nm = Header(nm, encoding).encode()
    except UnicodeEncodeError:
        nm = Header(nm, 'utf-8').encode()
    try:
        addr.encode('ascii')
    except UnicodeEncodeError:  # IDN
        if '@' in addr:
            # Encode only the domain part with IDNA; the localpart uses
            # the regular header encoding.
            localpart, domain = addr.split('@', 1)
            localpart = str(Header(localpart, encoding))
            domain = domain.encode('idna').decode('ascii')
            addr = '@'.join([localpart, domain])
        else:
            addr = Header(addr, encoding).encode()
    return formataddr((nm, addr))
class MIMEMixin():
    """Serialization mixin that does not mangle body lines starting with 'From '."""
    def as_string(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as a string.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.

        This overrides the default as_string() implementation to not mangle
        lines that begin with 'From '. See bug #13433 for details.
        """
        fp = six.StringIO()
        g = generator.Generator(fp, mangle_from_=False)
        if six.PY2:
            # Python 2's Generator.flatten() has no ``linesep`` parameter.
            g.flatten(self, unixfrom=unixfrom)
        else:
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return fp.getvalue()
    if six.PY2:
        as_bytes = as_string
    else:
        def as_bytes(self, unixfrom=False, linesep='\n'):
            """Return the entire formatted message as bytes.

            Optional `unixfrom' when True, means include the Unix From_ envelope
            header.

            This overrides the default as_bytes() implementation to not mangle
            lines that begin with 'From '. See bug #13433 for details.
            """
            fp = six.BytesIO()
            g = generator.BytesGenerator(fp, mangle_from_=False)
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
            return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
    """message/rfc822 container with injection-safe, ASCII-only headers."""
    def __setitem__(self, name, val):
        # message/rfc822 attachments must be ASCII
        name, val = forbid_multi_line_headers(name, val, 'ascii')
        MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
    """MIMEText with injection-safe headers and special-cased UTF-8 handling."""
    def __init__(self, text, subtype, charset):
        self.encoding = charset
        if charset == 'utf-8':
            # Unfortunately, Python doesn't support setting a Charset instance
            # as MIMEText init parameter (http://bugs.python.org/issue16324).
            # We do it manually and trigger re-encoding of the payload.
            MIMEText.__init__(self, text, subtype, None)
            del self['Content-Transfer-Encoding']
            # Workaround for versions without http://bugs.python.org/issue19063
            if (3, 2) < sys.version_info < (3, 3, 4):
                payload = text.encode(utf8_charset.output_charset)
                self._payload = payload.decode('ascii', 'surrogateescape')
                self.set_charset(utf8_charset)
            else:
                self.set_payload(text, utf8_charset)
            self.replace_header('Content-Type', 'text/%s; charset="%s"' % (subtype, charset))
        else:
            MIMEText.__init__(self, text, subtype, charset)
    def __setitem__(self, name, val):
        # Reject newline-containing values; RFC 2047-encode non-ASCII values.
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
    """MIMEMultipart with injection-safe headers."""
    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        # ``encoding`` is remembered so header values can be encoded later.
        self.encoding = encoding
        MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, six.string_types), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if cc:
assert not isinstance(cc, six.string_types), '"cc" argument must be a list or tuple'
self.cc = list(cc)
else:
self.cc = []
if bcc:
assert not isinstance(bcc, six.string_types), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
if self.cc:
msg['Cc'] = ', '.join(self.cc)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
    def _create_mime_attachment(self, content, mimetype):
        """
        Converts the content, mimetype pair into a MIME attachment object.

        If the mimetype is message/rfc822, content may be an
        email.Message or EmailMessage object, as well as a str.
        """
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            # Text attachments are wrapped with the message's charset.
            encoding = self.encoding or settings.DEFAULT_CHARSET
            attachment = SafeMIMEText(content, subtype, encoding)
        elif basetype == 'message' and subtype == 'rfc822':
            # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
            # must not be base64 encoded.
            if isinstance(content, EmailMessage):
                # convert content into an email.Message first
                content = content.message()
            elif not isinstance(content, Message):
                # For compatibility with existing code, parse the message
                # into an email.Message object if it is not one already.
                content = message_from_string(content)

            attachment = SafeMIMEMessage(content, subtype)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        return attachment
    def _create_attachment(self, filename, content, mimetype=None):
        """
        Converts the filename, content, mimetype triple into a MIME attachment
        object.
        """
        if mimetype is None:
            # Guess the type from the filename, falling back to the
            # module-level generic default.
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        attachment = self._create_mime_attachment(content, mimetype)
        if filename:
            try:
                filename.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII filenames are emitted as an RFC 2231
                # (charset, language, value) tuple.
                if six.PY2:
                    filename = filename.encode('utf-8')
                filename = ('utf-8', '', filename)
            attachment.add_header('Content-Disposition', 'attachment',
                                  filename=filename)
        return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
            connection=None, attachments=None, headers=None, alternatives=None,
            cc=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        # Both pieces are mandatory for an alternative part.
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        # Wrap alternatives first, then let attachments wrap the result.
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        """Wraps ``msg`` in a multipart/alternative container if needed."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if not self.alternatives:
            return msg
        body_msg = msg
        msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
        if self.body:
            msg.attach(body_msg)
        for alternative in self.alternatives:
            msg.attach(self._create_mime_attachment(*alternative))
        return msg
|
neutronpy/neutronpy | refs/heads/master | neutronpy/crystal/sample.py | 3 | # -*- coding: utf-8 -*-
r"""Sample class for e.g. Instrument class
"""
import numpy as np
from .lattice import Lattice
from .tools import gram_schmidt
class Sample(Lattice):
    u"""Private class containing sample information.

    Parameters
    ----------
    a : float
        Unit cell length in angstroms

    b : float
        Unit cell length in angstroms

    c : float
        Unit cell length in angstroms

    alpha : float
        Angle between b and c in degrees

    beta : float
        Angle between a and c in degrees

    gamma : float
        Angle between a and b in degrees

    u : array_like
        First orientation vector

    v : array_like
        Second orientation vector

    mosaic : float, optional
        Horizontal sample mosaic (FWHM) in arc minutes

    vmosaic : float, optional
        Vertical sample mosaic (FWHM) in arc minutes

    direct : ±1, optional
        Direction of the crystal (left or right, -1 or +1, respectively)

    width : float, optional
        Sample width in cm. Default: 1

    height : float, optional
        Sample height in cm. Default: 1

    depth : float, optional
        Sample thickness in cm. Default: 1

    shape : str, optional
        Sample shape type. Accepts 'rectangular' or 'cylindrical'.
        Default: 'rectangular'

    distance : float, optional
        Distance from source (used for Time of Flight resolution
        calculations). Default: None

    Attributes
    ----------
    a
    b
    c
    alpha
    beta
    gamma
    u
    v
    mosaic
    vmosaic
    direct
    width
    height
    depth
    shape
    astar
    bstar
    cstar
    alpha_rad
    beta_rad
    gamma_rad
    alphastar
    betastar
    gammastar
    alphastar_rad
    betastar_rad
    gammastar_rad
    abg_rad
    reciprocal_abc
    reciprocal_abg
    reciprocal_abg_rad
    lattice_type
    volume
    reciprocal_volume
    G
    Gstar
    Bmatrix
    Umatrix
    UBmatrix

    Methods
    -------
    get_d_spacing
    get_q
    get_two_theta
    get_angle_between_planes
    get_phi

    """

    def __init__(self, a, b, c, alpha, beta, gamma, u=None, v=None, mosaic=None, vmosaic=None, direct=1,
                 width=None, height=None, depth=None, shape='rectangular', distance=None):
        super(Sample, self).__init__(a, b, c, alpha, beta, gamma)

        # Optional quantities are only set when given so that equality
        # comparisons (__eq__) only see attributes that were provided.
        if u is not None:
            self._u = np.array(u)
        if v is not None:
            self._v = np.array(v)

        if mosaic is not None:
            self.mosaic = mosaic
        if vmosaic is not None:
            self.vmosaic = vmosaic
        self.dir = direct
        self.shape_type = shape

        if width is not None:
            self.width = width
        if height is not None:
            self.height = height
        if depth is not None:
            self.depth = depth
        if distance is not None:
            self.distance = distance

    def __repr__(self):
        args = ', '.join([str(getattr(self, key)) for key in ['a', 'b', 'c', 'alpha', 'beta', 'gamma']])
        # NOTE(review): the sample shape is stored as ``shape_type``, so the
        # 'shape' key below never resolves on this class and is always
        # omitted from the repr -- confirm whether that is intended.
        kwargs = ', '.join(['{0}={1}'.format(key, getattr(self, key)) for key in
                            ['u', 'v', 'mosaic', 'vmosaic', 'direct', 'width', 'height', 'depth', 'shape'] if
                            getattr(self, key, None) is not None])
        return "Sample({0}, {1})".format(args, kwargs)

    def __eq__(self, right):
        # Two samples are equal when they carry exactly the same attribute
        # names and every attribute compares element-wise equal.
        self_parent_keys = sorted(list(self.__dict__.keys()))
        right_parent_keys = sorted(list(right.__dict__.keys()))

        if not np.all(self_parent_keys == right_parent_keys):
            return False

        for key, value in self.__dict__.items():
            right_parent_val = getattr(right, key)
            if not np.all(value == right_parent_val):
                # (A leftover debug print of the mismatching values was
                # removed here; __eq__ must not write to stdout.)
                return False

        return True

    def __ne__(self, right):
        return not self.__eq__(right)

    @property
    def u(self):
        r"""First orientation vector
        """
        return self._u

    @u.setter
    def u(self, vec):
        self._u = np.array(vec)

    @property
    def v(self):
        r"""Second orientation vector
        """
        return self._v

    @v.setter
    def v(self, vec):
        self._v = np.array(vec)

    @property
    def direct(self):
        r"""Direction of the crystal (left or right, -1 or +1, respectively)
        """
        return self.dir

    @direct.setter
    def direct(self, value):
        self.dir = value

    @property
    def Umatrix(self):
        r"""Rotation matrix that rotates the sample's reference frame into the spectrometer's
        """
        # Orthonormalize (u, v) and complete the right-handed basis with
        # their cross product.
        ortho_basis = gram_schmidt(np.vstack((self.u, self.v)))
        return np.vstack((ortho_basis, np.cross(ortho_basis[0], ortho_basis[1])))

    @property
    def UBmatrix(self):
        r"""Orientation matrix of the sample
        """
        # NOTE(review): ``*`` is an element-wise product for plain ndarrays;
        # this is only the matrix product U.B if Bmatrix (defined on
        # Lattice) is an np.matrix -- verify, otherwise np.dot is needed.
        return self.Umatrix * self.Bmatrix

    def get_phi(self, Q):
        u"""Get out-of-plane scattering angle.

        Parameters
        ----------
        Q : array_like
            Reciprocal-lattice position [h, k, l].

        Returns
        -------
        phi : float
            The angle between Q and the scattering plane spanned by the
            orientation vectors u and v.

        """
        # The scattering-plane normal is u x v; the out-of-plane angle is
        # the angle between Q and that normal's plane.
        return self.get_angle_between_planes(Q, np.cross(self.u, self.v))
|
zahanm/foodpedia | refs/heads/master | django/db/backends/postgresql/creation.py | 247 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
    """PostgreSQL-specific test-database creation and index SQL generation."""

    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField':         'serial',
        'BooleanField':      'boolean',
        'CharField':         'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField':         'date',
        'DateTimeField':     'timestamp with time zone',
        'DecimalField':      'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField':         'varchar(%(max_length)s)',
        'FilePathField':     'varchar(%(max_length)s)',
        'FloatField':        'double precision',
        'IntegerField':      'integer',
        'BigIntegerField':   'bigint',
        'IPAddressField':    'inet',
        'NullBooleanField':  'boolean',
        'OneToOneField':     'integer',
        'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
        'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
        'SlugField':         'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField':         'text',
        'TimeField':         'time',
    }

    def sql_table_creation_suffix(self):
        # PostgreSQL can only set the encoding (not a collation) at database
        # creation time, hence the hard requirement below.
        assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
        if self.connection.settings_dict['TEST_CHARSET']:
            return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
        return ''

    def sql_indexes_for_field(self, model, f, style):
        """Returns the CREATE INDEX statements for a single indexed field."""
        if f.db_index and not f.unique:
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''

            def get_index_sql(index_name, opclass=''):
                # Index names are truncated to the backend's identifier limit.
                return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                        style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
                        style.SQL_KEYWORD('ON') + ' ' +
                        style.SQL_TABLE(qn(db_table)) + ' ' +
                        "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
                        "%s;" % tablespace_sql)

            output = [get_index_sql('%s_%s' % (db_table, f.column))]

            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            db_type = f.db_type(connection=self.connection)
            if db_type.startswith('varchar'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' varchar_pattern_ops'))
            elif db_type.startswith('text'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' text_pattern_ops'))
        else:
            output = []
        return output
|
wheerd/patternmatcher | refs/heads/master | tests/common.py | 1 | # -*- coding: utf-8 -*-
from matchpy.expressions.expressions import (
Arity, Operation, Symbol, Wildcard, SymbolWildcard, make_dot_variable, make_plus_variable, make_star_variable,
make_symbol_variable
)
from .utils import MockConstraint
class SpecialSymbol(Symbol):
    """Symbol subclass used to exercise wildcards restricted to a symbol type."""
    pass
# Operation fixtures; the suffix encodes the properties:
# _u = unary, _i = one_identity, _c = commutative, _a = associative.
f = Operation.new('f', Arity.variadic)
f2 = Operation.new('f2', Arity.variadic)
f_u = Operation.new('f_u', Arity.unary)
f_i = Operation.new('f_i', Arity.variadic, one_identity=True)
f_c = Operation.new('f_c', Arity.variadic, commutative=True)
f_ci = Operation.new('f_ci', Arity.variadic, commutative=True, one_identity=True)
f2_c = Operation.new('f2_c', Arity.variadic, commutative=True)
f_a = Operation.new('f_a', Arity.variadic, associative=True)
f_ac = Operation.new('f_ac', Arity.variadic, associative=True, commutative=True)

# Plain symbols.
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
d = Symbol('d')
s = SpecialSymbol('s')

# Anonymous wildcards; _ss is restricted to SpecialSymbol instances.
_ = Wildcard.dot()
_s = Wildcard.symbol()
_ss = Wildcard.symbol(SpecialSymbol)

# Named dot (exactly one argument) and symbol variables.
x_ = make_dot_variable('x')
s_ = make_symbol_variable('s')
ss_ = make_symbol_variable('ss', SpecialSymbol)
y_ = make_dot_variable('y')
z_ = make_dot_variable('z')

# Plus (one or more arguments) wildcards/variables.
__ = Wildcard.plus()
x__ = make_plus_variable('x')
y__ = make_plus_variable('y')
z__ = make_plus_variable('z')

# Star (zero or more arguments) wildcards/variables.
___ = Wildcard.star()
x___ = make_star_variable('x')
y___ = make_star_variable('y')
z___ = make_star_variable('z')

# Optional wildcards carrying default values.
oa_ = Wildcard.optional('o', a)
oa__ = Wildcard(1, False, 'o', a)
oa___ = Wildcard(0, False, 'o', a)
o2b_ = Wildcard.optional('o2', b)

# Constraints with fixed truth values for matcher tests.
mock_constraint_false = MockConstraint(False)
mock_constraint_true = MockConstraint(True)

# Remove the raw imports so that star-imports of this module only expose
# the fixtures defined above.
del Arity
del Operation
del Symbol
del Wildcard
del MockConstraint

# Export everything public, plus the all-underscore wildcard names
# (_, __, ___) which would otherwise be excluded.
__all__ = [name for name in dir() if not name.startswith('__') or all(c == '_' for c in name)]
|
jennyzhang0215/incubator-mxnet | refs/heads/master | benchmark/python/sparse/util.py | 32 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import random
def estimate_density(DATA_PATH, feature_size):
    """Estimate the density of a sparse libsvm-style dataset.

    Runs 10 independent passes over the file, keeping each line with
    probability 1%, and averages the per-pass density estimates.

    Parameters
    ----------
    DATA_PATH : str
        Path to the dataset; each line is ``label feat:val feat:val ...``
        separated by single spaces.
    feature_size : int
        Total number of features in the dataset.

    Returns
    -------
    float
        Estimated fraction of non-zero entries, or 0.0 if no line was
        ever sampled (possible for very small files).

    Raises
    ------
    Exception
        If ``DATA_PATH`` does not exist.
    """
    if not os.path.exists(DATA_PATH):
        raise Exception("Data is not there!")
    density = []
    P = 0.01  # per-line sampling probability
    # `range` instead of the Python-2-only `xrange` keeps this Py2/Py3
    # compatible (the loop is only 10 iterations).
    for _ in range(10):
        num_non_zero = 0
        num_sample = 0
        with open(DATA_PATH) as f:
            for line in f:
                if (random.random() < P):
                    # Tokens after the label are the non-zero features.
                    num_non_zero += len(line.split(" ")) - 1
                    num_sample += 1
        # Guard against a pass that sampled nothing (small files) which
        # previously raised ZeroDivisionError.
        if num_sample > 0:
            density.append(num_non_zero * 1.0 / (feature_size * num_sample))
    if not density:
        return 0.0
    return sum(density) / len(density)
|
Xeralux/tensorflow | refs/heads/master | tensorflow/python/training/sync_replicas_optimizer_test.py | 24 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import training
# Creates the workers and return their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
  """Builds one graph and MonitoredSession per worker replica.

  Args:
    num_workers: Number of worker replicas to build.
    replicas_to_aggregate: Gradients to aggregate before applying an update.
    workers: Worker servers (e.g. from create_local_cluster) providing the
      session targets.

  Returns:
    A tuple (sessions, graphs, train_ops) with one entry per worker.
  """
  sessions = []
  graphs = []
  train_ops = []
  for worker_id in range(num_workers):
    graph = ops.Graph()
    is_chief = (worker_id == 0)
    with graph.as_default():
      # Variables are pinned to the two parameter servers.
      with ops.device("/job:ps/task:0"):
        global_step = variables.Variable(0, name="global_step", trainable=False)
        var_0 = variables.Variable(0.0, name="v0")
      with ops.device("/job:ps/task:1"):
        var_1 = variables.Variable(1.0, name="v1")
        var_sparse = variables.Variable([[3.0], [4.0]], name="v_sparse")

      # Each worker contributes gradients offset by its id so the tests can
      # compute the expected averaged updates exactly.
      with ops.device("/job:worker/task:" + str(worker_id)):
        grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
        grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
        # This is to test against sparse gradients.
        grads_sparse = ops.IndexedSlices(
            constant_op.constant(
                [0.1 + worker_id * 0.2], shape=[1, 1]),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
        sync_rep_opt = training.SyncReplicasOptimizer(
            sgd_opt,
            replicas_to_aggregate=replicas_to_aggregate,
            total_num_replicas=num_workers)
        train_op = [
            sync_rep_opt.apply_gradients(
                zip([grads_0, grads_1, grads_sparse],
                    [var_0, var_1, var_sparse]),
                global_step=global_step)
        ]
        sync_replicas_hook = sync_rep_opt.make_session_run_hook(
            is_chief, num_tokens=num_workers)

      # Creates MonitoredSession
      session = training.MonitoredTrainingSession(
          master=workers[worker_id].target,
          is_chief=is_chief,
          hooks=[sync_replicas_hook])

    sessions.append(session)
    graphs.append(graph)
    train_ops.append(train_op)

  return sessions, graphs, train_ops
class SyncReplicasOptimizerTest(test.TestCase):
  """End-to-end tests of SyncReplicasOptimizer on an in-process cluster."""

  def _run(self, train_op, sess):
    # Helper run in a thread so blocking train ops can proceed concurrently.
    sess.run(train_op)

  def test2Workers(self):
    num_workers = 2
    replicas_to_aggregate = 2
    num_ps = 2
    workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)

    # Creates and returns all the workers.
    sessions, graphs, train_ops = get_workers(num_workers,
                                              replicas_to_aggregate, workers)

    # Chief should have already initialized all the variables.
    var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
    var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
    local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
    self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
    self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
    self.assertAllEqual(0, sessions[0].run(local_step_0))

    # Will just use session 1 to verify all the variables later.
    var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
    var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
    var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
    local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
    global_step = graphs[1].get_tensor_by_name("global_step:0")

    # The steps should also be initialized.
    self.assertAllEqual(0, sessions[1].run(global_step))
    self.assertAllEqual(0, sessions[1].run(local_step_1))
    self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))

    # We have initial tokens in the queue so we can call this one by one. After
    # the first step, this will no longer work as there will be no more extra
    # tokens in the queue.
    sessions[0].run(train_ops[0])
    sessions[1].run(train_ops[1])

    # The global step should have been updated and the variables should now have
    # the new values after the average of the gradients are applied.
    while sessions[1].run(global_step) != 1:
      time.sleep(0.01)

    self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
    self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
    self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
                        sessions[1].run(var_sparse_g_1))

    # The local step for both workers should still be 0 because the initial
    # tokens in the token queue are 0s. This means that the following
    # computation of the gradients will be wasted as local_step is smaller than
    # the current global step. However, this only happens once when the system
    # just starts and this is necessary to make the system robust for the case
    # when chief gets restarted by errors/preemption/...
    self.assertAllEqual(0, sessions[0].run(local_step_0))
    self.assertAllEqual(0, sessions[1].run(local_step_1))

    sessions[0].run(train_ops[0])
    sessions[1].run(train_ops[1])
    # Although the global step should still be 1 as explained above, the local
    # step should now be updated to 1. The variables are still the same.
    self.assertAllEqual(1, sessions[1].run(global_step))
    self.assertAllEqual(1, sessions[0].run(local_step_0))
    self.assertAllEqual(1, sessions[1].run(local_step_1))
    self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
    self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))

    # At this step, the token queue is empty. So the 2 workers need to work
    # together to proceed.
    threads = []
    threads.append(
        self.checkedThread(
            target=self._run, args=(train_ops[0], sessions[0])))
    threads.append(
        self.checkedThread(
            target=self._run, args=(train_ops[1], sessions[1])))

    # The two workers starts to execute the train op.
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    # The global step should now be 2 and the gradients should have been
    # applied twice.
    self.assertAllEqual(2, sessions[1].run(global_step))
    self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
                        sessions[1].run(var_0_g_1))
    self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
                        sessions[1].run(var_1_g_1))

  # 3 workers and one of them is backup.
  def test3Workers1Backup(self):
    num_workers = 3
    replicas_to_aggregate = 2
    num_ps = 2
    workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)

    # Creates and returns all the workers.
    sessions, graphs, train_ops = get_workers(num_workers,
                                              replicas_to_aggregate, workers)

    # Chief should have already initialized all the variables.
    var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
    var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
    local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
    global_step = graphs[1].get_tensor_by_name("global_step:0")

    # The steps should also be initialized.
    self.assertAllEqual(0, sessions[1].run(global_step))
    self.assertAllEqual(0, sessions[1].run(local_step_1))

    # We have initial tokens in the queue so we can call this one by one. After
    # the token queue becomes empty, they should be called concurrently.
    # Here worker 0 and worker 2 finished first.
    sessions[0].run(train_ops[0])
    sessions[2].run(train_ops[2])

    # The global step should have been updated since we only need to collect 2
    # gradients. The variables should now have the new values after the average
    # of the gradients from worker 0/2 are applied.
    while sessions[1].run(global_step) != 1:
      time.sleep(0.01)

    self.assertAllEqual(1, sessions[1].run(global_step))
    self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
    self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))

    # Worker 1 finished later and its gradients will now be dropped as it is
    # stale.
    sessions[1].run(train_ops[1])

    # As shown in the previous test, the local_step for all workers should be
    # still 0 so their next computation will also be dropped.
    sessions[0].run(train_ops[0])
    sessions[1].run(train_ops[1])
    sessions[2].run(train_ops[2])

    # Although the global step should still be 1 as explained above, the local
    # step should now be updated to 1. Just check worker 1 as an example.
    self.assertAllEqual(1, sessions[1].run(global_step))
    self.assertAllEqual(1, sessions[1].run(local_step_1))

    thread_0 = self.checkedThread(
        target=self._run, args=(train_ops[0], sessions[0]))
    thread_1 = self.checkedThread(
        target=self._run, args=(train_ops[1], sessions[1]))

    # Lets worker 0 execute first.
    # It will wait as we need 2 workers to finish this step and the global step
    # should be still 1.
    thread_0.start()
    self.assertAllEqual(1, sessions[1].run(global_step))

    # Starts worker 1.
    thread_1.start()
    thread_1.join()
    thread_0.join()

    # The global step should now be 2 and the gradients should have been
    # applied again.
    self.assertAllEqual(2, sessions[1].run(global_step))
    self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
                        sessions[1].run(var_0_g_1))
    self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
                        sessions[1].run(var_1_g_1))
class SyncReplicasOptimizerHookTest(test.TestCase):
  """Tests for the session-run hook created by SyncReplicasOptimizer."""

  def testErrorIfUsedBeforeMinimizeCalled(self):
    # begin() must fail until apply_gradients/minimize has built the ops.
    opt = training.SyncReplicasOptimizer(
        opt=gradient_descent.GradientDescentOptimizer(1.0),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    hook = opt.make_session_run_hook(True)
    with self.assertRaisesRegexp(ValueError,
                                 "apply_gradient should be called"):
      hook.begin()

  def testCanCreatedBeforeMinimizeCalled(self):
    """This behavior is required to be integrated with Estimators."""
    opt = training.SyncReplicasOptimizer(
        opt=gradient_descent.GradientDescentOptimizer(1.0),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    hook = opt.make_session_run_hook(True)
    v = variables.Variable([0.])
    global_step = variables.Variable(0, name="global_step", trainable=False)
    opt.minimize(v, global_step=global_step)
    hook.begin()

  def testFetchVariableList(self):
    # variables() must expose the wrapped optimizer's slot/accumulator vars.
    opt = training.SyncReplicasOptimizer(
        opt=adam.AdamOptimizer(0.01),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    v = variables.Variable([0.], name="fetch_variable_test")
    global_step = variables.Variable(0, name="global_step", trainable=False)
    opt.minimize(v, global_step=global_step)
    opt_variables = opt.variables()
    beta1_power, beta2_power = opt._opt._get_beta_accumulators()
    self.assertIn(beta1_power, opt_variables)
    self.assertIn(beta2_power, opt_variables)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
klusark/android_external_chromium_org | refs/heads/cm-11.0 | third_party/closure_linter/closure_linter/fixjsstyle_test.py | 135 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gpylint auto-fixer."""
__author__ = 'robbyw@google.com (Robby Walker)'
import StringIO
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import error_fixer
# Location of the golden/test data files used by these tests.
_RESOURCE_PREFIX = 'closure_linter/testdata'

# Configure the linter flags once for every test in this module.
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase):
  """Test case for gjslint auto-fixing."""

  def testFixJsStyle(self):
    test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
                  ['indentation.js', 'fixjsstyle.indentation.out.js']]
    for [running_input_file, running_output_file] in test_cases:
      input_filename = None
      golden_filename = None
      current_filename = None
      # NOTE(review): this try body only performs string formatting, which
      # cannot raise IOError -- the open() calls below were presumably meant
      # to live here. Verify against the upstream version.
      try:
        input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
        current_filename = input_filename

        golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
        current_filename = golden_filename
      except IOError, ex:
        raise IOError('Could not find testdata resource for %s: %s' %
                      (current_filename, ex))

      if running_input_file == 'fixjsstyle.in.js':
        with open(input_filename) as f:
          for line in f:
            # Go to last line.
            pass
          self.assertTrue(line == line.rstrip(), '%s file should not end '
                          'with a new line.' % (input_filename))

      # Autofix the file, sending output to a fake file.
      actual = StringIO.StringIO()
      style_checker = checker.JavaScriptStyleChecker(
          error_fixer.ErrorFixer(actual))
      style_checker.Check(input_filename)

      # Now compare the files.
      actual.seek(0)
      expected = open(golden_filename, 'r')

      self.assertEqual(actual.readlines(), expected.readlines())

  def testMissingExtraAndUnsortedRequires(self):
    """Tests handling of missing extra and unsorted goog.require statements."""
    original = [
        "goog.require('dummy.aa');",
        "goog.require('dummy.Cc');",
        "goog.require('dummy.Dd');",
        "",
        "var x = new dummy.Bb();",
        "dummy.Cc.someMethod();",
        "dummy.aa.someMethod();",
        ]

    expected = [
        "goog.require('dummy.Bb');",
        "goog.require('dummy.Cc');",
        "goog.require('dummy.aa');",
        "",
        "var x = new dummy.Bb();",
        "dummy.Cc.someMethod();",
        "dummy.aa.someMethod();",
        ]

    self._AssertFixes(original, expected)

  def testMissingExtraAndUnsortedProvides(self):
    """Tests handling of missing extra and unsorted goog.provide statements."""
    original = [
        "goog.provide('dummy.aa');",
        "goog.provide('dummy.Cc');",
        "goog.provide('dummy.Dd');",
        "",
        "dummy.Cc = function() {};",
        "dummy.Bb = function() {};",
        "dummy.aa.someMethod = function();",
        ]

    expected = [
        "goog.provide('dummy.Bb');",
        "goog.provide('dummy.Cc');",
        "goog.provide('dummy.aa');",
        "",
        "dummy.Cc = function() {};",
        "dummy.Bb = function() {};",
        "dummy.aa.someMethod = function();",
        ]

    self._AssertFixes(original, expected)

  def testNoRequires(self):
    """Tests positioning of missing requires without existing requires."""
    original = [
        "goog.provide('dummy.Something');",
        "",
        "dummy.Something = function() {};",
        "",
        "var x = new dummy.Bb();",
        ]

    expected = [
        "goog.provide('dummy.Something');",
        "",
        "goog.require('dummy.Bb');",
        "",
        "dummy.Something = function() {};",
        "",
        "var x = new dummy.Bb();",
        ]

    self._AssertFixes(original, expected)

  def testNoProvides(self):
    """Tests positioning of missing provides without existing provides."""
    original = [
        "goog.require('dummy.Bb');",
        "",
        "dummy.Something = function() {};",
        "",
        "var x = new dummy.Bb();",
        ]

    expected = [
        "goog.provide('dummy.Something');",
        "",
        "goog.require('dummy.Bb');",
        "",
        "dummy.Something = function() {};",
        "",
        "var x = new dummy.Bb();",
        ]

    self._AssertFixes(original, expected)

  def testGoogScopeIndentation(self):
    """Tests Handling a typical end-of-scope indentation fix."""
    original = [
        'goog.scope(function() {',
        '  // TODO(brain): Take over the world.',
        '});  // goog.scope',
        ]

    expected = [
        'goog.scope(function() {',
        '// TODO(brain): Take over the world.',
        '});  // goog.scope',
        ]

    self._AssertFixes(original, expected)

  def testMissingEndOfScopeComment(self):
    """Tests Handling a missing comment at end of goog.scope."""
    original = [
        'goog.scope(function() {',
        '});',
        ]

    expected = [
        'goog.scope(function() {',
        '});  // goog.scope',
        ]

    self._AssertFixes(original, expected)

  def testMissingEndOfScopeCommentWithOtherComment(self):
    """Tests handling an irrelevant comment at end of goog.scope."""
    original = [
        'goog.scope(function() {',
        "});  // I don't belong here!",
        ]

    expected = [
        'goog.scope(function() {',
        '});  // goog.scope',
        ]

    self._AssertFixes(original, expected)

  def testMalformedEndOfScopeComment(self):
    """Tests Handling a malformed comment at end of goog.scope."""
    original = [
        'goog.scope(function() {',
        '});  // goog.scope FTW',
        ]

    expected = [
        'goog.scope(function() {',
        '});  // goog.scope',
        ]

    self._AssertFixes(original, expected)

  def _AssertFixes(self, original, expected):
    """Asserts that the error fixer corrects original to expected."""
    original = self._GetHeader() + original
    expected = self._GetHeader() + expected

    actual = StringIO.StringIO()
    style_checker = checker.JavaScriptStyleChecker(
        error_fixer.ErrorFixer(actual))
    style_checker.CheckLines('testing.js', original, False)
    actual.seek(0)

    expected = [x + '\n' for x in expected]

    self.assertListEqual(actual.readlines(), expected)

  def _GetHeader(self):
    """Returns a fake header for a JavaScript file."""
    return [
        "// Copyright 2011 Google Inc. All Rights Reserved.",
        "",
        "/**",
        " * @fileoverview Fake file overview.",
        " * @author fake@google.com (Fake Person)",
        " */",
        ""
        ]
# Standard unittest entry point.
if __name__ == '__main__':
  googletest.main()
|
flwh/KK_mt6589_iq451 | refs/heads/master | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/tests/test_main.py | 57 | # -*- coding: utf-8 -*-
import sys
import codecs
import logging
import os
import re
import shutil
import StringIO
import sys
import tempfile
import unittest
from lib2to3 import main
# Directory holding the 2to3 test fixtures, relative to this file.
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
# A known-good Python 2 module used as conversion input by the tests.
PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py")
class TestMain(unittest.TestCase):
    """End-to-end tests for the ``2to3`` command-line driver (lib2to3.main)."""
    if not hasattr(unittest.TestCase, 'assertNotRegex'):
        # This method was only introduced in 3.2.
        def assertNotRegex(self, text, regexp, msg=None):
            import re
            if not hasattr(regexp, 'search'):
                regexp = re.compile(regexp)
            if regexp.search(text):
                self.fail("regexp %s MATCHED text %r" % (regexp.pattern, text))
    def setUp(self):
        self.temp_dir = None # tearDown() will rmtree this directory if set.
    def tearDown(self):
        # Clean up logging configuration set up by main().
        del logging.root.handlers[:]
        if self.temp_dir:
            shutil.rmtree(self.temp_dir)
    def run_2to3_capture(self, args, in_capture, out_capture, err_capture):
        """Run lib2to3.main with stdin/stdout/stderr swapped for the given streams.

        Returns main()'s exit status; the real streams are always restored.
        """
        save_stdin = sys.stdin
        save_stdout = sys.stdout
        save_stderr = sys.stderr
        sys.stdin = in_capture
        sys.stdout = out_capture
        sys.stderr = err_capture
        try:
            return main.main("lib2to3.fixes", args)
        finally:
            # Restore the real streams even if main() raises.
            sys.stdin = save_stdin
            sys.stdout = save_stdout
            sys.stderr = save_stderr
    def test_unencodable_diff(self):
        # A diff containing non-ASCII text must warn, not crash, when stdout
        # can only encode ASCII.
        input_stream = StringIO.StringIO(u"print 'nothing'\nprint u'über'\n")
        out = StringIO.StringIO()
        out_enc = codecs.getwriter("ascii")(out)
        err = StringIO.StringIO()
        ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
        self.assertEqual(ret, 0)
        output = out.getvalue()
        self.assertTrue("-print 'nothing'" in output)
        self.assertTrue("WARNING: couldn't encode <stdin>'s diff for "
                        "your terminal" in err.getvalue())
    def setup_test_source_trees(self):
        """Setup a test source tree and output destination tree."""
        self.temp_dir = tempfile.mkdtemp()  # tearDown() cleans this up.
        self.py2_src_dir = os.path.join(self.temp_dir, "python2_project")
        self.py3_dest_dir = os.path.join(self.temp_dir, "python3_project")
        os.mkdir(self.py2_src_dir)
        os.mkdir(self.py3_dest_dir)
        # Turn it into a package with a few files.
        self.setup_files = []
        open(os.path.join(self.py2_src_dir, "__init__.py"), "w").close()
        self.setup_files.append("__init__.py")
        shutil.copy(PY2_TEST_MODULE, self.py2_src_dir)
        self.setup_files.append(os.path.basename(PY2_TEST_MODULE))
        self.trivial_py2_file = os.path.join(self.py2_src_dir, "trivial.py")
        self.init_py2_file = os.path.join(self.py2_src_dir, "__init__.py")
        with open(self.trivial_py2_file, "w") as trivial:
            trivial.write("print 'I need a simple conversion.'")
        self.setup_files.append("trivial.py")
    def test_filename_changing_on_output_single_dir(self):
        """2to3 a single directory with a new output dir and suffix."""
        self.setup_test_source_trees()
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        suffix = "TEST"
        ret = self.run_2to3_capture(
            ["-n", "--add-suffix", suffix, "--write-unchanged-files",
             "--no-diffs", "--output-dir",
             self.py3_dest_dir, self.py2_src_dir],
            StringIO.StringIO(""), out, err)
        self.assertEqual(ret, 0)
        stderr = err.getvalue()
        self.assertIn(" implies -w.", stderr)
        self.assertIn(
            "Output in %r will mirror the input directory %r layout" % (
                self.py3_dest_dir, self.py2_src_dir), stderr)
        # Every source file must appear in the destination with the suffix.
        self.assertEqual(set(name+suffix for name in self.setup_files),
                         set(os.listdir(self.py3_dest_dir)))
        for name in self.setup_files:
            self.assertIn("Writing converted %s to %s" % (
                    os.path.join(self.py2_src_dir, name),
                    os.path.join(self.py3_dest_dir, name+suffix)), stderr)
        sep = re.escape(os.sep)
        self.assertRegexpMatches(
                stderr, r"No changes to .*/__init__\.py".replace("/", sep))
        self.assertNotRegex(
                stderr, r"No changes to .*/trivial\.py".replace("/", sep))
    def test_filename_changing_on_output_two_files(self):
        """2to3 two files in one directory with a new output dir."""
        self.setup_test_source_trees()
        err = StringIO.StringIO()
        py2_files = [self.trivial_py2_file, self.init_py2_file]
        expected_files = set(os.path.basename(name) for name in py2_files)
        ret = self.run_2to3_capture(
            ["-n", "-w", "--write-unchanged-files",
             "--no-diffs", "--output-dir", self.py3_dest_dir] + py2_files,
            StringIO.StringIO(""), StringIO.StringIO(), err)
        self.assertEqual(ret, 0)
        stderr = err.getvalue()
        self.assertIn(
            "Output in %r will mirror the input directory %r layout" % (
                self.py3_dest_dir, self.py2_src_dir), stderr)
        self.assertEqual(expected_files, set(os.listdir(self.py3_dest_dir)))
    def test_filename_changing_on_output_single_file(self):
        """2to3 a single file with a new output dir."""
        self.setup_test_source_trees()
        err = StringIO.StringIO()
        ret = self.run_2to3_capture(
            ["-n", "-w", "--no-diffs", "--output-dir", self.py3_dest_dir,
             self.trivial_py2_file],
            StringIO.StringIO(""), StringIO.StringIO(), err)
        self.assertEqual(ret, 0)
        stderr = err.getvalue()
        self.assertIn(
            "Output in %r will mirror the input directory %r layout" % (
                self.py3_dest_dir, self.py2_src_dir), stderr)
        self.assertEqual(set([os.path.basename(self.trivial_py2_file)]),
                         set(os.listdir(self.py3_dest_dir)))
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
mcolom/ipolDevel | refs/heads/master | ipol_demo/modules/blobs/errors.py | 1 | """
Blobs errors
"""
class IPOLBlobsTemplateError(Exception):
    """
    Raised by the Blobs module for template-related failures.
    """
class IPOLBlobsThumbnailError(Exception):
    """
    Raised by the Blobs module for thumbnail-related failures.
    """
class IPOLBlobsDataBaseError(Exception):
    """
    Raised by the Blobs module for database-related failures.
    """
class IPOLRemoveDirError(Exception):
    """
    Raised by the Blobs module when a directory cannot be removed.
    """
|
denyszamiatin/contacts | refs/heads/master | phones.py | 1 | import settings
class Contacts(object):
    """In-memory phone book persisted through an injected serializer.

    The serializer must expose ``load() -> dict`` and ``save(dict)``.
    """

    def __init__(self, serializer):
        self.serializer = serializer
        # Materialize the persisted contacts once, at construction time.
        self.contacts = serializer.load()

    def add_contact(self, name, phone):
        """Store *phone* under *name*; duplicate names are rejected."""
        if name in self.contacts:
            raise KeyError('Name exists')
        self.contacts[name] = phone

    def _fail_if_name_not_exists(self, name):
        """Raise ``KeyError`` unless *name* is a known contact."""
        if name in self.contacts:
            return
        raise KeyError("Name doesn't exist")

    def get_phone(self, name):
        """Return the phone number stored for *name*."""
        self._fail_if_name_not_exists(name)
        return self.contacts[name]

    def delete_contact(self, name):
        """Remove *name* (and its number) from the book."""
        self._fail_if_name_not_exists(name)
        self.contacts.pop(name)

    def update_contact(self, name, new_phone):
        """Replace the number stored for an existing *name*."""
        self._fail_if_name_not_exists(name)
        self.contacts[name] = new_phone

    def list_contacts(self):
        """Return (name, phone) pairs for every stored contact."""
        return self.contacts.items()

    def save(self):
        """Persist the current contact map through the serializer."""
        self.serializer.save(self.contacts)
|
0asa/scikit-learn | refs/heads/master | examples/feature_selection/feature_selection_pipeline.py | 342 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
print(__doc__)
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
# import some data to play with
# Synthetic 4-class problem: 20 features, only 3 of them informative.
X, y = samples_generator.make_classification(
    n_features=20, n_informative=3, n_redundant=0, n_classes=4,
    n_clusters_per_class=2)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
# Chain selection and classification so both are fit with a single call.
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X, y)
anova_svm.predict(X)
|
lcrees/callchain | refs/heads/master | callchain/tests/mixins/man/reducing.py | 1 | # -*- coding: utf-8 -*-
'''reduce test mixins'''
from inspect import ismodule
from twoq.support import port
class MMathQMixin(object):
    """Math-reduction test mix-in for queue classes.

    NOTE(review): assumes the host TestCase defines ``self.qclass`` (the
    queue factory under test) and the ``_false_true_false`` balance-check
    helper -- confirm against the concrete test classes that mix this in.
    """
    def test_max(self):
        self._false_true_false(
            self.qclass(1, 2, 4).max(), self.assertEqual, 4,
        )
        from stuf import stuf
        stooges = [
            stuf(name='moe', age=40),
            stuf(name='larry', age=50),
            stuf(name='curly', age=60),
        ]
        # max() keyed through tap(); verify the balanced flag flips around
        # the explicit sync() call.
        manq = self.qclass(*stooges).tap(lambda x: x.age).max()
        self.assertFalse(manq.balanced)
        manq.sync()
        self.assertTrue(manq.balanced)
        self.assertEqual(stuf(manq.end()), stuf(name='curly', age=60))
        self.assertTrue(manq.balanced)
    def test_min(self):
        self._false_true_false(
            self.qclass(10, 5, 100, 2, 1000).min(),
            self.assertEqual,
            2,
        )
        self._false_true_false(
            self.qclass(10, 5, 100, 2, 1000).tap(lambda x: x).min(),
            self.assertEqual,
            2,
        )
    def test_minmax(self):
        self._false_true_false(
            self.qclass(1, 2, 4).minmax(), self.assertEqual, [1, 4],
        )
        self._false_true_false(
            self.qclass(10, 5, 100, 2, 1000).minmax(),
            self.assertEqual,
            [2, 1000],
        )
    def test_sum(self):
        self._false_true_false(
            self.qclass(1, 2, 3).sum(), self.assertEqual, 6,
        )
        # sum() with an explicit start value
        self._false_true_false(
            self.qclass(1, 2, 3).sum(1), self.assertEqual, 7,
        )
    def test_mode(self):
        self._false_true_false(
            self.qclass(11, 3, 5, 11, 7, 3, 11).mode(),
            self.assertEqual,
            11,
        )
    def test_median(self):
        self._false_true_false(
            self.qclass(4, 5, 7, 2, 1).median(), self.assertEqual, 4,
        )
        # even-length input -> mean of the two middle values
        self._false_true_false(
            self.qclass(4, 5, 7, 2, 1, 8).median(), self.assertEqual, 4.5,
        )
    def test_fsum(self):
        # fsum() must be float-accurate where naive sum() would drift
        self._false_true_false(
            self.qclass(.1, .1, .1, .1, .1, .1, .1, .1, .1, .1).fsum(),
            self.assertEqual,
            1.0,
        )
    def test_average(self):
        self._false_true_false(
            self.qclass(10, 40, 45).average(),
            self.assertEqual,
            31.666666666666668,
        )
    def test_uncommon(self):
        self._false_true_false(
            self.qclass(11, 3, 5, 11, 7, 3, 11).uncommon(),
            self.assertEqual,
            7,
        )
    def test_frequency(self):
        self._false_true_false(
            self.qclass(11, 3, 5, 11, 7, 3, 11).frequency(),
            self.assertEqual,
            [(11, 3), (3, 2), (5, 1), (7, 1)],
        )
    def test_statrange(self):
        self._false_true_false(
            self.qclass(3, 5, 7, 3, 11).statrange(),
            self.assertEqual,
            8,
        )
class MTruthQMixin(object):
    """Truth-testing (all/any/contains/quantify) test mix-in for queue classes.

    NOTE(review): assumes the host TestCase supplies ``self.qclass`` and
    ``_false_true_false`` -- confirm against the concrete test classes.
    """
    def test_all(self):
        self._false_true_false(
            self.qclass(True, 1, None, 'yes').tap(bool).all(),
            self.assertFalse,
        )
    def test_any(self):
        self._false_true_false(
            self.qclass(None, 0, 'yes', False).tap(bool).any(),
            self.assertTrue,
        )
    def test_include(self):
        # historical test name; exercises the contains() call
        self._false_true_false(
            self.qclass(1, 2, 3).contains(3), self.assertTrue,
        )
    def test_quantify(self):
        # quantify() counts how many items satisfy the tapped predicate
        self._false_true_false(
            self.qclass(True, 1, None, 'yes').tap(bool).quantify(),
            self.assertEqual,
            3,
        )
        self._false_true_false(
            self.qclass(None, 0, 'yes', False).tap(bool).quantify(),
            self.assertEqual,
            1,
        )
class MReduceQMixin(MMathQMixin, MTruthQMixin):
    """Structural-reduction test mix-in (flatten/merge/zip/reduce/...).

    Inherits the math and truth test batteries above; same host-TestCase
    assumptions apply (``qclass``, ``_false_true_false``/``_true_true_false``).
    """
    def test_smash(self):
        # smash() deep-flattens arbitrarily nested sequences
        self._false_true_false(
            self.qclass([[1, [2], [3, [[4]]]]]).smash(),
            self.assertEqual,
            [1, 2, 3, 4],
        )
    def test_merge(self):
        # merge() combines sorted inputs into one sorted output
        self._false_true_false(
            self.qclass([4, 5, 6], [1, 2, 3]).merge(),
            self.assertEqual,
            [1, 2, 3, 4, 5, 6],
        )
    def test_pairwise(self):
        self._false_true_false(
            self.qclass(
                'moe', 30, True, 'larry', 40, False, 'curly', 50, 1, 1,
            ).pairwise(),
            self.assertEqual,
            [('moe', 30), (30, True), (True, 'larry'), ('larry', 40),
            (40, False), (False, 'curly'), ('curly', 50), (50, 1), (1, 1)]
        )
    def test_reduce(self):
        self._false_true_false(
            self.qclass(1, 2, 3).tap(lambda x, y: x + y).reduce(),
            self.assertEqual,
            6,
        )
        # reduce() with an explicit initial value
        self._false_true_false(
            self.qclass(1, 2, 3).tap(lambda x, y: x + y).reduce(1),
            self.assertEqual,
            7,
        )
    def test_reduce_right(self):
        self._false_true_false(
            self.qclass([0, 1], [2, 3], [4, 5]).tap(
                lambda x, y: x + y
            ).reduce_right(),
            self.assertEqual,
            [4, 5, 2, 3, 0, 1],
        )
        self._false_true_false(
            self.qclass([0, 1], [2, 3], [4, 5]).tap(
                lambda x, y: x + y
            ).reduce_right([0, 0]),
            self.assertEqual,
            [4, 5, 2, 3, 0, 1, 0, 0],
        )
    def test_roundrobin(self):
        # roundrobin() interleaves items from each input in turn
        self._false_true_false(
            self.qclass(
                ['moe', 'larry', 'curly'], [30, 40, 50], [True, False, False]
            ).roundrobin(),
            self.assertEqual,
            ['moe', 30, True, 'larry', 40, False, 'curly', 50, False],
        )
    def test_zip(self):
        self._true_true_false(
            self.qclass(
                ['moe', 'larry', 'curly'], [30, 40, 50], [True, False, False],
            ).zip(),
            self.assertEqual,
            [('moe', 30, True), ('larry', 40, False), ('curly', 50, False)],
        )
# Export every public name defined above; skip private names, modules, and
# the helpers (``ismodule``/``port``) used to build the list itself.
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
    name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule
|
40023154/Finalexam_0627 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/heapq.py | 628 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
    """Add *item* to *heap*, preserving the heap invariant."""
    heap.append(item)
    # The new element starts as the last leaf and bubbles up toward the root.
    _siftdown(heap, 0, len(heap) - 1)
def heappop(heap):
    """Remove and return the smallest item from *heap*."""
    # Popping the tail first doubles as the emptiness check (IndexError).
    tail = heap.pop()
    if not heap:
        # The popped tail was the only element, hence also the minimum.
        return tail
    smallest = heap[0]
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop and return the current smallest value, then add *item*.

    More efficient than a heappop() followed by a heappush(); the heap size
    is unchanged.  Note the returned value may be larger than *item*, so a
    conditional replacement should be written as:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]      # raises IndexError on an empty heap, as before
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heappushpop(heap, item):
    """Push *item* then pop the smallest -- faster than two separate calls."""
    if heap and heap[0] < item:
        # The current root is smaller than the incoming item: swap in place.
        heap[0], item = item, heap[0]
        _siftup(heap, 0)
    return item
def heapify(x):
    """Transform list *x* into a heap, in-place, in O(len(x)) time."""
    n = len(x)
    # Only indices with at least one child (0 .. n//2 - 1) can violate the
    # invariant; sift each of them, deepest first, so every subtree below
    # the current index is already a valid heap.
    for i in range(n // 2 - 1, -1, -1):
        _siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, reverse=True)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a min-heap with the first n items; its root is the current n-th
    # largest and is evicted whenever a bigger element arrives.
    top = list(islice(it, n))
    if not top:
        return top
    heapify(top)
    push_pop = heappushpop          # hoist the global lookup out of the loop
    for candidate in it:
        push_pop(top, candidate)
    top.sort(reverse=True)
    return top
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
    """Bubble the (possibly out-of-place) value at *pos* up toward *startpos*.

    *heap* is assumed to satisfy the heap invariant everywhere at indices
    >= *startpos* except possibly at *pos* itself.
    """
    newitem = heap[pos]
    # Follow the path to the root, moving parents down until finding a place
    # newitem fits.
    while pos > startpos:
        parentpos = (pos - 1) >> 1   # parent index in the implicit binary tree
        parent = heap[parentpos]
        if newitem < parent:
            # Parent is larger: pull it down one level and keep climbing.
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
    """Restore the heap invariant at *pos*, whose child subtrees are heaps.

    Bubbles the smaller child chain up to make room, then sifts the
    displaced value back down with _siftdown (see the comment block above
    for why this beats the textbook early-exit formulation).
    """
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    # Bubble up the smaller child until hitting a leaf.
    childpos = 2*pos + 1    # leftmost child position
    while childpos < endpos:
        # Set childpos to index of smaller child.
        rightpos = childpos + 1
        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
            childpos = rightpos
        # Move the smaller child up.
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2*pos + 1
    # The leaf at pos is empty now.  Put newitem there, and bubble it up
    # to its final resting place (by sifting its parents down).
    heap[pos] = newitem
    _siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
    """Maxheap variant of _siftdown: bubble a large leaf toward the root."""
    newitem = heap[pos]
    # Follow the path to the root, moving parents down until finding a place
    # newitem fits.  (Comparison is inverted relative to _siftdown.)
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if parent < newitem:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
def _siftup_max(heap, pos):
    """Maxheap variant of _siftup: restore the maxheap invariant at *pos*."""
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    # Bubble up the larger child until hitting a leaf.
    childpos = 2*pos + 1    # leftmost child position
    while childpos < endpos:
        # Set childpos to index of larger child.
        rightpos = childpos + 1
        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
            childpos = rightpos
        # Move the larger child up.
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2*pos + 1
    # The leaf at pos is empty now.  Put newitem there, and bubble it up
    # to its final resting place (by sifting its parents down).
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    '''
    # Localize frequently used globals for speed in the inner loop.
    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
    _len = len
    h = []
    h_append = h.append
    # Seed the heap with one [value, stream-index, next-callable] entry per
    # non-empty input; the stream index breaks ties between equal values so
    # the callables are never compared.
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            next = it.__next__
            h_append([next(), itnum, next])
        except _StopIteration:
            pass
    heapify(h)
    while _len(h) > 1:
        try:
            while True:
                v, itnum, next = s = h[0]
                yield v
                s[0] = next()           # raises StopIteration when exhausted
                _heapreplace(h, s)      # restore heap condition
        except _StopIteration:
            _heappop(h)                 # remove empty iterator
    if h:
        # fast case when only a single iterator remains
        v, itnum, next = h[0]
        yield v
        yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    # Short-cut for n==1 is to use min() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [min(chain(head, it))]
        return [min(chain(head, it), key=key)]

    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    # When key is none, use simpler decoration.  The strictly increasing
    # count() index breaks ties so payload objects are never compared.
    if key is None:
        it = zip(iterable, count())                         # decorate
        result = _nsmallest(n, it)
        return [r[0] for r in result]                       # undecorate

    # General case, slowest method
    in1, in2 = tee(iterable)
    it = zip(map(key, in1), count(), in2)                   # decorate
    result = _nsmallest(n, it)
    return [r[2] for r in result]                           # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
    """
    # Short-cut for n==1 is to use max() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [max(chain(head, it))]
        return [max(chain(head, it), key=key)]

    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]

    # When key is none, use simpler decoration.  count(0,-1) gives each item
    # a strictly decreasing index, so ties favour earlier items and payload
    # objects are never compared.
    if key is None:
        it = zip(iterable, count(0,-1))                     # decorate
        result = _nlargest(n, it)
        return [r[0] for r in result]                       # undecorate

    # General case, slowest method
    in1, in2 = tee(iterable)
    it = zip(map(key, in1), count(0,-1), in2)               # decorate
    result = _nlargest(n, it)
    return [r[2] for r in result]                           # undecorate
if __name__ == "__main__":
    # Simple sanity test: heap-sort the sample data; should print it in
    # ascending order, then run the module's doctests.
    heap = []
    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
    for item in data:
        heappush(heap, item)
    sort = []
    while heap:
        sort.append(heappop(heap))
    print(sort)
    import doctest
    doctest.testmod()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_call.py | 182 | import unittest
from test import support
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
    """Call builtin C methods with right/wrong argument counts and keywords
    to cover the METH_XXX calling-convention paths (see module comment).
    """
    # --- {}.__contains__: takes exactly one positional argument ---
    def test_varargs0(self):
        self.assertRaises(TypeError, {}.__contains__)
    def test_varargs1(self):
        {}.__contains__(0)
    def test_varargs2(self):
        self.assertRaises(TypeError, {}.__contains__, 0, 1)
    def test_varargs0_ext(self):
        # *-unpacking with too few arguments
        try:
            {}.__contains__(*())
        except TypeError:
            pass
    def test_varargs1_ext(self):
        {}.__contains__(*(0,))
    def test_varargs2_ext(self):
        try:
            {}.__contains__(*(1, 2))
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_varargs0_kw(self):
        # keyword arguments are rejected entirely
        self.assertRaises(TypeError, {}.__contains__, x=2)
    def test_varargs1_kw(self):
        self.assertRaises(TypeError, {}.__contains__, x=2)
    def test_varargs2_kw(self):
        self.assertRaises(TypeError, {}.__contains__, x=2, y=2)
    # --- {}.keys: takes no arguments ---
    def test_oldargs0_0(self):
        {}.keys()
    def test_oldargs0_1(self):
        self.assertRaises(TypeError, {}.keys, 0)
    def test_oldargs0_2(self):
        self.assertRaises(TypeError, {}.keys, 0, 1)
    def test_oldargs0_0_ext(self):
        {}.keys(*())
    def test_oldargs0_1_ext(self):
        try:
            {}.keys(*(0,))
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_oldargs0_2_ext(self):
        try:
            {}.keys(*(1, 2))
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_oldargs0_0_kw(self):
        try:
            {}.keys(x=2)
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_oldargs0_1_kw(self):
        self.assertRaises(TypeError, {}.keys, x=2)
    def test_oldargs0_2_kw(self):
        self.assertRaises(TypeError, {}.keys, x=2, y=2)
    # --- [].count: takes exactly one argument ---
    def test_oldargs1_0(self):
        self.assertRaises(TypeError, [].count)
    def test_oldargs1_1(self):
        [].count(1)
    def test_oldargs1_2(self):
        self.assertRaises(TypeError, [].count, 1, 2)
    def test_oldargs1_0_ext(self):
        try:
            [].count(*())
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_oldargs1_1_ext(self):
        [].count(*(1,))
    def test_oldargs1_2_ext(self):
        try:
            [].count(*(1, 2))
        except TypeError:
            pass
        else:
            raise RuntimeError
    def test_oldargs1_0_kw(self):
        self.assertRaises(TypeError, [].count, x=2)
    def test_oldargs1_1_kw(self):
        self.assertRaises(TypeError, [].count, {}, x=2)
    def test_oldargs1_2_kw(self):
        self.assertRaises(TypeError, [].count, x=2, y=2)
def test_main():
    """Entry point used by regrtest: run the CFunctionCalls suite."""
    support.run_unittest(CFunctionCalls)
if __name__ == "__main__":
    test_main()  # allow running this test module directly
|
pymo/pymo | refs/heads/master | symbian/PythonForS60_1.9.6/module-repo/standard-modules/heapq.py | 91 | # -*- coding: Latin-1 -*-
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'nlargest',
'nsmallest']
from itertools import islice, repeat, count, imap, izip, tee
from operator import itemgetter, neg
import bisect
def heappush(heap, item):
    """Add *item* to *heap*, keeping the heap invariant intact."""
    # Place the new item at the end, then bubble it up to its proper spot.
    heap.append(item)
    _siftdown(heap, 0, len(heap) - 1)
def heappop(heap):
    """Remove and return the smallest heap element, preserving the invariant."""
    # list.pop() raises IndexError on an empty heap, which is the documented
    # behaviour of heappop as well.
    tail = heap.pop()
    if not heap:
        return tail
    smallest = heap[0]
    # Move the former last element to the root and sift it down into place.
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop the smallest value and push *item* in a single operation.

    More efficient than a heappop() followed by a heappush() on a
    fixed-size heap.  Note that the returned value may be larger than
    *item*; callers wanting a conditional replacement should test first:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]    # IndexError on an empty heap, as documented
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heapify(x):
    """Turn list *x* into a heap, in place, in O(len(x)) time."""
    n = len(x)
    # Work bottom-up.  Indices n//2 .. n-1 are leaves and therefore already
    # trivial one-element heaps, so only the internal nodes need sifting.
    # Whether n is even or odd, the last internal node is n//2 - 1.
    for root in reversed(xrange(n // 2)):
        _siftup(x, root)
def nlargest(n, iterable):
    """Return the n largest elements of *iterable*.

    Equivalent to: sorted(iterable, reverse=True)[:n]
    """
    it = iter(iterable)
    # Seed a min-heap with the first n items; its root is then the smallest
    # of the current candidates and the bar every later item must clear.
    result = list(islice(it, n))
    if not result:
        return result
    heapify(result)
    _heapreplace = heapreplace
    threshold = result[0]
    for candidate in it:
        # Items not beating the current smallest candidate are discarded.
        if candidate <= threshold:
            continue
        _heapreplace(result, candidate)
        threshold = result[0]
    result.sort(reverse=True)
    return result
def nsmallest(n, iterable):
    """Return the n smallest elements of *iterable*.

    Equivalent to: sorted(iterable)[:n]
    """
    if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
        # When n is small relative to the input, keep a sorted list of the
        # n best candidates with bisect: only n elements of memory, and each
        # losing element costs a single comparison against the current worst.
        it = iter(iterable)
        result = sorted(islice(it, 0, n))
        if not result:
            return result
        insort = bisect.insort
        pop = result.pop
        worst = result[-1]          # largest of the n smallest so far
        for candidate in it:
            if worst <= candidate:
                continue
            insort(result, candidate)
            pop()
            worst = result[-1]
        return result
    # Otherwise materialise everything and heapify once: O(m) to build the
    # heap plus O(n log m) for the pops, and no O(n) data movement per
    # insertion as bisect.insort would incur.
    h = list(iterable)
    heapify(h)
    return map(heappop, repeat(h, min(n, len(h))))
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent <= newitem:
break
heap[pos] = parent
pos = parentpos
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom __cmp__ methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and heap[rightpos] <= heap[childpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
# If available, use C implementation
# Rebind the public entry points to the C-accelerated versions from the
# optional _heapq extension module; on platforms without it the pure-Python
# implementations above silently remain in effect.
try:
    from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest
except ImportError:
    pass
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.
    Equivalent to: sorted(iterable, key=key)[:n]
    """
    # Decorate-sort-undecorate: pair each element with its key and a serial
    # number from count(), so ties are broken by input position (stable) and
    # the elements themselves are never compared.
    # NOTE(review): with key=None this relies on Python 2's imap(None, it)
    # yielding the items unchanged -- confirm if porting to Python 3.
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), count(), in2)                 # decorate
    result = _nsmallest(n, it)
    return map(itemgetter(2), result)                       # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.
    Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
    """
    # Decorate-sort-undecorate as in nsmallest above, but the serial numbers
    # are negated so that among equal keys the earlier element ranks higher
    # when selecting the largest items.
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), imap(neg, count()), in2)      # decorate
    result = _nlargest(n, it)
    return map(itemgetter(2), result)                       # undecorate
if __name__ == "__main__":
    # Simple sanity test
    heap = []
    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
    for item in data:
        heappush(heap, item)
    sort = []
    while heap:
        sort.append(heappop(heap))
    # Repeatedly popping a heap yields the items in ascending order, so this
    # prints the sorted data.  (Python 2 print statement.)
    print sort
|
oinopion/django | refs/heads/master | tests/gis_tests/utils.py | 327 | from unittest import skip
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
def no_backend(test_func, backend):
    """Decorator helper: disable *test_func* when running on *backend*.

    The backend name is compared against the last dotted component of the
    default database's ENGINE setting.
    """
    engine = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
    if engine != backend:
        return test_func

    # Replace the test with a no-op marked as skipped for this backend.
    @skip("This test is skipped on '%s' backend" % backend)
    def inner():
        pass
    return inner
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
    """Disable the decorated test function on the Oracle spatial backend."""
    return no_backend(func, 'oracle')
# Shortcut booleans to omit only portions of tests.
# The backend name is the last dotted component of the ENGINE setting,
# e.g. 'django.contrib.gis.db.backends.postgis' -> 'postgis'.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql
# Expose the backend-specific SpatialRefSys model under one common name, or
# None when the configured backend has no spatial-reference-system table.
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
    from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
    SpatialRefSys = None
|
Klaudit/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/apple.py | 122 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.port.base import Port
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
_log = logging.getLogger(__name__)
class ApplePort(Port):
    """Shared logic between all of Apple's ports."""
    # This is used to represent the version of an operating system
    # corresponding to the "mac" or "win" base LayoutTests/platform
    # directory. I'm not sure this concept is very useful,
    # but it gives us a way to refer to fallback paths *only* including
    # the base directory.
    # This is mostly done because TestConfiguration assumes that self.version()
    # will never return None. (None would be another way to represent this concept.)
    # Apple supposedly has explicit "future" results which are kept in an internal repository.
    # It's possible that Apple would want to fix this code to work better with those results.
    FUTURE_VERSION = 'future' # FIXME: This whole 'future' thing feels like a hack.
    # overridden in subclasses
    VERSION_FALLBACK_ORDER = []
    ARCHITECTURES = []
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Expand a bare port name into its fully-versioned form.

        Appends the host's OS version when only the bare name was given,
        and a '-wk2' suffix when options request the WebKit2 test runner.
        """
        # NOTE(review): options may be a dict here ({} fallback below), in
        # which case getattr(options, ...) always yields the default False.
        options = options or {}
        if port_name in (cls.port_name, cls.port_name + '-wk2'):
            # If the port_name matches the (badly named) cls.port_name, that
            # means that they passed 'mac' or 'win' and didn't specify a version.
            # That convention means that we're supposed to use the version currently
            # being run, so this won't work if you're not on mac or win (respectively).
            # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
            assert host.platform.os_name in port_name, "%s is not in %s!" % (host.platform.os_name, port_name)
            if port_name == cls.port_name and not getattr(options, 'webkit_test_runner', False):
                port_name = cls.port_name + '-' + host.platform.os_version
            else:
                port_name = cls.port_name + '-' + host.platform.os_version + '-wk2'
        elif getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
            port_name += '-wk2'
        return port_name
    def _strip_port_name_prefix(self, port_name):
        """Return the part of *port_name* after the '<cls.port_name>-' prefix."""
        # Callers treat this return value as the "version", which only works
        # because Apple ports use a simple name-version port_name scheme.
        # FIXME: This parsing wouldn't be needed if port_name handling was moved to factory.py
        # instead of the individual port constructors.
        return port_name[len(self.port_name + '-'):]
    def __init__(self, host, port_name, **kwargs):
        """Validate *port_name* against the known versions and record the version."""
        super(ApplePort, self).__init__(host, port_name, **kwargs)
        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
        port_name = port_name.replace('-wk2', '')
        self._version = self._strip_port_name_prefix(port_name)
        assert port_name in allowed_port_names, "%s is not in %s" % (port_name, allowed_port_names)
    def _skipped_file_search_paths(self):
        """Search paths for Skipped files, excluding the most recent version's
        dedicated directory (it shares the base platform directory instead)."""
        # We don't have a dedicated Skipped file for the most recent version of the port;
        # we just use the one in platform/{mac,win}
        most_recent_name = self.VERSION_FALLBACK_ORDER[-1]
        return set(filter(lambda name: name != most_recent_name, super(ApplePort, self)._skipped_file_search_paths()))
    # FIXME: A more sophisticated version of this function should move to WebKitPort and replace all calls to name().
    # This is also a misleading name, since 'mac-future' gets remapped to 'mac'.
    def _port_name_with_version(self):
        """Port name with the '-future' and '-wk2' suffixes stripped."""
        return self.name().replace('-future', '').replace('-wk2', '')
    def _generate_all_test_configurations(self):
        """Cartesian product of all known versions, build types and architectures."""
        configurations = []
        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
        for port_name in allowed_port_names:
            for build_type in self.ALL_BUILD_TYPES:
                for architecture in self.ARCHITECTURES:
                    configurations.append(TestConfiguration(version=self._strip_port_name_prefix(port_name), architecture=architecture, build_type=build_type))
        return configurations
|
benchisell/photostream-bc | refs/heads/master | flask/lib/python2.7/site-packages/migrate/changeset/__init__.py | 60 | """
This module extends SQLAlchemy and provides additional DDL [#]_
support.
.. [#] SQL Data Definition Language
"""
import re
import warnings
import sqlalchemy
from sqlalchemy import __version__ as _sa_version
warnings.simplefilter('always', DeprecationWarning)
_sa_version = tuple(int(re.match("\d+", x).group(0)) for x in _sa_version.split("."))
SQLA_07 = _sa_version >= (0, 7)
del re
del _sa_version
from migrate.changeset.schema import *
from migrate.changeset.constraint import *
sqlalchemy.schema.Table.__bases__ += (ChangesetTable, )
sqlalchemy.schema.Column.__bases__ += (ChangesetColumn, )
sqlalchemy.schema.Index.__bases__ += (ChangesetIndex, )
sqlalchemy.schema.DefaultClause.__bases__ += (ChangesetDefaultClause, )
|
nxnfufunezn/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/element_state/properties.py | 142 | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class GetElementPropertiesTest(base_test.WebDriverBaseTest):
    """WebDriver spec tests for reading element state/properties."""
    def test_get_element_text(self):
        """An element found by name exposes its rendered text via .text."""
        self.driver.get(self.webserver.where_is("element_state/res/elements_text.html"))
        e = self.driver.find_element_by_name("name")
        self.assertEquals("name", e.text)
if __name__ == "__main__":
    unittest.main()
|
jhamman/xarray | refs/heads/master | xarray/tests/test_dtypes.py | 2 | import numpy as np
import pytest
from xarray.core import dtypes
@pytest.mark.parametrize(
    "args, expected",
    [
        # np.bool_/np.bytes_/np.str_ are the canonical spellings that survive
        # the removal of the deprecated np.bool/np.string_/np.unicode_ aliases
        # (NumPy 1.24 / 2.0); the dtypes they denote are unchanged.
        ([np.bool_], np.bool_),
        ([np.bool_, np.bytes_], np.object_),
        ([np.float32, np.float64], np.float64),
        ([np.float32, np.bytes_], np.object_),
        ([np.str_, np.int64], np.object_),
        ([np.str_, np.str_], np.str_),
        ([np.bytes_, np.str_], np.object_),
    ],
)
def test_result_type(args, expected):
    """result_type follows NumPy promotion, falling back to object dtype
    for mixed string/non-string combinations."""
    actual = dtypes.result_type(*args)
    assert actual == expected
def test_result_type_scalar():
    """A scalar NaN must not upcast a float32 array to float64."""
    assert dtypes.result_type(np.arange(3, dtype=np.float32), np.nan) == np.float32
def test_result_type_dask_array():
    """result_type must decide the dtype without computing dask graphs."""
    # verify it works without evaluating dask arrays
    da = pytest.importorskip("dask.array")
    dask = pytest.importorskip("dask")
    def error():
        # A delayed task that always fails: if result_type triggered a
        # compute, the RuntimeError would propagate out of it.
        raise RuntimeError
    array = da.from_delayed(dask.delayed(error)(), (), np.float64)
    with pytest.raises(RuntimeError):
        array.compute()
    actual = dtypes.result_type(array)
    assert actual == np.float64
    # note that this differs from the behavior for scalar numpy arrays, which
    # would get promoted to float32
    actual = dtypes.result_type(array, np.array([0.5, 1.0], dtype=np.float32))
    assert actual == np.float64
@pytest.mark.parametrize("obj", [1.0, np.inf, "ab", 1.0 + 1.0j, True])
def test_inf(obj):
assert dtypes.INF > obj
assert dtypes.NINF < obj
@pytest.mark.parametrize(
    "kind, expected",
    [
        ("a", (np.dtype("O"), "nan")), # dtype('S')
        ("b", (np.float32, "nan")), # dtype('int8')
        ("B", (np.float32, "nan")), # dtype('uint8')
        ("c", (np.dtype("O"), "nan")), # dtype('S1')
        ("D", (np.complex128, "(nan+nanj)")), # dtype('complex128')
        ("d", (np.float64, "nan")), # dtype('float64')
        ("e", (np.float16, "nan")), # dtype('float16')
        ("F", (np.complex64, "(nan+nanj)")), # dtype('complex64')
        ("f", (np.float32, "nan")), # dtype('float32')
        ("h", (np.float32, "nan")), # dtype('int16')
        ("H", (np.float32, "nan")), # dtype('uint16')
        ("i", (np.float64, "nan")), # dtype('int32')
        ("I", (np.float64, "nan")), # dtype('uint32')
        ("l", (np.float64, "nan")), # dtype('int64')
        ("L", (np.float64, "nan")), # dtype('uint64')
        ("m", (np.timedelta64, "NaT")), # dtype('<m8')
        ("M", (np.datetime64, "NaT")), # dtype('<M8')
        ("O", (np.dtype("O"), "nan")), # dtype('O')
        ("p", (np.float64, "nan")), # dtype('int64')
        ("P", (np.float64, "nan")), # dtype('uint64')
        ("q", (np.float64, "nan")), # dtype('int64')
        ("Q", (np.float64, "nan")), # dtype('uint64')
        ("S", (np.dtype("O"), "nan")), # dtype('S')
        ("U", (np.dtype("O"), "nan")), # dtype('<U')
        ("V", (np.dtype("O"), "nan")), # dtype('V')
    ],
)
def test_maybe_promote(kind, expected):
    """For each dtype character code, maybe_promote must return the
    (promoted dtype, fill value) pair able to represent missing data;
    the expected fill value is compared via its string representation."""
    # 'g': np.float128 is not tested : not available on all platforms
    # 'G': np.complex256 is not tested : not available on all platforms
    actual = dtypes.maybe_promote(np.dtype(kind))
    assert actual[0] == expected[0]
    assert str(actual[1]) == expected[1]
|
meteoswiss-mdr/precipattractor | refs/heads/master | pymodules/io_tools_attractor.py | 1 | #!/usr/bin/env python
'''
Module to perform various input/output operations on gif radar files and netCDF/CSV files containing the statistics of the attractor.
The module also provide functionality to generate filenames, etc
Documentation convention from https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
07.07.2016
Loris Foresti
'''
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import subprocess
import sys
sys.path.append('/store/mch/msrad/python/library/radar/io/') # path to metranet library
import metranet
import fnmatch
import pandas as pd
import csv
from PIL import Image
from netCDF4 import Dataset
import matplotlib.colors as colors
import datetime
from operator import itemgetter
import time_tools_attractor as ti
import data_tools_attractor as dt
import stat_tools_attractor as st
# Radar structure
class Radar_object(object):
    """Container for one radar composite and its derived fields/statistics.

    Most attributes are attached dynamically by the read_* functions below;
    the class-level defaults mark an empty / unreadable product.
    """
    # Radar stats
    war = -1            # wet area ratio; -1 means "not computed / no data"
    fileName = ''       # absolute path of the radar file; '' if not found
def get_filename_radar(timeStr, inBaseDir='/scratch/lforesti/data/', product='AQC', timeAccumMin=5):
    '''
    Get name of radar file given a time string, product and temporal resolution

    Parameters
    ----------
    timeStr : str
        Time stamp string parsed by ti.timestring2datetime (format defined in
        time_tools_attractor -- presumably YYYYmmDDHHMMSS; confirm there).
    inBaseDir : str
        Base archive directory; files are expected under the
        <year>/<yearStr+julianDayStr>/ sub-directory built below.
    product : str
        Radar product code. 'RZC' files are binary '.801' composites with two
        wildcarded trailing characters; any other product is a '.gif'
        accumulation embedding the accumulation time in the name.
    timeAccumMin : int
        Accumulation time in minutes (zero-padded to 5 digits in the name).

    Returns
    -------
    (fileName, yearStr, julianDayStr, hourminStr) : tuple of str
        fileName is '' when no archived file matches the built wildcard.
    '''
    # time parameters
    timeAccumMinStr = '%05i' % timeAccumMin
    # timestamp
    timeLocal = ti.timestring2datetime(timeStr)
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeLocal)
    hour = timeLocal.hour
    minute = timeLocal.minute
    # Create filename for input
    hourminStr = ('%02i' % hour) + ('%02i' % minute)
    radarOperWildCard = '?'
    subDir = str(year) + '/' + yearStr + julianDayStr + '/'
    inDir = inBaseDir + subDir
    if product == 'RZC':
        fileNameWildCard = inDir + product + yearStr + julianDayStr + hourminStr + radarOperWildCard + radarOperWildCard + '.801'
    else:
        fileNameWildCard = inDir + product + yearStr + julianDayStr + hourminStr + radarOperWildCard + '_' + timeAccumMinStr + '*.gif'
    # Get filename matching regular expression
    fileName = get_filename_matching_regexpr(fileNameWildCard)
    return(fileName, yearStr, julianDayStr, hourminStr)
def get_filename_matching_regexpr(fileNameWildCard):
    """Return the first file in the wildcard's directory matching its basename.

    Parameters
    ----------
    fileNameWildCard : str
        Absolute path whose basename may contain fnmatch-style wildcards
        (e.g. '*' and '?').

    Returns
    -------
    str
        Absolute path of the first matching file, or '' when the directory
        does not exist or nothing matches.
    """
    inDir = os.path.dirname(fileNameWildCard)
    baseNameWildCard = os.path.basename(fileNameWildCard)
    # Check if directory exists
    if not os.path.isdir(inDir):
        print('Directory: ' + inDir + ' does not exists.')
        return ''
    # fnmatch.filter does the pattern matching in a single pass; sorting
    # makes the "first" match deterministic (os.listdir order is arbitrary
    # and platform dependent, which the original code silently depended on).
    matches = sorted(fnmatch.filter(os.listdir(inDir), baseNameWildCard))
    if not matches:
        return ''
    # os.path.join replaces the original manual trailing-slash bookkeeping.
    return os.path.join(inDir, matches[0])
def read_gif_image_rainrate(timeStr, inBaseDir='/scratch/lforesti/data/', product='AQC', timeAccumMin=5):
    '''
    Basic script to read a radar GIF file and transform to rainrate

    Returns (rainrate, fileNameRadar); rainrate is an empty list when the
    file does not exist.  Missing pixels keep the sentinel value -999.
    AQC accumulations (mm) are converted to mm/h; the division below is a
    true division thanks to the module-level "from __future__ import division".
    '''
    fileNameRadar,_,_,_ = get_filename_radar(timeStr, inBaseDir=inBaseDir, product=product, timeAccumMin=timeAccumMin)
    if os.path.isfile(fileNameRadar) == False:
        rainrate = []
    else:
        rain8bit, nrRows, nrCols = open_gif_image(fileNameRadar)
        # Generate lookup table
        noData = -999
        lut = dt.get_rainfall_lookuptable(noData)
        # Replace 8bit values with rain rates
        rainrate = lut[rain8bit]
        if (product == 'AQC'): # AQC is given in millimiters!!!
            rainrate[rainrate != noData] = rainrate[rainrate != noData]*(60/timeAccumMin)
    return(rainrate, fileNameRadar)
def read_bin_image(timeStr, product='RZC', minR = 0.08, fftDomainSize = 512, resKm = 1,\
    inBaseDir = '/scratch/lforesti/data/', noData = -999.0, cmaptype = 'MeteoSwiss', domain = 'CCS4'):
    """
    Read a binary (metranet) radar composite and build a Radar_object.

    The returned object carries rain-rate and reflectivity fields (plus
    NaN-masked variants for conditional statistics), the composite mask,
    the WAR statistic, the coordinates of the extracted sub-domain and
    colormap metadata.  An empty Radar_object is returned when the file is
    missing or unreadable (IOError).
    NOTE(review): assumes metranet returns data in mm/h on the CCS4 grid --
    confirm against the metranet reader.
    """
    # Limits of spatial domain
    if domain == 'CCS4':
        Xmin = 255000
        Xmax = 965000
        Ymin = -160000
        Ymax = 480000
    else:
        print('Domain not found.')
        sys.exit(1)
    allXcoords = np.arange(Xmin,Xmax+resKm*1000,resKm*1000)
    allYcoords = np.arange(Ymin,Ymax+resKm*1000,resKm*1000)
    # colormap
    color_list, clevs, clevsStr = dt.get_colorlist(cmaptype)
    cmap = colors.ListedColormap(color_list)
    norm = colors.BoundaryNorm(clevs, cmap.N)
    cmap.set_over('black',1)
    cmapMask = colors.ListedColormap(['black'])
    # Get filename
    fileName, yearStr, julianDayStr, hourminStr = get_filename_radar(timeStr, inBaseDir, product)
    timeLocal = ti.timestring2datetime(timeStr)
    # Check if file exists
    isFile = os.path.isfile(fileName)
    if (isFile == False):
        print('File: ', fileName, ' not found.')
        radar_object = Radar_object()
    else:
        try:
            ret = metranet.read_file(fileName, physic_value=True, verbose=False)
            rainrate = ret.data # mm h-1
            # Missing pixels (NaN in the raw product) get the noData sentinel.
            rainrate[np.isnan(rainrate)] = noData
            # Get coordinates of reduced domain
            if fftDomainSize>0:
                extent = dt.get_reduced_extent(rainrate.shape[1], rainrate.shape[0], fftDomainSize, fftDomainSize)
                Xmin = allXcoords[extent[0]]
                Ymin = allYcoords[extent[1]]
                Xmax = allXcoords[extent[2]]
                Ymax = allYcoords[extent[3]]
            subXcoords = np.arange(Xmin,Xmax,resKm*1000)
            subYcoords = np.arange(Ymin,Ymax,resKm*1000)
            # Select 512x512 domain in the middle
            if fftDomainSize>0:
                rainrate = dt.extract_middle_domain(rainrate, fftDomainSize, fftDomainSize)
            # Create mask radar composite
            mask = np.ones(rainrate.shape)
            mask[rainrate != noData] = np.nan
            mask[rainrate == noData] = 1
            # Set lowest rain thresholds
            if (minR > 0.0) and (minR < 500.0):
                rainThreshold = minR
            else: # default minimum rainfall rate
                rainThreshold = 0.08
            # Compute WAR
            war = st.compute_war(rainrate,rainThreshold, noData)
            # fills no-rain with nans (for conditional statistics)
            rainrateNans = np.copy(rainrate)
            condition = rainrateNans < rainThreshold
            rainrateNans[condition] = np.nan
            # fills no-rain with zeros and missing data with nans (for unconditional statistics)
            condition = rainrate < 0
            rainrate[condition] = np.nan
            condition = (rainrate < rainThreshold) & (rainrate > 0.0)
            rainrate[condition] = 0.0
            # Compute corresponding reflectivity
            # Z-R relationship Z = A * R^b with the coefficients below.
            A = 316.0
            b = 1.5
            # Take reflectivity value corresponding to minimum rainfall threshold as zero(0.08 mm/hr)
            dbzThreshold,_,_ = dt.rainrate2reflectivity(rainThreshold, A, b)
            # Convert rainrate to reflectivity, no-rain are set to zero (for unconditional statistics)
            dBZ, minDBZ, minRainRate = dt.rainrate2reflectivity(rainrate, A, b, 0.0)
            # fills nans with dbzThreshold for Fourier analysis
            condition1 = np.isnan(dBZ)
            condition2 = dBZ < dbzThreshold
            dBZFourier = np.copy(dBZ)
            dBZFourier[(condition1 == True) | (condition2 == True)] = dbzThreshold
            # fills no-rain and missing data with nans (for conditional statistics)
            condition = rainrateNans < rainThreshold
            dBZNans = np.copy(dBZ)
            dBZNans[condition] = np.nan
            ## Creates radar object
            radar_object = Radar_object()
            # fields
            radar_object.dBZ = dBZ
            radar_object.dBZFourier = dBZFourier
            radar_object.dBZNans = dBZNans
            radar_object.rain8bit = []
            radar_object.rainrate = rainrate
            radar_object.rainrateNans = rainrateNans
            radar_object.mask = mask
            # statistics
            radar_object.war = war
            # time stamps
            radar_object.datetime = timeLocal
            radar_object.datetimeStr = timeStr
            radar_object.hourminStr = hourminStr
            radar_object.yearStr = yearStr
            radar_object.julianDayStr = julianDayStr
            # metadata
            radar_object.fileName = fileName
            radar_object.dbzThreshold = dbzThreshold
            radar_object.rainThreshold = rainThreshold
            # Per-radar operation flags are only available for GIF products;
            # the binary reader leaves them empty.
            radar_object.alb = []
            radar_object.dol = []
            radar_object.lem = []
            radar_object.ppm = []
            radar_object.wei = []
            radar_object.dataQuality = []
            # Location
            radar_object.extent = (Xmin, Xmax, Ymin, Ymax)
            radar_object.subXcoords = subXcoords
            radar_object.subYcoords = subYcoords
            if dBZ.shape[0] == dBZ.shape[1]:
                radar_object.fftDomainSize = dBZ.shape[0]
            else:
                radar_object.fftDomainSize = dBZ.shape
            # colormaps
            radar_object.cmap = cmap
            radar_object.norm = norm
            radar_object.clevs = clevs
            radar_object.clevsStr = clevsStr
            radar_object.cmapMask = cmapMask
        except IOError:
            print('File ', fileName, ' not readable')
            radar_object = Radar_object()
    return radar_object
def read_gif_image(timeStr, product='AQC', minR = 0.08, fftDomainSize = 512, resKm = 1, timeAccumMin = 5,\
    inBaseDir = '/scratch/lforesti/data/', noData = -999.0, cmaptype = 'MeteoSwiss', domain = 'CCS4'):
    """
    Read a GIF radar product and build a Radar_object.

    Like read_bin_image, but the 8-bit GIF values are converted to rain
    rates through a lookup table, AQC accumulations (mm) are rescaled to
    mm/h, and the per-radar operation flags are read from the GIF metadata.
    An empty Radar_object is returned when the file is missing or
    unreadable (IOError).
    """
    # Limits of spatial domain
    if domain == 'CCS4':
        Xmin = 255000
        Xmax = 965000
        Ymin = -160000
        Ymax = 480000
    else:
        print('Domain not found.')
        sys.exit(1)
    allXcoords = np.arange(Xmin,Xmax+resKm*1000,resKm*1000)
    allYcoords = np.arange(Ymin,Ymax+resKm*1000,resKm*1000)
    # colormap
    color_list, clevs, clevsStr = dt.get_colorlist(cmaptype)
    cmap = colors.ListedColormap(color_list)
    norm = colors.BoundaryNorm(clevs, cmap.N)
    cmap.set_over('black',1)
    cmapMask = colors.ListedColormap(['black'])
    # Get filename
    fileName, yearStr, julianDayStr, hourminStr = get_filename_radar(timeStr, inBaseDir, product, timeAccumMin)
    timeLocal = ti.timestring2datetime(timeStr)
    # Get data quality from fileName
    # dataQuality = get_quality_fromfilename(fileName)
    # Check if file exists
    isFile = os.path.isfile(fileName)
    if (isFile == False):
        print('File: ', fileName, ' not found.')
        print(timeStr,inBaseDir,product,timeAccumMin)
        radar_object = Radar_object()
    else:
        # Reading GIF file
        #print('Reading: ', fileName)
        try:
            # Open GIF image
            rain8bit, nrRows, nrCols = open_gif_image(fileName)
            # Get GIF image metadata
            alb, dol, lem, ppm, wei = get_gif_radar_operation(fileName)
            # If metadata are not written in gif file derive them from the quality number in the filename
            # if (alb == -1) & (dol == -1) & (lem == -1) & (ppm == -1) & (wei == -1):
            # alb, dol, lem = get_radaroperation_from_quality(dataQuality)
            # Generate lookup table
            lut = dt.get_rainfall_lookuptable(noData)
            # Replace 8bit values with rain rates
            rainrate = lut[rain8bit]
            if (product == 'AQC'): # AQC is given in millimiters!!!
                rainrate[rainrate != noData] = rainrate[rainrate != noData]*(60/timeAccumMin)
            # Get coordinates of reduced domain
            if fftDomainSize > 0:
                extent = dt.get_reduced_extent(rainrate.shape[1], rainrate.shape[0], fftDomainSize, fftDomainSize)
                Xmin = allXcoords[extent[0]]
                Ymin = allYcoords[extent[1]]
                Xmax = allXcoords[extent[2]]
                Ymax = allYcoords[extent[3]]
            subXcoords = np.arange(Xmin,Xmax,resKm*1000)
            subYcoords = np.arange(Ymin,Ymax,resKm*1000)
            # Select 512x512 domain in the middle
            if fftDomainSize>0:
                rainrate = dt.extract_middle_domain(rainrate, fftDomainSize, fftDomainSize)
                rain8bit = dt.extract_middle_domain(rain8bit, fftDomainSize, fftDomainSize)
            # Create mask radar composite
            mask = np.ones(rainrate.shape)
            isNaN = (rainrate == noData)
            mask[~isNaN] = np.nan
            mask[isNaN] = 1
            # Set lowest rain thresholds
            if (minR > 0.0) and (minR < 500.0):
                rainThreshold = minR
            else: # default minimum rainfall rate
                rainThreshold = 0.08
            # Compute WAR
            war = st.compute_war(rainrate,rainThreshold, noData)
            # fills no-rain with nans (for conditional statistics)
            rainrateNans = np.copy(rainrate)
            condition = rainrateNans < rainThreshold
            rainrateNans[condition] = np.nan
            # Silence RuntimeWarnings raised by the NaN comparisons below.
            import warnings
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            # fills no-rain with zeros and missing data with nans (for unconditional statistics)
            condition = rainrate < 0
            rainrate[condition] = np.nan
            condition = (rainrate < rainThreshold) & (rainrate > 0.0)
            rainrate[condition] = 0.0
            # Compute corresponding reflectivity
            # Z-R relationship Z = A * R^b with the coefficients below.
            A = 316.0
            b = 1.5
            # Take reflectivity value corresponding to minimum rainfall threshold as zero(0.08 mm/hr)
            dbzThreshold,_,_ = dt.rainrate2reflectivity(rainThreshold, A, b)
            # Convert rainrate to reflectivity, no-rain are set to zero (for unconditional statistics)
            dBZ, minDBZ, minRainRate = dt.rainrate2reflectivity(rainrate, A, b, 0.0)
            # fills nans with dbzThreshold for Fourier analysis
            condition1 = np.isnan(dBZ)
            condition2 = dBZ < dbzThreshold
            dBZFourier = np.copy(dBZ)
            dBZFourier[(condition1 == True) | (condition2 == True)] = dbzThreshold
            # fills no-rain and missing data with nans (for conditional statistics)
            condition = rainrateNans < rainThreshold
            dBZNans = np.copy(dBZ)
            dBZNans[condition] = np.nan
            ## Creates radar object
            radar_object = Radar_object()
            # fields
            radar_object.dBZ = dBZ
            radar_object.dBZFourier = dBZFourier
            radar_object.dBZNans = dBZNans
            radar_object.rain8bit = rain8bit
            radar_object.rainrate = rainrate
            radar_object.rainrateNans = rainrateNans
            radar_object.mask = mask
            # statistics
            radar_object.war = war
            # time stamps
            radar_object.datetime = timeLocal
            radar_object.datetimeStr = timeStr
            radar_object.hourminStr = hourminStr
            radar_object.yearStr = yearStr
            radar_object.julianDayStr = julianDayStr
            # metadata
            radar_object.fileName = fileName
            radar_object.dbzThreshold = dbzThreshold
            radar_object.rainThreshold = rainThreshold
            radar_object.alb = alb
            radar_object.dol = dol
            radar_object.lem = lem
            radar_object.ppm = ppm
            radar_object.wei = wei
            radar_object.dataQuality = np.nan
            # Location
            radar_object.extent = (Xmin, Xmax, Ymin, Ymax)
            radar_object.subXcoords = subXcoords
            radar_object.subYcoords = subYcoords
            if dBZ.shape[0] == dBZ.shape[1]:
                radar_object.fftDomainSize = dBZ.shape[0]
            else:
                radar_object.fftDomainSize = dBZ.shape
            # colormaps
            radar_object.cmap = cmap
            radar_object.norm = norm
            radar_object.clevs = clevs
            radar_object.clevsStr = clevsStr
            radar_object.cmapMask = cmapMask
        except IOError:
            print('File ', fileName, ' not readable')
            radar_object = Radar_object()
    return(radar_object)
def get_filename_wavelets(inBaseDir, analysisType, timeDate, product='AQC', timeAccumMin=5, scaleKM=None, minR=0.08, format='netcdf'):
    """
    Build the full path of a wavelet-decomposition file.

    Parameters
    ----------
    inBaseDir : str
        Base archive directory (with trailing slash, as elsewhere in this module).
    analysisType : str
        Label embedded in the file name.
    timeDate : datetime.datetime
        Time stamp of the product.
    product : str
        Radar product code.
    timeAccumMin : int
        Accumulation time in minutes (zero-padded to 5 digits in the name).
    scaleKM : int or str
        Spatial scale of the wavelet decomposition in km; mandatory.
    minR : float
        Minimum rain-rate threshold encoded in the name ('Rgt<minR>').
    format : str
        One of 'netcdf', 'csv', 'png', 'gif'.

    Returns
    -------
    (fullName, dirName, fileName) : tuple of str

    Exits the process (sys.exit(1)) on an unknown format or a missing
    scaleKM, matching the behaviour of the sibling get_filename_* helpers.
    """
    # Map each supported output format to its extension (shared convention
    # with get_filename_stats / get_filename_velocity).
    extensions = {'netcdf': '.nc', 'csv': '.csv', 'png': '.png', 'gif': '.gif'}
    if format not in extensions:
        # Fixed copy-paste bug: this message used to blame get_filename_stats.
        print('Wrong file format in get_filename_wavelets')
        sys.exit(1)
    extension = extensions[format]
    if scaleKM is None:
        print('You have to input the spatial scale of the wavelet decomposition in KM')
        sys.exit(1)
    # Create time timestamp strings
    timeAccumMinStr = '%05i' % (timeAccumMin)
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeDate)
    hourminStr = ti.get_HHmm_str(timeDate.hour, timeDate.minute)
    subDir = ti.get_subdir(timeDate.year, julianDay)
    inDir = inBaseDir + subDir
    # Define filename of the wavelet decomposition
    fullName = inDir + product + '_' + analysisType + '_' + str(scaleKM) + 'km_' + yearStr + julianDayStr + hourminStr + \
        '_Rgt' + str(minR) + '_' + timeAccumMinStr + extension
    # Get directory name and base filename
    dirName = inDir
    fileName = os.path.basename(fullName)
    return(fullName, dirName, fileName)
def get_filename_stats(inBaseDir, analysisType, timeDate, product='AQC', timeAccumMin=5, quality=0, minR=0.08, wols=0, variableBreak = 0, format='netcdf'):
    '''
    Build the full path of a statistics file for a given time stamp.

    Returns (fullName, dirName, fileName).
    '''
    # Translate the requested output format into a file extension
    ext_by_format = {'netcdf': '.nc', 'csv': '.csv', 'png': '.png', 'gif': '.gif'}
    if format not in ext_by_format:
        print('Wrong file format in get_filename_stats')
        sys.exit(1)
    extension = ext_by_format[format]
    # Time stamp strings
    accumStr = '%05i' % (timeAccumMin)
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeDate)
    hourminStr = ti.get_HHmm_str(timeDate.hour, timeDate.minute)
    inDir = inBaseDir + ti.get_subdir(timeDate.year, julianDay)
    ### Assemble the file name
    fullName = (inDir + product + '_' + analysisType + '_' + yearStr + julianDayStr + hourminStr + str(quality) +
                '_Rgt' + str(minR) + '_WOLS' + str(wols) + '_varBreak' + str(variableBreak) + '_' + accumStr + extension)
    # Directory name and base file name
    return(fullName, inDir, os.path.basename(fullName))
def get_filename_velocity(inBaseDir, analysisType, timeDate, product='AQC', timeAccumMin=5, quality=0, format='netcdf'):
    '''
    Build the full path of a velocity-field file for a given time stamp.

    Returns (fullName, dirName, fileName).
    '''
    # Translate the requested output format into a file extension
    ext_by_format = {'netcdf': '.nc', 'csv': '.csv', 'png': '.png', 'gif': '.gif'}
    if format not in ext_by_format:
        print('Wrong file format in get_filename_velocity')
        sys.exit(1)
    extension = ext_by_format[format]
    # Time stamp strings
    accumStr = '%05i' % (timeAccumMin)
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeDate)
    hourminStr = ti.get_HHmm_str(timeDate.hour, timeDate.minute)
    inDir = inBaseDir + ti.get_subdir(timeDate.year, julianDay)
    ### Assemble the file name
    fullName = (inDir + product + '_' + analysisType + '_' + yearStr + julianDayStr + hourminStr +
                str(quality) + '_' + accumStr + extension)
    # Directory name and base file name
    return(fullName, inDir, os.path.basename(fullName))
def get_filename_HZT(dataDirHZT, dateTime):
    '''
    Build the full path of an HZT (freezing level height) file for a time stamp.

    Returns (fileNameHZT, dirName).
    '''
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(dateTime)
    # One sub-directory per julian day
    dirName = '%s%s/%s%s/' % (dataDirHZT, year, yearStr, julianDayStr)
    baseName = 'HZT%s%s%02i%02i0L.800' % (yearStr, julianDayStr, dateTime.hour, dateTime.minute)
    return(dirName + baseName, dirName)
def read_hzt_match_maple_archive(data, startTimeStr = '', endTimeStr = '', dict_colnames=[], task='add', dataDirHZT_base='/scratch/lforesti/data/', boxSize=64):
    '''
    Read the daily .npy files containing the boxes with freezing level height
    data (t,x,y,HZT) at origin and destination, match their time stamps
    against the large MAPLE archive, and fill/add the HZT columns.

    Parameters
    ----------
    data : 2d numpy array
        The MAPLE archive; column 0 must hold integer julian time stamps.
    startTimeStr, endTimeStr : str
        Period to process. When both are empty the period is derived from the
        time stamps found in column 0 of data.
    dict_colnames : dict
        Mapping from variable name to column index in data.
        NOTE(review): the declared default is a list, not a dict, and it is
        mutated via update() when task == 'add' - pass an explicit dict.
    task : str
        add: Add new columns with HZT data
        replace: Completely replace columns with HZT data
        complete: Only fill columns with HZT data where there are NaNs
    dataDirHZT_base : str
        Base directory holding the daily HZT box files.
    boxSize : int
        Box size in km (zero-padded to 3 digits in the file names).

    Returns
    -------
    (data, dict_colnames)
        The archive with the HZT_d/HZT_o columns filled, and the updated
        column-name dictionary.
    '''
    nrColsData = data.shape[1]
    nrVars = len(dict_colnames)
    # Sanity check: the dictionary must describe every column unless new ones are added
    if (nrColsData != nrVars) & (task != 'add'):
        print('The nr of columns of the data passed does not correspond to the nr of variables in the dictionary.')
        print(nrColsData, 'vs', nrVars)
        sys.exit(1)
    boxSizeStr = '%03i' % boxSize
    timeStampJulian = data[:,0].astype(int)
    # Derive the processing period from the archive itself if none is given
    if (len(startTimeStr) == 0) & (len(endTimeStr) == 0):
        startJulianTimeStr = '%09i' % np.min(timeStampJulian)
        endJulianTimeStr = '%09i' % np.max(timeStampJulian)
        startDateTimeDt = ti.juliantimestring2datetime(startJulianTimeStr)
        endDateTimeDt = ti.juliantimestring2datetime(endJulianTimeStr)
    else:
        startDateTimeDt = ti.timestring2datetime(startTimeStr)
        endDateTimeDt = ti.timestring2datetime(endTimeStr)
    # Create new columns for HZT data or get them if already available
    # (working array: column 0 = destination HZT, column 1 = origin HZT)
    if (len(dict_colnames) == 0) | (task == 'add'):
        if ('HZT_d' not in dict_colnames) & ('HZT_o' not in dict_colnames):
            HZT_MAPLE_array = np.ones((len(data),2))*[np.nan]
        else:
            print('HZT_d and HZT_o already in dict_colnames.')
            print('Task changed to complete columns.')
            task = 'complete'
            HZT_MAPLE_array = np.column_stack((data[:, dict_colnames['HZT_d']], data[:, dict_colnames['HZT_o']]))
    elif (task == 'replace') | (task == 'complete'):
        if ('HZT_d' in dict_colnames) & ('HZT_o' in dict_colnames):
            HZT_MAPLE_array = np.column_stack((data[:, dict_colnames['HZT_d']], data[:, dict_colnames['HZT_o']]))
        else:
            print('HZT_d and HZT_o not in dict_colnames.')
            print('Task changed to add columns.')
            task = 'add'
            HZT_MAPLE_array = np.ones((len(data),2))*[np.nan]
    else:
        print('Wrong task in read_hzt_match_maple_archive')
        sys.exit(1)
    # Loop over the days of the requested period (one destination/origin file pair per day)
    timeDate = startDateTimeDt
    while timeDate <= endDateTimeDt:
        # Print elapsed time (once per calendar month)
        if (timeDate.day == 1):
            ti.tic()
        timeDate_next = timeDate + datetime.timedelta(days=1)
        if (timeDate_next.day == 1):
            ti.toc('to match one month of boxes.')
        # Get filename
        year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeDate)
        subDir = str(year) + '/' + yearStr + julianDayStr + '/'
        fileNameHZT_dest = dataDirHZT_base + subDir + 'MAPLE-' + boxSizeStr + '-HZT_' + str(year) + '%02i' % timeDate.month + '%02i' % timeDate.day + '_destination.npy'
        fileNameHZT_orig = dataDirHZT_base + subDir + 'MAPLE-' + boxSizeStr + '-HZT_' + str(year) + '%02i' % timeDate.month + '%02i' % timeDate.day + '_origin.npy'
        # ti.tic()
        #### Read-in destination box file
        if os.path.isfile(fileNameHZT_dest):
            dataDest = np.load(fileNameHZT_dest)
            print(fileNameHZT_dest, 'read.')
            # Fill in large MAPLE array at the right rows (match on time stamp)
            dayTimes = np.unique(dataDest[:,0])
            for t in dayTimes:
                boolTime_dest_all = (timeStampJulian == t)
                boolTime_dest_day = (dataDest[:,0] == t)
                nrMatchingBoxes_all = np.sum(boolTime_dest_all)
                nrMatchingBoxes_day = np.sum(boolTime_dest_day)
                # print(t, np.sum(boolTime_dest_all), np.sum(boolTime_dest_day))
                # The daily file and the archive must contain the same boxes for this time stamp
                if nrMatchingBoxes_all != nrMatchingBoxes_day:
                    print('You should use the same dataset to extract and match the HZT values.')
                    print('Expecing: ', nrMatchingBoxes_all, 'values. Received:', nrMatchingBoxes_day, 'values.')
                    sys.exit()
                if (task == 'complete'):
                    # Only overwrite rows that are still NaN
                    nrNaNs = np.sum(np.isnan(HZT_MAPLE_array[boolTime_dest_all,0]))
                    if (nrNaNs > 0):
                        HZT_MAPLE_array[boolTime_dest_all,0] = dataDest[boolTime_dest_day,-1]
                    if (nrNaNs == 0) & (t == dayTimes[-1]):
                        print('Destination already in archive.')
                else:
                    HZT_MAPLE_array[boolTime_dest_all,0] = dataDest[boolTime_dest_day,-1]
        #### Read-in origin box file
        if os.path.isfile(fileNameHZT_orig):
            dataOrig = np.load(fileNameHZT_orig)
            print(fileNameHZT_orig, 'read.')
            # Fill in large MAPLE array at the right rows (match on time stamp)
            dayTimes = np.unique(dataOrig[:,0])
            for t in dayTimes:
                boolTime_orig_all = (timeStampJulian == t)
                boolTime_orig_day = (dataOrig[:,0] == t)
                nrMatchingBoxes_all = np.sum(boolTime_orig_all)
                nrMatchingBoxes_day = np.sum(boolTime_orig_day)
                #print(t, np.sum(boolTime_orig_all), np.sum(boolTime_orig_day))
                # The daily file and the archive must contain the same boxes for this time stamp
                if nrMatchingBoxes_all != nrMatchingBoxes_day:
                    print('You should use the same dataset to extract and match the HZT values.')
                    print('Expecing: ', nrMatchingBoxes_all, 'values. Received:', nrMatchingBoxes_day, 'values.')
                    sys.exit()
                if (task == 'complete'):
                    # Only overwrite rows that are still NaN
                    nrNaNs = np.sum(np.isnan(HZT_MAPLE_array[boolTime_orig_all,1]))
                    if (nrNaNs > 0):
                        HZT_MAPLE_array[boolTime_orig_all,1] = dataOrig[boolTime_orig_day,-1]
                    if (nrNaNs == 0) & (t == dayTimes[-1]):
                        print('Origin already in archive.')
                else:
                    HZT_MAPLE_array[boolTime_orig_all,1] = dataOrig[boolTime_orig_day,-1]
        # ti.toc('to process one day.')
        timeDate = timeDate + datetime.timedelta(days=1)
    ##########
    # Finally put the collected HZT columns back into the archive
    if (len(dict_colnames) == 0) | (task == 'add'):
        print('Adding HZT columns to data...')
        data = np.column_stack((data, HZT_MAPLE_array))
        max_col_nr = max(dict_colnames.values())
        dict_colnames.update({'HZT_d' : (max_col_nr+1)})
        dict_colnames.update({'HZT_o' : (max_col_nr+2)})
    if (task == 'replace') | (task == 'complete'):
        # Replace column data
        print('Replacing HZT columns to data...')
        data[:, dict_colnames['HZT_d']] = HZT_MAPLE_array[:,0]
        data[:, dict_colnames['HZT_o']] = HZT_MAPLE_array[:,1]
    return(data, dict_colnames)
def get_filename(inBaseDir, analysisType, timeDate, varNames, varValues, product='AQC', timeAccumMin=5, quality=0, format='netcdf', sep='_'):
    '''
    Build a generic product file name with an arbitrary list of variable
    name/value pairs appended to it.

    Parameters
    ----------
    inBaseDir : str
        Base directory under which the year/julian-day sub-directories live.
    analysisType : str
        Type of analysis encoded in the file name.
    timeDate : datetime.datetime
        Time stamp of the product.
    varNames, varValues : sequences of equal length
        Pairs appended to the name as <sep><name><value>.
    product : str
        Radar product identifier (e.g. 'AQC').
    timeAccumMin : int
        Accumulation time in minutes (zero-padded to 5 digits in the name).
    quality : int, str
        One-character quality flag appended after the time stamp.
    format : str
        One of 'netcdf', 'csv', 'png', 'gif' (selects the file extension).
    sep : str
        Separator between the file name components.

    Returns
    -------
    (fullName, dirName, fileName) : tuple of str
    '''
    # Map the requested format onto a file extension
    extensions = {'netcdf': '.nc', 'csv': '.csv', 'png': '.png', 'gif': '.gif'}
    if format not in extensions:
        # Bug fix: the error message used to wrongly refer to get_filename_stats
        print('Wrong file format in get_filename')
        sys.exit(1)
    extension = extensions[format]
    if len(varNames) != len(varValues):
        print('Number of elements in varNames and varValues must be the same.')
        sys.exit(1)
    # Create time timestamp strings
    timeAccumMinStr = '%05i' % (timeAccumMin)
    year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeDate)
    hourminStr = ti.get_HHmm_str(timeDate.hour, timeDate.minute)
    subDir = ti.get_subdir(timeDate.year, julianDay)
    inDir = inBaseDir + subDir
    ### Define filename
    fullName = inDir + product + sep + analysisType + sep + yearStr + julianDayStr + hourminStr + str(quality)
    # Append the variable name/value pairs in order
    for name, value in zip(varNames, varValues):
        fullName = fullName + sep + str(name) + str(value)
    fullName = fullName + sep + timeAccumMinStr + extension
    # Get directory name and base filename
    dirName = inDir
    fileName = os.path.basename(fullName)
    return(fullName, dirName, fileName)
def read_csv(fileName, header=True):
    '''
    Read a CSV file containing an array of data into a list of rows.

    With header == True the first row is treated as the variable names and
    the function returns (numpy array of the remaining rows, header list);
    otherwise the raw list of rows (including the first one) is returned.
    '''
    with open(fileName, 'r') as csvfile:
        rows = list(csv.reader(csvfile, delimiter=',', quotechar='|'))
    if header == True:
        # First row holds the variable names
        return(np.array(rows[1:]), rows[0])
    return(rows)
def write_csv(fileName, dataArray, header=[]):
'''
Function to write a CSV file containing an array of data.
You have the option to pass a header with variable names.
'''
f = open(fileName, 'w')
csvOut = csv.writer(f)
# fmtArray = ["%i","%.3f"]
# for c in range(0, len(fmtArray)):
# dataArray[:,c] = map(lambda n: fmtArray[c] % n, dataArray[:,c])
if len(header) != 0:
csvOut.writerow(header)
if len(dataArray[0]) > 1:
csvOut.writerows(dataArray)
else:
for val in dataArray:
csvOut.writerow([val])
f.close()
def write_singlecol(fileName, dataArray):
    '''
    Write each element of dataArray to fileName, one per line.
    '''
    # 'with' closes the handle deterministically (the original never closed
    # the file, so data could stay unflushed until interpreter exit)
    with open(fileName, 'w') as f:
        f.writelines([ret + '\n' for ret in dataArray])
# Read-in list of CSV files containing radar rainfall statistics
def csv_list2array(timeStart, timeEnd, inBaseDir, analysisType='STATS', product='AQC', quality=0, timeAccumMin=5, minR=0.08, wols=0, variableBreak=0):
    '''
    Collect the daily statistics CSV files between timeStart and timeEnd
    into a single list of rows.

    Returns
    -------
    (listStats, variableNames) : (list of lists, list of str)
        Rows sorted by time stamp (column 0) with duplicate time stamps
        removed, and the column names read from the first readable file.
        Both lists are empty when no file could be read.
    '''
    listStats = []
    variableNames = []
    timeLocal = timeStart
    while timeLocal <= timeEnd:
        # Create filename (one file per day)
        fileName,_,_ = get_filename_stats(inBaseDir, analysisType, timeLocal, product, timeAccumMin, quality, minR, wols, variableBreak, format='csv')
        print('Reading: ', fileName)
        try:
            df = pd.read_csv(fileName, sep=',')
            if len(variableNames) == 0:
                # Keep the column names of the first file that could be read
                variableNames = list(df.columns.values)
            # Bug fix: df.as_matrix() was removed in pandas >= 1.0; use .values.
            # (The original if/else around read_csv had two identical branches.)
            listStats = listStats + df.values.tolist()
        except Exception:
            # Best effort: missing or unreadable daily files are simply skipped
            print(fileName, ' empty.')
        # Update time (one file per day)
        timeLocal = timeLocal + datetime.timedelta(hours = 24)
    # Check if found data
    if (len(listStats) == 0):
        print('No data stored in array.')
        return(listStats, variableNames)
    # Sort list of lists by first variable (time)
    listStats.sort(key=itemgetter(0))
    # Remove duplicates (same time stamp in column 0)
    df = pd.DataFrame(listStats)
    df = df.drop_duplicates(0)
    listStats = df.values.tolist()
    return(listStats, variableNames)
# Read-in list of NETCDF files containing radar rainfall statistics
def netcdf_list2array(timeStart,timeEnd, inBaseDir, variableNames = None, analysisType='STATS', product='AQC', quality=0, timeAccumMin=5, minR=0.08, wols=0, variableBreak=0):
    '''
    Collect the daily statistics netCDF files between timeStart and timeEnd
    into a single list of rows.

    variableNames : list of str or None
        Variables to read. If None (or empty) the variable names are taken
        from the first readable file.

    Returns
    -------
    (listStats, variableNames); both empty when no file could be read.
    '''
    # Avoid the original mutable default argument ([]) for variableNames
    if variableNames is None:
        variableNames = []
    listStats = []
    timeLocal = timeStart
    while timeLocal <= timeEnd:
        # Create filename (one file per day)
        fileName,_,_ = get_filename_stats(inBaseDir, analysisType, timeLocal, product, timeAccumMin, quality, minR, wols, variableBreak, format='netcdf')
        print('Reading: ', fileName)
        try:
            # Read netcdf (requested variables, or everything in the file)
            if len(variableNames) > 0:
                arrayStats,_ = read_netcdf_globalstats(fileName, variableNames)
            else:
                arrayStats, variableNames = read_netcdf(fileName)
            # Concatenate lists
            listStats = listStats + arrayStats
        except Exception:
            # Best effort: missing or unreadable daily files are simply skipped
            print(fileName, ' empty.')
        # Update time (one file per day)
        timeLocal = timeLocal + datetime.timedelta(hours = 24)
    # Check if found data
    if (len(listStats) == 0):
        print('No data stored in array.')
        return(listStats, variableNames)
    # Sort list of lists by first variable (time)
    listStats.sort(key=itemgetter(0))
    # Remove duplicates (same time stamp in column 0)
    df = pd.DataFrame(listStats)
    df = df.drop_duplicates(0)
    listStats = df.values.tolist()
    return(listStats, variableNames)
def write_netcdf_globalstats(fileName, headers, dataArray, lowRainThreshold, boolWOLS, spectralSlopeLims):
    '''
    Write an array of daily rainfall statistics to a netCDF file.

    Parameters
    ----------
    fileName : str
        Output netCDF file name.
    headers : list of str
        Variable names; headers[0] is the time stamp, the remaining names
        describe the columns of dataArray[:,1:].
    dataArray : 2d array
        One row per time stamp; column 0 holds the time stamps.
    lowRainThreshold : float
        Rainfall threshold [mm/hr] used for the conditional statistics.
    boolWOLS : int
        1 if the FFT spectrum was fitted by weighted OLS, 0 for plain OLS.
    spectralSlopeLims : sequence
        Wavelength limits [km] used to fit the spectral slopes.
    '''
    nrSamples = dataArray.shape[0]
    if boolWOLS == 1:
        strWeightedOLS = "Weighted Ordinary Least Squares"
    else:
        strWeightedOLS = "Ordinary Least Squares"
    # Create netCDF Dataset
    nc_fid = Dataset(fileName, 'w', format='NETCDF4')
    nc_fid.description = "Statistics computed for radar rainfall >= " + str(lowRainThreshold) + " mm/hr"
    # Bug fix: the two comment assignments used to overwrite each other,
    # silently discarding the OLS description; keep both in one attribute.
    nc_fid.comment = "FFT spectrum fitted by " + strWeightedOLS + ". " + \
        "Spectral_slope_lims [km] = " + str(spectralSlopeLims)
    nc_fid.source = "MeteoSwiss"
    # Time dimension and coordinate variable
    nc_fid.createDimension('time', nrSamples) # Much larger file if putting 'None' (unlimited size)
    nc_time = nc_fid.createVariable('time', 'i8', dimensions=('time'))
    nc_time.description = "Timestamp (UTC)"
    nc_time.units = "%YYYY%MM%DD%HH%mm%SS"
    nc_time[:] = dataArray[:,0]
    # Radar-quality variables fit in one byte each
    radarQualityVars = ('alb', 'dol', 'lem', 'ppm', 'wei')
    # Description (and, below, units) of every known variable name
    descriptions = {
        'alb': "Number of valid fields from Albis radar (-1: not active, 0: not in operation, 1: ok, 12: correct hourly accumulation).",
        'dol': "Number of valid fields from Dole radar",
        'lem': "Number of valid fields from Lema radar",
        'ppm': "Number of valid fields from Plaine Morte radar",
        'wei': "Number of valid fields from Weissfluhjoch radar",
        'war': "Wet area ratio (WAR). Fraction of rainy pixels.",
        'r_mean': "Unconditional mean precipitation (including zeros). A.k.a. image mean flux (IMF)",
        'r_std': "Unconditional st. dev. of precipitation (including zeros).",
        'r_cmean': "Conditional mean precipitation >= " + str(lowRainThreshold) + " mm/hr",
        'r_cstd': "Conditional st. dev. of precipitation >= " + str(lowRainThreshold) + " mm/hr",
        'dBZ_mean': "Unconditional mean precipitation in dBZ units (including zeros)",
        'dBZ_std': "Unconditional st. dev. of precipitation in dBZ units (including zeros).",
        'dBZ_cmean': "Conditional mean precipitation in dBZ units >= " + str(lowRainThreshold) + " mm/hr",
        'dBZ_cstd': "Conditional st. dev. of precipitation in dBZ units >= " + str(lowRainThreshold) + " mm/hr",
        'beta1': "Slope of the Fourier power spectrum for large spatial wavelengths [20-512 km]",
        'corr_beta1': "Correlation coeff. of the linear fit for beta1",
        'beta2': "Slope of the Fourier power spectrum for small spatial wavelengths [3-20 km]",
        'corr_beta2': "Correlation coeff. of the linear fit for beta2",
        'scaling_break': "Best scaling break of the 1d power spectrum",
        'eccentricity': "Eccentricity of the anisotropy [0-1]",
        'orientation': "Orientation of the anisotropy [-90 to 90 degrees]",
    }
    units = {
        'war': "Percentage [%]",
        'r_mean': "mm/hr",
        'r_std': "mm/hr",
        'r_cmean': "mm/hr",
        'r_cstd': "mm/hr",
        'dBZ_mean': "dB",
        'dBZ_std': "dB",
        'dBZ_cmean': "dB",
        'dBZ_cstd': "dB",
        'scaling_break': "km",
    }
    # Create and fill data into variables
    nrVariables = dataArray.shape[1]-1
    for var in range(0,nrVariables):
        varName = headers[var+1]
        # Create variable (byte for radar quality flags, float otherwise)
        if varName in radarQualityVars:
            nc_var = nc_fid.createVariable(varName, 'i1', dimensions=('time',))
        else:
            nc_var = nc_fid.createVariable(varName, 'f4', dimensions=('time',))
        # Put data into variable
        nc_var[:] = dataArray[:,var+1]
        # Attach metadata when the variable name is known
        if varName in descriptions:
            nc_var.description = descriptions[varName]
        if varName in units:
            nc_var.units = units[varName]
    nc_fid.close()
def read_netcdf_globalstats(fileName, variableNames = None):
    '''
    Read a set of variables from a statistics netCDF file.

    Parameters
    ----------
    fileName : str
    variableNames : list of str or None
        Variables to read; if None, all variables in the file are read.

    Returns
    -------
    (dataArray, variableNames)
        dataArray is a list of per-time-stamp tuples (one element per
        variable), variableNames the list of variable names actually read.
    '''
    # Open data set
    nc_fid = Dataset(fileName, 'r', format='NETCDF4')
    # Get and read whole list of variables if None is passed
    if variableNames is None:
        variableNames = [str(var) for var in nc_fid.variables]
    # Read-in variables one by one (one sequence per variable)
    dataArray = []
    for varName in variableNames:
        dataArray.append(nc_fid.variables[varName][:])
    nc_fid.close()
    # Transpose the per-variable sequences into per-time-stamp rows.
    # Bug fix: on Python 3 zip() returns an iterator, but callers
    # concatenate the result with lists, so it must be materialized.
    dataArray = list(zip(*dataArray))
    return(dataArray, variableNames)
def write_netcdf_flow(fileName, timeStamps, xvec, yvec, Ufields, Vfields, noData=-999.0):
    '''
    Function to write out one or several flow fields to netCDF file
    '''
    # Accept a scalar, a list or a numpy array of time stamps
    if type(timeStamps) is not list:
        if type(timeStamps) is np.ndarray:
            timeStamps = timeStamps.tolist()
        else:
            timeStamps = [timeStamps]
    # Global attributes
    ncfile = Dataset(fileName, 'w', format='NETCDF4')
    ncfile.title = 'Apparent radar velocity field'
    ncfile.institution = 'MeteoSwiss, Locarno-Monti'
    ncfile.description ="Motion vectors computed using the Lucas-Kanade tracking algorithm and gridded by Kernel interpolation"
    ncfile.comment = 'File generated the ' + str(datetime.datetime.now()) + '.'
    ncfile.noData = noData
    # Time dimension and coordinate variable
    ncfile.createDimension('time', len(timeStamps)) # Much larger file if putting 'None' (unlimited size)
    var_time = ncfile.createVariable('time', 'i8', dimensions=('time'))
    var_time.description = "Timestamp (UTC)"
    var_time.units = "%YYYY%MM%DD%HH%mm%SS"
    var_time[:] = timeStamps
    # Spatial dimensions
    for dimName, dimSize in zip(['x', 'y'], [int(xvec.shape[0]), int(yvec.shape[0])]):
        ncfile.createDimension(dimName, dimSize)
    # Coordinate variables (converted from m to km)
    var_x = ncfile.createVariable('x', 'f4', dimensions='x')
    var_x.description = "Swiss easting"
    var_x.units = "km"
    var_x[:] = xvec/1000
    var_y = ncfile.createVariable('y', 'f4', dimensions='y')
    var_y.description = "Swiss northing"
    var_y.units = "km"
    var_y[:] = yvec/1000
    # Velocity components
    var_u = ncfile.createVariable('U', 'f4', dimensions=('time', 'y', 'x'), zlib=True)
    var_u.description = "Optical flow - zonal component (West -> East)"
    var_u.units = "km/5min"
    var_u[:] = Ufields
    var_v = ncfile.createVariable('V', 'f4', dimensions=('time', 'y', 'x'), zlib=True)
    var_v.description = "Optical flow - meridional component (South -> North))"
    var_v.units = "km/5min"
    var_v[:] = Vfields
    ncfile.close()
def write_netcdf_waveletcoeffs(fileName, timeStamps, \
    xvecs, yvecs, waveletCoeffs, waveletType = 'none', noData=-999.0):
    '''
    Write one set of multi-scale wavelet coefficients to a netCDF file
    (one netCDF group per decomposition scale, each with its own grid).

    Parameters
    ----------
    fileName : str
        Output netCDF file name.
    timeStamps : scalar, list or np.ndarray
        Time stamp(s) of the coefficient field(s).
    xvecs, yvecs : list of 1d arrays
        Coordinate vectors [m] for each decomposition scale.
    waveletCoeffs : list of 2d arrays
        Wavelet coefficients, one array per scale (coarsest scale last).
    waveletType : str
        Name of the wavelet (stored as metadata only).
    noData : float
        No-data value, stored as a global attribute.
    '''
    # Accept a scalar, a list or a numpy array of time stamps
    if type(timeStamps) is not list:
        if type(timeStamps) is np.ndarray:
            timeStamps = timeStamps.tolist()
        else:
            timeStamps = [timeStamps]
    # Create netCDF Dataset
    nc_fid = Dataset(fileName, 'w', format='NETCDF4')
    nc_fid.title = 'Wavelet coefficients of rainfall field'
    nc_fid.institution = 'MeteoSwiss, Locarno-Monti'
    nc_fid.description = waveletType + " wavelet"
    nc_fid.comment = 'File generated the ' + str(datetime.datetime.now()) + '.'
    nc_fid.noData = noData
    # Time dimension
    nrSamples = len(timeStamps)
    nc_fid.createDimension('time', nrSamples) # Much larger file if putting 'None' (unlimited size)
    nc_time = nc_fid.createVariable('time', 'i8', dimensions=('time'))
    nc_time.description = "Timestamp (UTC)"
    nc_time.units = "%YYYY%MM%DD%HH%mm%SS"
    nc_time[:] = timeStamps
    # Generate groups to store the wavelet coefficients at different scales (x,y,wc)
    for scale in range(0,len(waveletCoeffs)):
        # Get data at particular scale
        scalegrp = nc_fid.createGroup("wc_scale" + str(scale))
        xvec = xvecs[scale]
        yvec = yvecs[scale]
        waveletCoeffScale = np.array(waveletCoeffs[scale])
        # Spatial dimension
        dimNames = ['x','y']
        dimensions = [int(xvec.shape[0]),int(yvec.shape[0])]
        for i in range(len(dimensions)):
            scalegrp.createDimension(dimNames[i],dimensions[i])
        # Write out coordinates (converted from m to km)
        w_nc_x = scalegrp.createVariable('x', 'f4', dimensions='x')
        w_nc_x.description = "Swiss easting"
        w_nc_x.units = "km"
        w_nc_x[:] = xvec/1000
        w_nc_y = scalegrp.createVariable('y', 'f4', dimensions='y')
        w_nc_y.description = "Swiss northing"
        w_nc_y.units = "km"
        w_nc_y[:] = yvec/1000
        # Write out wavelet coefficients
        varName = 'wc' + str(scale)
        if scale != len(waveletCoeffs)-1:
            # Scale in km derived from the coordinate spacing
            scaleKm = int((xvec[1] - xvec[0])/1000)
        else:
            # Coarsest scale has a single coordinate: derive the spacing
            # from the previous scale (which is a factor of two finer)
            previousScaleKm = xvecs[scale-1][1] - xvecs[scale-1][0]
            scaleKm = int(previousScaleKm*2/1000)
        w_nc_u = scalegrp.createVariable(varName, 'f4', dimensions=('time', 'y', 'x'), zlib=True)
        w_nc_u.description = "Wavelet coefficients at scale " + str(scaleKm) + ' km'
        w_nc_u.units = "amplitude"
        w_nc_u[:] = waveletCoeffScale
    nc_fid.close()
#@profile
def netcdf_list2wavelet_array(timeStart, timeEnd, inBaseDir, analysisType='WAVELET', \
    product='AQC', timeAccumMin=5, scaleKM=None):
    '''
    Open a series of netCDF files containing "upscaled" rainfall fields with
    wavelets (one file per day) and stack the flattened fields into one array.

    Returns
    -------
    (arrayWaveletScale, arrayTimeStamps, fieldSize, extent) on success;
    an empty list when no file could be read (kept for backward compatibility).
    '''
    listWaveletScale = []
    listTimeStamps = []
    fieldSizeDone = False
    timeLocal = timeStart
    while timeLocal <= timeEnd:
        # Create filename (one file per day)
        fileName,_,_ = get_filename_wavelets(inBaseDir, analysisType, timeLocal, product, \
            timeAccumMin=timeAccumMin, scaleKM=scaleKM, format='netcdf')
        try:
            # Read netcdf
            arrayWaveletScale, arrayTimes, extent = read_netcdf_waveletscale(fileName)
            # Remember the 2d field size of the first non-empty file.
            # Bug fix: the original compared a shape tuple with an integer
            # (.shape > 0), which raises TypeError on Python 3 and made the
            # bare except silently skip every single file.
            if (arrayWaveletScale[0].size > 0) & (fieldSizeDone == False):
                fieldSize = arrayWaveletScale[0].shape
                fieldSizeDone = True
            # Flatten 2D arrays of wavelet coeffs
            arrayWaveletScaleFlat = []
            for t in range(0,len(arrayWaveletScale)):
                arrayWaveletScaleFlat.append(arrayWaveletScale[t].ravel())
            # Concatenate lists
            listWaveletScale = listWaveletScale + arrayWaveletScaleFlat
            listTimeStamps = listTimeStamps + arrayTimes.tolist()
            print(fileName, 'read successfully.')
        except Exception:
            # Best effort: missing or unreadable daily files are simply skipped
            print(fileName, 'empty.')
        # Update time (one file per day)
        timeLocal = timeLocal + datetime.timedelta(hours = 24)
    # Check if found data
    if (len(listWaveletScale) == 0):
        print('No data stored in array.')
        return(listWaveletScale)
    # Convert to numpy arrays
    arrayTimeStamps = np.asarray(listTimeStamps)
    arrayWaveletScale = np.asarray(listWaveletScale)
    # Remove duplicate time stamps
    uniqueTimeStamps, idxUnique = np.unique(arrayTimeStamps, return_index=True)
    arrayTimeStamps = arrayTimeStamps[idxUnique]
    arrayWaveletScale = arrayWaveletScale[idxUnique,:]
    return(arrayWaveletScale, arrayTimeStamps, fieldSize, extent)
def read_netcdf_waveletscale(fileName):
    '''
    Read one netCDF file of wavelet coefficients at a single scale.

    Returns
    -------
    (waveletArray, timeArray, extent)
        waveletArray : array of coefficients (time, y, x)
        timeArray : array of time stamps
        extent : [xmin, xmax, ymin, ymax] of the domain, padded by half a
            grid cell so it describes the outer pixel edges.
    '''
    # Open data set
    nc_fid = Dataset(fileName, 'r', format='NETCDF4')
    # Read-in the array of timestamps
    timeArray = nc_fid.variables['time'][:]
    # Read-in the array of wavelet coefficients
    waveletArray = nc_fid.variables['wc'][:]
    # Read-in the extent of the domain (pixel centers -> outer pixel edges)
    xcoords = nc_fid.variables['x'][:]
    ycoords = nc_fid.variables['y'][:]
    resX = np.abs(xcoords[1] - xcoords[0])
    resY = np.abs(ycoords[1] - ycoords[0])
    # Note: the original first assigned an unpadded extent that was
    # immediately overwritten (dead code); only the padded one is kept.
    extent = [np.min(xcoords)-resX/2, np.max(xcoords)+resX/2, np.min(ycoords)-resY/2, np.max(ycoords)+resY/2]
    nc_fid.close()
    return(waveletArray, timeArray, extent)
def write_netcdf_waveletscale(fileName, timeStampsArray, \
    xvec, yvec, waveletCoeffsArray, scaleKM, waveletType = 'none', noData=-999.0):
    '''
    Function to write out multiple fields of wavelet coefficients at ONE SELECTED SCALE to netCDF file

    Parameters
    ----------
    fileName : str
        Output netCDF file name.
    timeStampsArray : list or np.ndarray
        One time stamp per field in waveletCoeffsArray.
    xvec, yvec : 1d arrays
        Coordinate vectors [m] of the (single) scale written out.
    waveletCoeffsArray : sequence of 2d arrays
        Wavelet coefficient fields, one per time stamp.
    scaleKM : int
        Spatial scale [km] (stored in the variable description only).
    waveletType : str
        Name of the wavelet (stored as metadata only).
    noData : float
        No-data value, stored as a global attribute.
    '''
    # Each time stamp must correspond to exactly one coefficient field
    if len(timeStampsArray) != len(waveletCoeffsArray):
        print('timeStampsArray and waveletCoeffsArray should have the same number of elements in write_netcdf_waveletscale')
        print(len(timeStampsArray), 'vs', len(waveletCoeffsArray))
        sys.exit(1)
    # Accept a scalar, a list or a numpy array of time stamps
    if type(timeStampsArray) is not list:
        if type(timeStampsArray) is np.ndarray:
            timeStampsArray = timeStampsArray.tolist()
        else:
            timeStampsArray = [timeStampsArray]
    try:
        # Create netCDF Dataset
        nc_fid = Dataset(fileName, 'w', format='NETCDF4')
        nc_fid.title = 'Wavelet coefficients of rainfall field'
        nc_fid.institution = 'MeteoSwiss, Locarno-Monti'
        nc_fid.description = waveletType + " wavelet"
        nc_fid.comment = 'File generated the ' + str(datetime.datetime.now()) + '.'
        nc_fid.noData = noData
        # Time dimension
        nrSamples = len(timeStampsArray)
        nc_fid.createDimension('time', nrSamples) # Much larger file if putting 'None' (unlimited size)
        nc_time = nc_fid.createVariable('time', 'i8', dimensions=('time'))
        nc_time.description = "Timestamp (UTC)"
        nc_time.units = "%YYYY%MM%DD%HH%mm%SS"
        nc_time[:] = timeStampsArray
        # Spatial dimension
        dimNames = ['x','y']
        dimensions = [int(len(xvec)),int(len(yvec))]
        for i in range(len(dimensions)):
            nc_fid.createDimension(dimNames[i],dimensions[i])
        # Write out coordinates (converted from m to km)
        w_nc_x = nc_fid.createVariable('x', 'f4', dimensions='x')
        w_nc_x.description = "Swiss easting"
        w_nc_x.units = "km"
        w_nc_x[:] = xvec/1000
        w_nc_y = nc_fid.createVariable('y', 'f4', dimensions='y')
        w_nc_y.description = "Swiss northing"
        w_nc_y.units = "km"
        w_nc_y[:] = yvec/1000
        # Write out wavelet coefficients
        varName = 'wc'
        w_nc_u = nc_fid.createVariable(varName, 'f4', dimensions=('time', 'y', 'x'), zlib=True)
        w_nc_u.description = "Wavelet coefficients at scale " + str(scaleKM) + ' km'
        w_nc_u.units = "amplitude"
        waveletCoeffsArray = np.array(waveletCoeffsArray)
        w_nc_u[:] = waveletCoeffsArray
        nc_fid.close()
    except:
        # Dump the inputs to help diagnose malformed coefficient arrays
        print('NetCDF writing error in write_netcdf_waveletscale')
        print('xvec:', xvec)
        print('yvec:', yvec)
        print('waveletCoeffsArray:', waveletCoeffsArray)
        print('waveletCoeffsArray.shape:', waveletCoeffsArray.shape)
        sys.exit(1)
def get_file_matching_expr(inDir, fileNameWildCard):
    '''
    Return the name of a file in inDir matching the given wildcard pattern
    (the last match in directory-listing order), or None when nothing matches.
    '''
    matchedName = None
    for entry in os.listdir(inDir):
        if fnmatch.fnmatch(entry, fileNameWildCard):
            matchedName = entry
    return(matchedName)
def get_files_period(timeStart, timeEnd, inBaseDir, fileNameExpr, tempResMin = 5):
    '''
    Collect the full paths of the PNG files matching fileNameExpr for every
    time step (tempResMin minutes apart) between timeStart and timeEnd.
    Time steps without a matching file are silently skipped.
    '''
    fileList = []
    timeLocal = timeStart
    while timeLocal <= timeEnd:
        year, yearStr, julianDay, julianDayStr = ti.parse_datetime(timeLocal)
        timeLocalStr = ti.datetime2juliantimestring(timeLocal)
        # Directory holding the images of the current julian day
        imgDir = inBaseDir + str(year) + '/' + yearStr + julianDayStr + '/'
        # Look for a file matching the expression and the time stamp
        matched = get_file_matching_expr(imgDir, fileNameExpr + '*' + timeLocalStr + '*.png')
        if matched != None:
            fileList.append(imgDir + matched)
        # Advance to the next time step
        timeLocal = timeLocal + datetime.timedelta(minutes = tempResMin)
    return(fileList)
def open_gif_image(fileName):
    '''
    Function to read the radar rainfall field from a gif file.

    Parameters
    ----------
    fileName : str

    Returns
    -------
    rain8bit: int
        2d numpy array containing the radar rainfall field values using 8-bit coding
    nrRows: int
        Number of rows of the radar field
    nrCols: int
        Number of cols of the radar field
    '''
    img = Image.open(fileName)
    # PIL reports the size as (width, height)
    nrCols, nrRows = img.size
    rain8bit = np.array(img, dtype=int)
    del img
    return(rain8bit, nrRows, nrCols)
def get_quality_fromfilename(fileName):
    '''
    Parse a file name to get the one digit number or letter describing the
    quality of the radar composite: the character immediately before the
    first underscore of the base name.
    '''
    baseName = os.path.basename(fileName)
    # NOTE(review): if there is no underscore, find() returns -1 and the
    # second-to-last character is returned (same as the original behaviour).
    return(baseName[baseName.find('_') - 1])
def get_radaroperation_from_quality(quality):
    '''
    Function to convert a one digit quality number into the activity of the three 3rd gen. radars.

    The quality digit is a bit mask: bit 0 = Albis, bit 1 = Dole, bit 2 = Lema.

    Parameters
    ----------
    quality : str, int
        One digit quality number (extracted from the filename, e.g. 7)

    Returns
    -------
    alb: int
        Quality of the Albis radar data (1 if active, -1 otherwise)
    dol: int
        Quality of the Dole radar data (1 if active, -1 otherwise)
    lem: int
        Quality of the Lema radar data (1 if active, -1 otherwise)
    '''
    quality = int(quality)
    # Quality of each individual radar (-1 = unknown/not active)
    alb = -1
    dol = -1
    lem = -1
    # The original enumerated all 7 sum combinations by hand; decoding the
    # bit mask directly is equivalent over the valid range 1-7 (any other
    # value leaves all radars at -1, exactly as before).
    if 1 <= quality <= 7:
        if quality & 0b001:
            alb = 1
        if quality & 0b010:
            dol = 1
        if quality & 0b100:
            lem = 1
    # print(quality, '->', 'ALB=', alb, 'DOL=', dol, 'LEM=', lem)
    return(alb,dol,lem)
def get_radaroperation_from_quality_4gen(quality):
    '''
    Function to convert a one character quality flag into the activity of the five 4th gen. radars.

    The flag encodes a 5-bit mask (bit 0 = Albis, bit 1 = Dole, bit 2 = Lema,
    bit 3 = Plaine Morte, bit 4 = Weissfluhgipfel). Mask values 0-9 are
    written as digits, values 10-31 continue as the letters A-V.

    Parameters
    ----------
    quality : str, int
        One character quality flag (extracted from the filename, e.g. 7 or 'A')

    Returns
    -------
    alb: int
        Quality of the Albis radar data (1 if active, -1 otherwise)
    dol: int
        Quality of the Dole radar data (1 if active, -1 otherwise)
    lem: int
        Quality of the Lema radar data (1 if active, -1 otherwise)
    ppm: int
        Quality of the PlaineMorte radar data (1 if active, -1 otherwise)
    wei: int
        Quality of the Weissfluhgipfel radar data (1 if active, -1 otherwise)
    '''
    alb,dol,lem,ppm,wei = -1,-1,-1,-1,-1
    characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # English
    # Decode the character into the bit-mask value: digits map to
    # themselves, letters continue the sequence at 10 ('A' -> 10).
    try:
        qualitySum = int(quality)
    except (ValueError, TypeError):
        qualitySum = 9 + characters.find(quality) + 1
    # Valid masks range from 0 (no radar) to 0b11111 = 31 (all five radars)
    if not 0 <= qualitySum <= 31:
        print('Impossible to decode the quality flag', quality, '->', qualitySum)
        sys.exit(1)
    # The original searched all subset sums with itertools.combinations;
    # since the radar codes are powers of two, reading the bits directly
    # is equivalent and much cheaper.
    if qualitySum & 0b00001:
        alb = 1
    if qualitySum & 0b00010:
        dol = 1
    if qualitySum & 0b00100:
        lem = 1
    if qualitySum & 0b01000:
        ppm = 1
    if qualitySum & 0b10000:
        wei = 1
    return(alb,dol,lem,ppm,wei)
from itertools import combinations
def find_sum_in_list(numbers, target):
    '''
    Return a sub-list of numbers summing to target.

    The full list itself is returned when its total matches; otherwise the
    first matching combination (searched by increasing subset size) is
    returned. Exits the program when no combination sums to the target.
    '''
    # Particular case: all the numbers together reach the target
    if np.sum(numbers) == target:
        return(numbers)
    # Search all proper subsets, smallest first
    matches = []
    for subsetSize in range(len(numbers)):
        for subset in combinations(numbers, subsetSize):
            if sum(subset) == target:
                matches.append(list(subset))
    # Extract the first combination found or issue an error if nothing was found
    if len(matches) == 0:
        print('Impossible to find a combination of ', numbers,' to sum up to', target)
        sys.exit(1)
    return(matches[0])
def get_gif_radar_operation(fileName):
    '''
    Read the metadata of a radar composite gif file and get the quality of
    the different radars.

    Uses ImageMagick's ``identify`` to extract the comment block and parses
    per-radar tokens of the form ``ALB=<n>``.

    Parameters
    ----------
    fileName : str
        Path to the composite gif file.
        NOTE(review): fileName is interpolated into a shell command below;
        quote/escape it if paths can contain spaces or untrusted input.

    Returns
    -------
    alb, dol, lem, ppm, wei : int
        Number of valid fields in the accumulation for the Albis, Dole,
        Lema, Plaine Morte and Weissfluhgipfel radars; -1 when the metadata
        is missing or unreadable (e.g. period before radar installation).
    '''
    # Default values for period before radar installation
    alb = dol = lem = ppm = wei = -1
    try:
        # Use ImageMagick identify command to grep the line with the radar operations
        cmd = 'identify -format "%c" ' + fileName + ' | grep ALB'
        outString = subprocess.check_output(cmd, shell=True)
        # check_output returns bytes on Python 3; the old code skipped the
        # decode, so .split(' ') raised TypeError and the function silently
        # fell back to the defaults on every call under Python 3.
        if not isinstance(outString, str):
            outString = outString.decode('utf-8', 'replace')
        # Parse "RADAR=quality" tokens; DOE/MLE are historical aliases.
        for token in outString.split(' '):
            radarString = token.split('=')
            if radarString[0] == 'ALB':
                alb = int(radarString[1])
            elif radarString[0] in ('DOE', 'DOL'):
                dol = int(radarString[1])
            elif radarString[0] in ('MLE', 'LEM'):
                lem = int(radarString[1])
            elif radarString[0] == 'PPM':
                ppm = int(radarString[1])
            elif radarString[0] == 'WEI':
                wei = int(radarString[1])
    except Exception:
        # Metadata not present or unreadable: keep the -1 defaults so the
        # caller can fall back to the quality encoded in the file name.
        alb = dol = lem = ppm = wei = -1
    return(alb, dol, lem, ppm, wei)
# Exercise `continue`: it skips the remainder of the loop body for the
# current iteration.  The printed sequence is the test's expected output,
# so the output must stay exactly the same.

# `continue` fires near the end of the range: 'two' printed for 0..2 only.
for value in range(4):
    print('one', value)
    if value > 2:
        continue
    print('two', value)
# `continue` fires at the start of the range: 'two' printed for 2..3 only.
for value in range(4):
    print('one', value)
    if value < 2:
        continue
    print('two', value)
# `continue` while iterating a list literal: 3 is skipped.
for value in [1, 2, 3, 4]:
    if value == 3:
        continue
    print(value)
|
marchon/py-ECDSA | refs/heads/master | ecdsa/lib/util.py | 2 | from __future__ import division
import os
import math
import binascii
from hashlib import sha256
from . import der
from .curves import orderlen
from six import PY3, int2byte, b, next
# RFC5480:
# The "unrestricted" algorithm identifier is:
# id-ecPublicKey OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
oid_ecPublicKey = (1, 2, 840, 10045, 2, 1)
# DER encoding of the OID above, ready to embed in key structures.
encoded_oid_ecPublicKey = der.encode_oid(*oid_ecPublicKey)
def randrange(order, entropy=None):
    """Return a random integer k such that 1 <= k < order, uniformly
    distributed across that range. For simplicity, this only behaves well if
    'order' is fairly close (but below) a power of 256. The try-try-again
    algorithm we use takes longer and longer time (on average) to complete as
    'order' falls, rising to a maximum of avg=512 loops for the worst-case
    (256**k)+1 . All of the standard curves behave well. There is a cutoff at
    10k loops (which raises RuntimeError) to prevent an infinite loop when
    something is really broken like the entropy function not working.

    Note that this function is not declared to be forwards-compatible: we may
    change the behavior in future releases. The entropy= argument (which
    should get a callable that behaves like os.urandom) can be used to
    achieve stability within a given release (for repeatable unit tests), but
    should not be used as a long-term-compatible key generation algorithm.
    """
    # Candidates are drawn byte-wise.  A bit-wise scheme (mask the top byte
    # to the order's bit length) would cut the worst case to avg=2 loops,
    # but needs more string/integer juggling.
    if entropy is None:
        entropy = os.urandom
    assert order > 1
    num_bytes = orderlen(order)  # renamed: the old local shadowed builtins.bytes
    dont_try_forever = 10000  # gives about 2**-60 failures for worst case
    while dont_try_forever > 0:
        dont_try_forever -= 1
        candidate = string_to_number(entropy(num_bytes)) + 1
        if 1 <= candidate < order:
            return candidate
        # out-of-range draw: loop again (the old trailing `continue` was dead code)
    raise RuntimeError("randrange() tried hard but gave up, either something"
                       " is very wrong or you got realllly unlucky. Order was"
                       " %x" % order)
class PRNG:
    """Deterministic short-term pseudorandom byte source keyed on a seed.

    Instances are callable: ``prng(n)`` returns n pseudorandom bytes,
    produced by hashing "prng-<counter>-<seed>" with SHA-256 and consuming
    the digests in order.  Meant for randrange_from_seed__trytryagain(),
    which only runs it a few times per seed; it provides no protection
    against state compromise (no forward security).
    """

    def __init__(self, seed):
        self.generator = self.block_generator(seed)

    def __call__(self, numbytes):
        # Pull exactly `numbytes` units off the digest stream.
        pieces = [next(self.generator) for _ in range(numbytes)]
        if PY3:
            # iterating a py3 digest yields ints; bytes() reassembles them
            return bytes(pieces)
        return "".join(pieces)

    def block_generator(self, seed):
        counter = 0
        while True:
            digest = sha256(("prng-%d-%s" % (counter, seed)).encode()).digest()
            for unit in digest:
                yield unit
            counter += 1
def randrange_from_seed__overshoot_modulo(seed, order):
    """Derive a number in [1, order) from *seed*.

    Following David-Sarah Hopwood's suggestion: generate twice as many
    seed-derived bytes as the order needs, then reduce modulo order-1.
    This gives adequate (but not perfect) uniformity with simple code;
    try-try-again is the main alternative.
    """
    raw = PRNG(seed)(2 * orderlen(order))
    number = (int(binascii.hexlify(raw), 16) % (order - 1)) + 1
    assert 1 <= number < order, (1, number, order)
    return number
def lsb_of_ones(numbits):
    """Return an integer whose low *numbits* bits are all set (e.g. 3 -> 0b111)."""
    return (2 ** numbits) - 1
def bits_and_bytes(order):
    """Return (bits, whole_bytes, leftover_bits) needed to represent order-1.

    ``bits`` is the bit length of order-1, ``whole_bytes`` the number of
    complete bytes in it, and ``leftover_bits`` the remainder (0-7).
    """
    bit_count = int(math.log(order-1, 2)+1)
    whole_bytes = bit_count // 8  # renamed: the old local shadowed builtins.bytes
    leftover_bits = bit_count % 8
    return bit_count, whole_bytes, leftover_bits
# the following randrange_from_seed__METHOD() functions take an
# arbitrarily-sized secret seed and turn it into a number that obeys the same
# range limits as randrange() above. They are meant for deriving consistent
# signing keys from a secret rather than generating them randomly, for
# example a protocol in which three signing keys are derived from a master
# secret. You should use a uniformly-distributed unguessable seed with about
# curve.baselen bytes of entropy. To use one, do this:
# seed = os.urandom(curve.baselen) # or other starting point
# secexp = ecdsa.util.randrange_from_seed__trytryagain(sed, curve.order)
# sk = SigningKey.from_secret_exponent(secexp, curve)
def randrange_from_seed__truncate_bytes(seed, order, hashmod=sha256):
    """Hash *seed* and turn the digest into a number in [1, order).

    Does not try to fill the range uniformly: on average about four bits
    of entropy are lost to the byte-level truncation.
    """
    bits, num_bytes, extrabits = bits_and_bytes(order)
    if extrabits:
        num_bytes += 1
    base = hashmod(seed).digest()[:num_bytes]
    # Pad with zero *bytes*: on Python 3 the digest is a bytes object, so
    # the old str pad ("\x00") raised TypeError during concatenation.
    base = b"\x00"*(num_bytes-len(base)) + base
    number = 1+int(binascii.hexlify(base), 16)
    assert 1 <= number < order
    return number
def randrange_from_seed__truncate_bits(seed, order, hashmod=sha256):
    """Like randrange_from_seed__truncate_bytes, but truncate at the bit
    level, losing only an average of half a bit of entropy."""
    bits = int(math.log(order-1, 2)+1)
    maxbytes = (bits+7) // 8
    base = hashmod(seed).digest()[:maxbytes]
    # Pad with zero *bytes*: the digest is bytes on Python 3, so the old
    # str pad ("\x00") raised TypeError there.
    base = b"\x00"*(maxbytes-len(base)) + base
    topbits = 8*maxbytes - bits
    if topbits:
        # Indexing bytes yields an int on Python 3 but a 1-char str on
        # Python 2 (where ord() was needed); normalise before masking.
        first = base[0] if isinstance(base[0], int) else ord(base[0])
        base = int2byte(first & lsb_of_ones(topbits)) + base[1:]
    number = 1+int(binascii.hexlify(base), 16)
    assert 1 <= number < order
    return number
def randrange_from_seed__trytryagain(seed, order):
    """Derive a number in [1, order) from *seed* by rejection sampling.

    Draws exactly the number of bits needed to cover *order* from a PRNG
    keyed on the seed (discarding the unused high bits of the first byte),
    and retries until the draw lands inside the range.  The average number
    of loops ranges from 1.0 (order=2**k-1) to 2.0 (order=2**k+1).
    """
    assert order > 1
    bits, num_bytes, extrabits = bits_and_bytes(order)  # renamed local: the
    # old name `bytes` shadowed the builtin of the same name
    generate = PRNG(seed)
    while True:
        extrabyte = b("")
        if extrabits:
            extrabyte = int2byte(ord(generate(1)) & lsb_of_ones(extrabits))
        guess = string_to_number(extrabyte + generate(num_bytes)) + 1
        if 1 <= guess < order:
            return guess
def number_to_string(num, order):
    """Encode *num* as a big-endian byte string of exactly orderlen(order) bytes."""
    length = orderlen(order)
    hex_digits = ("%0" + str(2*length) + "x") % num
    encoded = binascii.unhexlify(hex_digits.encode())
    assert len(encoded) == length, (len(encoded), length)
    return encoded
def number_to_string_crop(num, order):
    """Like number_to_string, but crop the encoding to orderlen(order) bytes."""
    length = orderlen(order)
    hex_digits = ("%0" + str(2*length) + "x") % num
    return binascii.unhexlify(hex_digits.encode())[:length]
def string_to_number(string):
    """Interpret *string* as a big-endian unsigned integer."""
    hex_digits = binascii.hexlify(string)
    return int(hex_digits, 16)
def string_to_number_fixedlen(string, order):
    """Decode a big-endian byte string that must be exactly orderlen(order) long."""
    expected = orderlen(order)
    assert len(string) == expected, (len(string), expected)
    return int(binascii.hexlify(string), 16)
# these methods are useful for the sigencode= argument to SK.sign() and the
# sigdecode= argument to VK.verify(), and control how the signature is packed
# or unpacked.
def sigencode_strings(r, s, order):
    """Encode a signature as a pair of fixed-width big-endian strings."""
    return (number_to_string(r, order), number_to_string(s, order))
def sigencode_string(r, s, order):
    """Encode the signature as the fixed-width concatenation r_str + s_str.

    For any given curve the encoded size of each number is fixed, so plain
    concatenation round-trips unambiguously.
    """
    return b"".join(sigencode_strings(r, s, order))
def sigencode_der(r, s, order):
    """DER-encode the signature as SEQUENCE { INTEGER r, INTEGER s }."""
    r_der = der.encode_integer(r)
    s_der = der.encode_integer(s)
    return der.encode_sequence(r_der, s_der)
# canonical versions of sigencode methods
# these enforce low S values, by negating the value (modulo the order) if above order/2
# see CECKey::Sign() https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214
def sigencode_strings_canonize(r, s, order):
    """As sigencode_strings, but force the canonical low-S signature form
    (negate s modulo the order when it is above order/2)."""
    low_s = order - s if s > order / 2 else s
    return sigencode_strings(r, low_s, order)
def sigencode_string_canonize(r, s, order):
    """As sigencode_string, but force the canonical low-S signature form."""
    low_s = order - s if s > order / 2 else s
    return sigencode_string(r, low_s, order)
def sigencode_der_canonize(r, s, order):
    """As sigencode_der, but force the canonical low-S signature form."""
    low_s = order - s if s > order / 2 else s
    return sigencode_der(r, low_s, order)
def sigdecode_string(signature, order):
    """Split a fixed-width r||s signature string back into (r, s) integers."""
    length = orderlen(order)
    assert len(signature) == 2*length, (len(signature), 2*length)
    r_part = signature[:length]
    s_part = signature[length:]
    return (string_to_number_fixedlen(r_part, order),
            string_to_number_fixedlen(s_part, order))
def sigdecode_strings(rs_strings, order):
    """Decode an (r_str, s_str) pair of fixed-width big-endian strings."""
    r_str, s_str = rs_strings
    expected = orderlen(order)
    assert len(r_str) == expected, (len(r_str), expected)
    assert len(s_str) == expected, (len(s_str), expected)
    return (string_to_number_fixedlen(r_str, order),
            string_to_number_fixedlen(s_str, order))
def sigdecode_der(sig_der, order):
    """Parse a DER SEQUENCE of two INTEGERs into (r, s), rejecting trailing junk."""
    rs_strings, rest = der.remove_sequence(sig_der)
    if rest != b(""):
        raise der.UnexpectedDER("trailing junk after DER sig: %s" %
                                binascii.hexlify(rest))
    r, remainder = der.remove_integer(rs_strings)
    s, remainder = der.remove_integer(remainder)
    if remainder != b(""):
        raise der.UnexpectedDER("trailing junk after DER numbers: %s" %
                                binascii.hexlify(remainder))
    return r, s
|
jounex/hue | refs/heads/master | apps/security/src/security/management/commands/__init__.py | 929 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
marianoguerra/feedformatter | refs/heads/master | feedformatter.py | 1 | '''module to create atom and rss feeds'''
# Feedformatter
# Copyright (c) 2008, Luke Maurits <luke@maurits.id.au>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
# True when running under Python 3; selects the string/IO compatibility
# paths used throughout this module.
PY3 = sys.version_info[0] == 3
__version__ = "0.5"
__author__ = "Luke Maurits, Michael Stella, Mariano Guerra"
__copyright__ = "Copyright 2008 Luke Maurits"
if PY3:
from io import StringIO
basestring = str
else:
if sys.version_info[1] < 6:
bytes = str
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# This "staircase" of import attempts is ugly. If there's a nicer way to do
# this, please let me know!
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
try:
from elementtree import ElementTree as ET
except ImportError:
raise ImportError("Could not import any form of element tree!")
try:
from xml.dom.ext import PrettyPrint
from xml.dom.ext.reader.Sax import FromXml
CAN_PRETTY_PRINT = True
except ImportError:
CAN_PRETTY_PRINT = False
import time
import datetime
def _get_tz_offset():
"""
Return the current timezone's offset from GMT as a string
in the format +/-HH:MM, as required by RFC3339.
"""
seconds = -1 * time.timezone # Python gets the offset backward! >:(
minutes = seconds / 60
hours = minutes / 60
minutes = minutes - hours * 60
hours = abs(hours)
if seconds < 0:
return "-%02d:%02d" % (hours, minutes)
else:
return "+%02d:%02d" % (hours, minutes)
def _convert_datetime(dtime):
"""
Convert dtime, which may be one of a whole lot of things, into a
standard 9 part time tuple.
"""
if type(dtime) is datetime.datetime:
return dtime.timetuple()
elif ((type(dtime) is tuple and len(dtime) == 9) or
type(dtime) is time.struct_time):
# Already done!
return dtime
elif type(dtime) is int or type(dtime) is float:
# Assume this is a seconds-since-epoch time
return time.localtime(dtime)
elif isinstance(dtime, basestring):
# A time stamp?
try:
return time.strptime(dtime, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
# Maybe this is a string of an epoch time?
try:
return time.localtime(float(dtime))
except ValueError:
# Guess not.
raise Exception("Unrecongised time format!")
else:
# No idea what this is. Give up!
raise Exception("Unrecongised time format!")
def _format_datetime(feed_type, dtime):
"""
Convert some representation of a date and time into a string which can be
used in a validly formatted feed of type feed_type. Raise an
Exception if this cannot be done.
"""
# First, convert time into a time structure
if not type(dtime) is time.struct_time:
dtime = _convert_datetime(dtime)
# Then, convert that to the appropriate string
if feed_type is "rss2":
return time.strftime("%a, %d %b %Y %H:%M:%S %Z", dtime)
elif feed_type is "atom":
return time.strftime("%Y-%m-%dT%H:%M:%S", dtime) + _get_tz_offset()
def _atomise_id(tag):
"""return a tag in a suitable format for atom"""
if type(tag) is dict:
return tag['href'].replace('http://', 'tag:')
return tag.replace('http://', 'tag:')
def _atomise_link(link, rel=None):
"""return a link in a suitable format for atom"""
if type(link) is dict:
if 'type' not in link:
link['type'] = 'text/html'
if rel and 'rel' not in link:
link['rel'] = rel
return link
else:
result = {'href' : link, 'type': 'text/html'}
if rel:
result['rel'] = rel
return result
def _atomise_author(author):
"""
Convert author from whatever it is to a dictionary representing an
atom:Person construct.
"""
if type(author) is dict:
return author
else:
if author.startswith("http://") or author.startswith("www"):
# This is clearly a URI
return {"uri" : author}
elif "@" in author and "." in author:
# This is most probably an email address
return {"email" : author}
else:
# Must be a name
return {"name" : author}
def _rssify_author(author):
"""
Convert author from whatever it is to a plain old email string for
use in an RSS 2.0 feed.
"""
if type(author) is dict:
return author.get("email", None)
else:
if "@" in author and "." in author:
# Probably an email address
return author
else:
return None
def _rssify_link(link):
"""return a link in a suitable format"""
if type(link) is dict:
return link['href']
else:
return link
def _format_content(content):
"""Converts the ATOM 'content' node into a dict,
which will allow one to pass in a dict which has
an optionaly 'type' argument
"""
if type(content) is dict:
if not 'type' in content:
content['type'] = 'text'
return content
else:
return {
'type': 'html',
'content': content,
}
def _add_subelems(root_element, mappings, dictionary):
    """
    Add one subelement to root_element per mapping, using the first source
    key of the mapping present in *dictionary*.  Each mapping is
    (source_keys, element_name[, converter]); when a converter is given it
    transforms the value before it is attached.
    """
    for mapping in mappings:
        source_keys, elem_name = mapping[0], mapping[1]
        converter = mapping[2] if len(mapping) == 3 else None
        for key in source_keys:
            if key in dictionary:
                raw = dictionary[key]
                value = converter(raw) if converter else raw
                _add_subelem(root_element, elem_name, value)
                break
def _add_subelem(root_element, name, value):
"""ad a subelement to *root_element*"""
if value is None:
return
if type(value) is dict:
### HORRIBLE HACK!
if name == "link":
ET.SubElement(root_element, name, value)
elif name == 'content':
# A wee hack too, the content node must be
# converted to a CDATA block. This is a sort of cheat, see:
# http://stackoverflow.com
# /questions/174890/how-to-output-cdata-using-elementtree
element = ET.Element(name, type= value['type'])
element.append(cdata(value['content']))
root_element.append(element)
else:
sub_elem = ET.SubElement(root_element, name)
for key in value:
_add_subelem(sub_elem, key, value[key])
else:
ET.SubElement(root_element, name).text = value
def _stringify(tree, pretty):
    """
    Serialise an ElementTree element to a string.  When *pretty* is
    requested and the PyXML pretty-printer could be imported, the output is
    indented with line breaks; otherwise the compact form is returned.
    """
    if not (pretty and CAN_PRETTY_PRINT):
        return _element_to_string(tree)
    buf = StringIO()
    PrettyPrint(FromXml(_element_to_string(tree)), buf, indent=" ")
    return buf.getvalue()
def _element_to_string(element, encoding=None):
    """
    This replaces ElementTree's tostring() function
    with one that will use our local ElementTreeCDATA
    class instead
    """
    class Dummy(object):
        """a dummy class that has the required fields to be used in
        the write method call below"""
        def __init__(self, write_function):
            self.write = write_function
    # Collect every chunk that ElementTreeCDATA.write() emits; Dummy acts
    # as the file-like sink whose write() appends to this list.
    data = []
    if encoding is None:
        encoding = 'utf-8'
    file_like = Dummy(data.append)
    ElementTreeCDATA(element).write(file_like, encoding)
    # The writer may emit None, bytes or str chunks depending on the
    # Python/ElementTree version; normalise everything to str before joining.
    new_data = []
    for item in data:
        if item is None:
            new_item = ""
        elif isinstance(item, bytes):
            new_item = item.decode(encoding)
        else:
            new_item = item
        new_data.append(new_item)
    return ''.join(new_data)
class Feed(object):
    """In-memory feed: channel-level metadata plus a list of item dicts.

    ``feed`` maps channel fields (title, link, description, ...) to values
    and ``items`` holds one dict per entry; ``entries`` is an alias of
    ``items`` kept for Atom terminology.  The ``format_*_string`` methods
    serialise the feed as RSS 1.0, RSS 2.0 or Atom 1.0, the ``format_*_file``
    variants write that output to a file, and the ``validate_*`` methods
    raise InvalidFeedException when mandatory elements are missing.
    """

    def __init__(self, feed=None, items=None):
        """Create a feed from an optional channel dict and item list."""
        if feed:
            self.feed = feed
        else:
            self.feed = {}
        if items:
            self.items = items
        else:
            self.items = []
        # RSS calls them "items", Atom calls them "entries"; expose the one
        # list under both names.
        self.entries = self.items

    ### RSS 1.0 STUFF ------------------------------

    def validate_rss1(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as RSS 1.0."""
        # <channel> must contain "title"
        if "title" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    "RSS 1.0 feed must contain a title subelement")
        # <channel> must contain "link"
        if "link" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    " RSS 1.0 feeds must contain a link subelement")
        # <channel> must contain "description"
        if "description" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    "RSS 1.0 feeds must contain a description subelement")
        # Each <item> must contain "title" and "link"
        for item in self.items:
            if "title" not in item:
                raise InvalidFeedException("Each item element in an RSS 1.0 "
                        "feed must contain a title subelement")
            if "link" not in item:
                raise InvalidFeedException("Each item element in an RSS 1.0 "
                        "feed must contain a link subelement")

    def format_rss1_string(self, validate=True, pretty=False):
        """Format the feed as RSS 1.0 and return the result as a string."""
        if validate:
            self.validate_rss1()
        rss1_root = ET.Element('rdf:RDF',
            {"xmlns:rdf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
             "xmlns" : "http://purl.org/rss/1.0/"})
        rss1_channel = ET.SubElement(rss1_root, 'channel',
            {"rdf:about" : self.feed["link"]})
        _add_subelems(rss1_channel, _rss1_channel_mappings, self.feed)
        # RSS 1.0 requires an rdf:Seq listing every item by resource URL.
        rss1_contents = ET.SubElement(rss1_channel, 'items')
        rss1_contents_seq = ET.SubElement (rss1_contents, 'rdf:Seq')
        for item in self.items:
            ET.SubElement(rss1_contents_seq, 'rdf:li', resource=item["link"])
        for item in self.items:
            rss1_item = ET.SubElement(rss1_root, 'item',
                {"rdf:about" : item["link"]})
            _add_subelems(rss1_item, _rss1_item_mappings, item)
        return _stringify(rss1_root, pretty=pretty)

    def format_rss1_file(self, filename, validate=True, pretty=False):
        """Format the feed as RSS 1.0 and save the result to a file."""
        string = self.format_rss1_string(validate, pretty)
        # "with" guarantees the handle is closed even if the write fails
        # (the previous version leaked the handle on error).
        with open(filename, "w") as handle:
            handle.write(string)

    ### RSS 2.0 STUFF ------------------------------

    def validate_rss2(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as RSS 2.0."""
        # <channel> must contain "title"
        if "title" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    "RSS 2.0 feed must contain a title subelement")
        # <channel> must contain "link"
        if "link" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    " RSS 2.0 feeds must contain a link subelement")
        # <channel> must contain "description"
        if "description" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                    "RSS 2.0 feeds must contain a description subelement")
        # Each <item> must contain at least "title" OR "description"
        for item in self.items:
            if not ("title" in item or "description" in item):
                raise InvalidFeedException("Each item element in an RSS 2.0 "
                        "feed must contain at least a title or description"
                        " subelement")

    def format_rss2_string(self, validate=True, pretty=False):
        """Format the feed as RSS 2.0 and return the result as a string."""
        if validate:
            self.validate_rss2()
        rss2_root = ET.Element('rss', {'version':'2.0'})
        rss2_channel = ET.SubElement(rss2_root, 'channel')
        _add_subelems(rss2_channel, _rss2_channel_mappings, self.feed)
        for item in self.items:
            rss2_item = ET.SubElement(rss2_channel, 'item')
            _add_subelems(rss2_item, _rss2_item_mappings, item)
        return ('<?xml version="1.0" encoding="UTF-8" ?>\n' +
                _stringify(rss2_root, pretty=pretty))

    def format_rss2_file(self, filename, validate=True, pretty=False):
        """Format the feed as RSS 2.0 and save the result to a file."""
        string = self.format_rss2_string(validate, pretty)
        # Context manager closes the file even when write() raises.
        with open(filename, "w") as handle:
            handle.write(string)

    ### ATOM STUFF ------------------------------

    def validate_atom(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as Atom 1.0."""
        # Must have at least one "author" element in "feed" OR at least
        # one "author" element in each "entry".
        if "author" not in self.feed:
            for entry in self.entries:
                if "author" not in entry:
                    raise InvalidFeedException("Atom feeds must have either at "
                            "least one author element in the feed element or at "
                            " least one author element in each entry element")

    def format_atom_string(self, validate=True, pretty=False):
        """Format the feed as Atom 1.0 and return the result as a string."""
        if validate:
            self.validate_atom()
        atom_root = ET.Element('feed', {"xmlns":"http://www.w3.org/2005/Atom"})
        _add_subelems(atom_root, _atom_feed_mappings, self.feed)
        for entry in self.entries:
            atom_item = ET.SubElement( atom_root, 'entry')
            _add_subelems(atom_item, _atom_item_mappings, entry)
        return ('<?xml version="1.0" encoding="UTF-8" ?>\n' +
                _stringify(atom_root, pretty=pretty))

    def format_atom_file(self, filename, validate=True, pretty=False):
        """Format the feed as Atom 1.0 and save the result to a file."""
        string = self.format_atom_string(validate, pretty)
        # Context manager closes the file even when write() raises.
        with open(filename, "w") as handle:
            handle.write(string)
class InvalidFeedException(Exception):
    """Raised when a feed lacks an element required by the target format."""
def cdata(text=None):
    """Build a placeholder element tagged "CDATA" holding *text* (or "").

    The special tag is recognised during serialisation and rendered as a
    literal CDATA section instead of a normal element.
    """
    element = ET.Element("CDATA")
    element.text = "" if text is None else text
    return element
class ElementTreeCDATA(ET.ElementTree):
    """
    Subclass of ElementTree which handles CDATA blocks reasonably
    """
    # NOTE(review): this overrides ElementTree's *private* _write hook,
    # which only exists in older ElementTree releases -- confirm against
    # the ElementTree version actually bundled/targeted.
    def _write(self, file_like, node, encoding, namespaces):
        """write this element representation to *file_like*"""
        if node.tag == "CDATA":
            # Elements created by cdata() are emitted as literal CDATA
            # sections rather than as <CDATA> elements.
            text = node.text.encode(encoding)
            file_like.write("\n<![CDATA[%s]]>\n" % text)
        else:
            # Everything else uses the stock serialisation.
            ET.ElementTree._write(self, file_like, node, encoding, namespaces)
# RSS 1.0 Functions ----------
# Each mapping below is (accepted_source_keys, output_element_name[, converter]);
# _add_subelems() emits one subelement per mapping, taking the value of the
# first source key present in the feed/item dict and running it through the
# optional converter.
_rss1_channel_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description")
)
_rss1_item_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description")
)
# RSS 2.0 Functions ----------
_rss2_channel_mappings = (
(("title",), "title"),
(("link", "url"), "link", _rssify_link),
(("description", "desc", "summary"), "description"),
(("pubDate_parsed", "pubdate_parsed", "date_parsed", "published_parsed", "updated_parsed", "pubDate", "pubdate", "date", "published", "updated"), "pubDate",
lambda x: _format_datetime("rss2",x)),
(("category",), "category"),
(("language",), "language"),
(("copyright",), "copyright"),
(("webMaster",), "webmaster"),
(("image",), "image"),
(("skipHours",), "skipHours"),
(("skipDays",), "skipDays")
)
_rss2_item_mappings = (
(("title",), "title"),
(("link", "url"), "link", _rssify_link),
(("description", "desc", "summary"), "description"),
(("guid", "id"), "guid"),
(("pubDate_parsed", "pubdate_parsed", "date_parsed", "published_parsed", "updated_parsed", "pubDate", "pubdate", "date", "published", "updated"), "pubDate",
lambda x: _format_datetime("rss2",x)),
(("category",), "category"),
(("author",), "author", _rssify_author)
)
# Atom 1.0 ----------
_atom_feed_mappings = (
(("title",), "title"),
(("id", "link", "url"), "id", _atomise_id),
(("link", "url"), "link", _atomise_link),
(("description", "desc", "summary"), "subtitle"),
(("pubDate_parsed", "pubdate_parsed", "date_parsed", "published_parsed", "updated_parsed", "pubDate", "pubdate", "date", "published", "updated"), "updated",
lambda x: _format_datetime("atom",x)),
(("category",), "category"),
(("author",), "author", _atomise_author)
)
_atom_item_mappings = (
(("title",), "title"),
(("link", "url"), "link", lambda x: _atomise_link(x, rel='alternate')),
(("id", "link", "url"), "id", _atomise_id),
(("description", "desc", "summary"), "summary"),
(("content",), "content", _format_content),
(("pubDate_parsed", "pubdate_parsed", "date_parsed", "published_parsed", "updated_parsed", "pubDate", "pubdate", "date", "published", "updated"), "published",
lambda x: _format_datetime("atom",x)),
(("updated",), "updated", lambda x: _format_datetime("atom",x)),
(("category",), "category"),
(("author",), "author", _atomise_author)
)
### FACTORY FUNCTIONS ------------------------------
def from_ufp(ufp):
    """Build a Feed from a dict holding "feed" (channel dict) and "items"
    (list of item dicts) keys, e.g. a universal-feed-parser result."""
    channel = ufp["feed"]
    entries = ufp["items"]
    return Feed(channel, entries)
### MAIN ------------------------------
def main():
    """
    Command-line entry point: build a small example feed and print it in
    all three supported formats (RSS 1.0, RSS 2.0, Atom 1.0).
    """
    def show(*args):
        """a cross version replacement for print that is useful for the demo
        here"""
        sys.stdout.write(" ".join([str(arg) for arg in args]))
        sys.stdout.write("\n")
    # Channel-level metadata.
    feed = Feed()
    feed.feed["title"] = "Test Feed"
    feed.feed["link"] = "http://code.google.com/p/feedformatter/"
    feed.feed["author"] = "Luke Maurits"
    feed.feed["description"] = "A simple test feed for feedformatter"
    # One example item.
    item = {}
    item["title"] = "Test item"
    item["link"] = "http://www.python.org"
    item["description"] = "Python programming language"
    item["guid"] = "1234567890"
    feed.items.append(item)
    # Render the same feed in every supported output format.
    show("---- RSS 1.0 ----")
    show(feed.format_rss1_string(pretty=True))
    show("---- RSS 2.0 ----")
    show(feed.format_rss2_string(pretty=True))
    show("---- Atom 1.0 ----")
    show(feed.format_atom_string(pretty=True))
if __name__ == "__main__":
    main()
|
tarikkdiry/Flock | refs/heads/master | flask/lib/python2.7/site-packages/guess_language/guess_language.py | 59 | ''' Guess the language of text.
Based on guesslanguage.cpp by Jacob R Rideout for KDE
http://websvn.kde.org/branches/work/sonnet-refactoring/common/nlp/guesslanguage.cpp?view=markup
which itself is based on Language::Guess by Maciej Ceglowski
http://languid.cantbedone.org/
Copyright (c) 2008, Kent S Johnson
C++ version is Copyright (c) 2006 Jacob R Rideout <kde@jacobrideout.net>
Perl version is (c) 2004-6 Maciej Ceglowski
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Note: Language::Guess is GPL-licensed. KDE developers received permission
from the author to distribute their port under LGPL:
http://lists.kde.org/?l=kde-sonnet&m=116910092228811&w=2
'''
__all__ = 'guessLanguage guessLanguageName guessLanguageInfo guessLanguageTag guessLanguageId'.split()
import codecs, os, re, sys, unicodedata
from collections import defaultdict
from blocks import unicodeBlock
MIN_LENGTH = 20
BASIC_LATIN = "en ceb ha so tlh id haw la sw eu nr nso zu xh ss st tn ts".split()
EXTENDED_LATIN = "cs af pl hr ro sk sl tr hu az et sq ca es fr de nl it da is nb sv fi lv pt ve lt tl cy".split()
ALL_LATIN = BASIC_LATIN + EXTENDED_LATIN
CYRILLIC = "ru uk kk uz mn sr mk bg ky".split()
ARABIC = "ar fa ps ur".split()
DEVANAGARI = "hi ne".split()
# NOTE mn appears twice, once for mongolian script and once for CYRILLIC
SINGLETONS = [
('Armenian', 'hy'),
('Hebrew', 'he'),
('Bengali', 'bn'),
('Gurmukhi', 'pa'),
('Greek', 'el'),
('Gujarati', 'gu'),
('Oriya', 'or'),
('Tamil', 'ta'),
('Telugu', 'te'),
('Kannada', 'kn'),
('Malayalam', 'ml'),
('Sinhala', 'si'),
('Thai', 'th'),
('Lao', 'lo'),
('Tibetan', 'bo'),
('Burmese', 'my'),
('Georgian', 'ka'),
('Mongolian', 'mn-Mong'),
('Khmer', 'km'),
]
PT = "pt_BR pt_PT".split()
UNKNOWN = 'UNKNOWN'
models = {}
# IANA language tag -> human-readable language name.
# Fix: the original listed "uk" twice ("Ukrainian" then the misspelled
# "Ukranian"); the second entry silently overrode the first, so lookups
# returned the typo. A single, correctly spelled entry is kept.
NAME_MAP = {
    "ab" : "Abkhazian",
    "af" : "Afrikaans",
    "ar" : "Arabic",
    "az" : "Azeri",
    "be" : "Byelorussian",
    "bg" : "Bulgarian",
    "bn" : "Bengali",
    "bo" : "Tibetan",
    "br" : "Breton",
    "ca" : "Catalan",
    "ceb" : "Cebuano",
    "cs" : "Czech",
    "cy" : "Welsh",
    "da" : "Danish",
    "de" : "German",
    "el" : "Greek",
    "en" : "English",
    "eo" : "Esperanto",
    "es" : "Spanish",
    "et" : "Estonian",
    "eu" : "Basque",
    "fa" : "Farsi",
    "fi" : "Finnish",
    "fo" : "Faroese",
    "fr" : "French",
    "fy" : "Frisian",
    "gd" : "Scots Gaelic",
    "gl" : "Galician",
    "gu" : "Gujarati",
    "ha" : "Hausa",
    "haw" : "Hawaiian",
    "he" : "Hebrew",
    "hi" : "Hindi",
    "hr" : "Croatian",
    "hu" : "Hungarian",
    "hy" : "Armenian",
    "id" : "Indonesian",
    "is" : "Icelandic",
    "it" : "Italian",
    "ja" : "Japanese",
    "ka" : "Georgian",
    "kk" : "Kazakh",
    "km" : "Cambodian",
    "ko" : "Korean",
    "ku" : "Kurdish",
    "ky" : "Kyrgyz",
    "la" : "Latin",
    "lt" : "Lithuanian",
    "lv" : "Latvian",
    "mg" : "Malagasy",
    "mk" : "Macedonian",
    "ml" : "Malayalam",
    "mn" : "Mongolian",
    "mr" : "Marathi",
    "ms" : "Malay",
    "nd" : "Ndebele",
    "ne" : "Nepali",
    "nl" : "Dutch",
    "nn" : "Nynorsk",
    "no" : "Norwegian",
    "nso" : "Sepedi",
    "pa" : "Punjabi",
    "pl" : "Polish",
    "ps" : "Pashto",
    "pt" : "Portuguese",
    "ro" : "Romanian",
    "ru" : "Russian",
    "sa" : "Sanskrit",
    "sh" : "Serbo-Croatian",
    "sk" : "Slovak",
    "sl" : "Slovene",
    "so" : "Somali",
    "sq" : "Albanian",
    "sr" : "Serbian",
    "sv" : "Swedish",
    "sw" : "Swahili",
    "ta" : "Tamil",
    "te" : "Telugu",
    "th" : "Thai",
    "tl" : "Tagalog",
    "tlh" : "Klingon",
    "tn" : "Setswana",
    "tr" : "Turkish",
    "ts" : "Tsonga",
    "tw" : "Twi",
    "uk" : "Ukrainian",
    "ur" : "Urdu",
    "uz" : "Uzbek",
    "ve" : "Venda",
    "vi" : "Vietnamese",
    "xh" : "Xhosa",
    "zh" : "Chinese",
    "zh-tw" : "Traditional Chinese (Taiwan)",
    "zu" : "Zulu",
    }
# IANA language tag -> numeric language id.
# Fix: the original listed "uk" twice (26510 then 26520); dict literals keep
# the last value, so 26510 was dead code. The single effective entry (26520)
# is kept, preserving runtime behavior.
IANA_MAP = {
    "ab" : 12026,
    "af" : 40,
    "ar" : 26020,
    "az" : 26030,
    "be" : 11890,
    "bg" : 26050,
    "bn" : 26040,
    "bo" : 26601,
    "br" : 1361,
    "ca" : 3,
    "ceb" : 26060,
    "cs" : 26080,
    "cy" : 26560,
    "da" : 26090,
    "de" : 26160,
    "el" : 26165,
    "en" : 26110,
    "eo" : 11933,
    "es" : 26460,
    "et" : 26120,
    "eu" : 1232,
    "fa" : 26130,
    "fi" : 26140,
    "fo" : 11817,
    "fr" : 26150,
    "fy" : 1353,
    "gd" : 65555,
    "gl" : 1252,
    "gu" : 26599,
    "ha" : 26170,
    "haw" : 26180,
    "he" : 26592,
    "hi" : 26190,
    "hr" : 26070,
    "hu" : 26200,
    "hy" : 26597,
    "id" : 26220,
    "is" : 26210,
    "it" : 26230,
    "ja" : 26235,
    "ka" : 26600,
    "kk" : 26240,
    "km" : 1222,
    "ko" : 26255,
    "ku" : 11815,
    "ky" : 26260,
    "la" : 26280,
    "lt" : 26300,
    "lv" : 26290,
    "mg" : 1362,
    "mk" : 26310,
    "ml" : 26598,
    "mn" : 26320,
    "mr" : 1201,
    "ms" : 1147,
    "ne" : 26330,
    "nl" : 26100,
    "nn" : 172,
    "no" : 26340,
    "pa" : 65550,
    "pl" : 26380,
    "ps" : 26350,
    "pt" : 26390,
    "ro" : 26400,
    "ru" : 26410,
    "sa" : 1500,
    "sh" : 1399,
    "sk" : 26430,
    "sl" : 26440,
    "so" : 26450,
    "sq" : 26010,
    "sr" : 26420,
    "sv" : 26480,
    "sw" : 26470,
    "ta" : 26595,
    "te" : 26596,
    "th" : 26594,
    "tl" : 26490,
    "tlh" : 26250,
    "tn" : 65578,
    "tr" : 26500,
    "tw" : 1499,
    "uk" : 26520,
    "ur" : 26530,
    "uz" : 26540,
    "vi" : 26550,
    "zh" : 26065,
    "zh-tw" : 22,
    }
def _load_models():
    """Load the trigram frequency models from the 'trigrams' directory.

    Each model file contains lines of the form '<trigram> <rank>'. The parsed
    models are stored in the module-level `models` dict, keyed by the
    lower-cased file name (the language tag).
    """
    modelsDir = os.path.join(os.path.dirname(__file__), 'trigrams')
    modelsList = os.listdir(modelsDir)

    lineRe = re.compile(r"(.{3})\s+(.*)")
    for modelFile in modelsList:
        modelPath = os.path.join(modelsDir, modelFile)
        if os.path.isdir(modelPath):
            continue
        # Use the stream as a context manager so each handle is closed
        # deterministically (the original leaked one open file per model).
        with codecs.open(modelPath, 'r', 'utf-8') as f:
            model = {}  # trigram -> frequency rank
            for line in f:
                m = lineRe.search(line)
                if m:
                    model[m.group(1)] = int(m.group(2))
        models[modelFile.lower()] = model
_load_models()
def guessLanguage(text):
    ''' Returns the language code, i.e. 'en' '''
    if not text:
        return UNKNOWN
    if isinstance(text, str):
        # Python 2: decode byte strings to unicode before analysis
        # (input is assumed to be UTF-8 encoded).
        text = unicode(text, 'utf-8')
    # Strip non-alphabetic characters and collapse whitespace, then classify
    # from the Unicode blocks the text uses (trigram models break ties).
    text = normalize(text)
    return _identify(text, find_runs(text))
def guessLanguageInfo(text):
    """
    Returns (tag, id, name) i.e. ('en', 26110, 'english')
    """
    tag = guessLanguage(text)
    if tag == UNKNOWN:
        return UNKNOWN, UNKNOWN, UNKNOWN
    # Resolve the numeric id and display name straight from the tag.
    return tag, _getId(tag), _getName(tag)
# An alias for guessLanguage
guessLanguageTag = guessLanguage
def guessLanguageId(text):
    """
    Returns the language id. i.e. 26110
    """
    return _getId(guessLanguage(text))
def guessLanguageName(text):
    """
    Returns the language name. i.e. 'english'
    """
    return _getName(guessLanguage(text))
def _getId(iana):
    # IANA tag -> numeric language id; UNKNOWN for unmapped tags.
    return IANA_MAP.get(iana, UNKNOWN)
def _getName(iana):
    # IANA tag -> human-readable language name; UNKNOWN for unmapped tags.
    return NAME_MAP.get(iana, UNKNOWN)
def find_runs(text):
    ''' Count the number of characters in each character block.

    Returns the Unicode block names that dominate the text (see the
    percentage thresholds below). Returns [] when the text contains no
    alphabetic characters at all.
    '''
    run_types = defaultdict(int)

    totalCount = 0

    for c in text:
        if c.isalpha():
            block = unicodeBlock(c)
            run_types[block] += 1
            totalCount += 1

    # Fix: text made only of digits/punctuation/whitespace has no alphabetic
    # characters; the percentage computation below used to raise
    # ZeroDivisionError in that case.
    if totalCount == 0:
        return []

    # import pprint
    # pprint.pprint(run_types)

    # return run types that used for 40% or more of the string
    # always return basic latin if found more than 15%
    # and extended additional latin if over 10% (for Vietnamese)
    relevant_runs = []
    for key, value in run_types.items():
        pct = (value * 100) / totalCount  # Python 2 integer division
        if pct >= 40:
            relevant_runs.append(key)
        elif key == "Basic Latin" and (pct >= 15):
            relevant_runs.append(key)
        elif key == "Latin Extended Additional" and (pct >= 10):
            relevant_runs.append(key)

    return relevant_runs
def _identify(sample, scripts):
    """Map a normalized sample plus its dominant Unicode blocks to a tag.

    Scripts used by only one supported language are decided directly; shared
    scripts (Cyrillic, Arabic, Devanagari, Latin) are disambiguated with the
    trigram models via check(). The ordering of the tests matters: specific
    scripts are examined before the broad Latin fallbacks.
    """
    if len(sample) < 3:
        return UNKNOWN

    if "Hangul Syllables" in scripts or "Hangul Jamo" in scripts \
            or "Hangul Compatibility Jamo" in scripts or "Hangul" in scripts:
        return "ko"

    if "Greek and Coptic" in scripts:
        return "el"

    if "Katakana" in scripts:
        # Katakana is unique to Japanese among the supported languages.
        return "ja"

    if "CJK Unified Ideographs" in scripts or "Bopomofo" in scripts \
            or "Bopomofo Extended" in scripts or "KangXi Radicals" in scripts:
        # This is in both Ceglowski and Rideout
        # I can't imagine why...
        # or "Arabic Presentation Forms-A" in scripts
        return "zh"

    # Shared scripts: defer to the trigram models over a candidate set.
    if "Cyrillic" in scripts:
        return check( sample, CYRILLIC )

    if "Arabic" in scripts or "Arabic Presentation Forms-A" in scripts or "Arabic Presentation Forms-B" in scripts:
        return check( sample, ARABIC )

    if "Devanagari" in scripts:
        return check( sample, DEVANAGARI )

    # Try languages with unique scripts
    for blockName, langName in SINGLETONS:
        if blockName in scripts:
            return langName

    if "Latin Extended Additional" in scripts:
        # Heavy use of this block is characteristic of Vietnamese.
        return "vi"

    if "Extended Latin" in scripts:
        latinLang = check( sample, EXTENDED_LATIN )
        if latinLang == "pt":
            # Distinguish Brazilian from European Portuguese.
            return check(sample, PT)
        else:
            return latinLang
    if "Basic Latin" in scripts:
        return check( sample, ALL_LATIN )
    return UNKNOWN
def check(sample, langs):
    """Return the most likely tag among `langs` for `sample` via trigrams."""
    if len(sample) < MIN_LENGTH:
        return UNKNOWN

    ordered = createOrderedModel(sample)
    # Score only the candidates for which a trigram model was loaded.
    scores = [(distance(ordered, models[lang.lower()]), lang)
              for lang in langs
              if lang.lower() in models]
    if not scores:
        return UNKNOWN

    # Lowest distance means the closest trigram-rank profile.
    return min(scores)[1]
def createOrderedModel(content):
    ''' Create a list of trigrams in content sorted by frequency '''
    counts = defaultdict(int)
    lowered = content.lower()
    for start in xrange(len(lowered) - 2):
        counts[lowered[start:start + 3]] += 1

    # Most frequent trigrams first; ties broken alphabetically.
    return sorted(counts.keys(), key=lambda gram: (-counts[gram], gram))
spRe = re.compile(r"\s\s", re.UNICODE)
MAXGRAMS = 300
def distance(model, knownModel):
    """Sum of rank differences between the sample's trigrams and a model."""
    total = 0
    for rank, trigram in enumerate(model[:MAXGRAMS]):
        if spRe.search(trigram):
            continue  # ignore trigrams containing runs of spaces
        # Unknown trigrams are penalized with the maximum rank distance.
        if trigram in knownModel:
            total += abs(rank - knownModel[trigram])
        else:
            total += MAXGRAMS
    return total
def _makeNonAlphaRe():
    # Build a regex matching any non-alphabetic character by enumerating
    # every alphabetic code point into a negated character class
    # (Python 2: unichr / sys.maxunicode).
    # NOTE(review): characters are inserted unescaped; this relies on no
    # alphabetic code point being special inside a character class — confirm.
    nonAlpha = [u'[^']
    for i in range(sys.maxunicode):
        c = unichr(i)
        if c.isalpha(): nonAlpha.append(c)
    nonAlpha.append(u']')
    nonAlpha = u"".join(nonAlpha)
    return re.compile(nonAlpha)
nonAlphaRe = _makeNonAlphaRe()
spaceRe = re.compile('\s+', re.UNICODE)
def normalize(u):
    ''' Convert to normalized unicode.
    Remove non-alpha chars and compress runs of spaces.
    '''
    u = unicodedata.normalize('NFC', u)
    # Replace non-alphabetic characters with spaces, then squeeze the runs.
    return spaceRe.sub(' ', nonAlphaRe.sub(' ', u))
chrrrles/pyvcloud | refs/heads/master | pyvcloud/schema/vcd/v1_5/schemas/vcloud/vmsType.py | 4 | null |
mrry/tensorflow | refs/heads/windows | tensorflow/python/summary/event_file_inspector.py | 62 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic for TensorBoard inspector to help humans investigate event files.
Example usages:
tensorboard --inspect --event_file=myevents.out
tensorboard --inspect --event_file=myevents.out --tag=loss
tensorboard --inspect --logdir=mylogdir
tensorboard --inspect --logdir=mylogdir --tag=loss
This script runs over a logdir and creates an InspectionUnit for every
subdirectory with event files. If running over an event file, it creates only
one InspectionUnit. One block of output is printed to console for each
InspectionUnit.
The primary content of an InspectionUnit is the dict field_to_obs that maps
fields (e.g. "scalar", "histogram", "session_log:start", etc.) to a list of
Observations for the field. Observations correspond one-to-one with Events in an
event file but contain less information because they only store what is
necessary to generate the final console output.
The final output is rendered to console by applying some aggregating function
to the lists of Observations. Different functions are applied depending on the
type of field. For instance, for "scalar" fields, the inspector shows aggregate
statistics. For other fields like "session_log:start", all observed steps are
printed in order to aid debugging.
[1] Query a logdir or an event file for its logged tags and summary statistics
using --logdir or --event_file.
[[event_file]] contains these tags:
histograms
binary/Sign/Activations
binary/nn_tanh/act/Activations
binary/nn_tanh/biases
binary/nn_tanh/biases:gradient
binary/nn_tanh/weights
binary/nn_tanh/weights:gradient
images
input_images/image/0
input_images/image/1
input_images/image/2
scalars
Learning Rate
Total Cost
Total Cost (raw)
Debug output aggregated over all tags:
graph
first_step 0
last_step 0
max_step 0
min_step 0
num_steps 1
outoforder_steps []
histograms
first_step 491
last_step 659823
max_step 659823
min_step 491
num_steps 993
outoforder_steps []
images -
scalars
first_step 0
last_step 659823
max_step 659823
min_step 0
num_steps 1985
outoforder_steps []
sessionlog:checkpoint
first_step 7129
last_step 657167
max_step 657167
min_step 7129
num_steps 99
outoforder_steps []
sessionlog:start
outoforder_steps []
steps [0L]
sessionlog:stop -
[2] Drill down into a particular tag using --tag.
Debug output for binary/Sign/Activations:
histograms
first_step 491
last_step 659823
max_step 659823
min_step 491
num_steps 993
outoforder_steps []
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary import event_multiplexer
from tensorflow.python.summary.impl import event_file_loader
FLAGS = flags.FLAGS
# Map of field names within summary.proto to the user-facing names that this
# script outputs.
SUMMARY_TYPE_TO_FIELD = {'simple_value': 'scalars',
'histo': 'histograms',
'image': 'images',
'audio': 'audio'}
for summary_type in event_accumulator.SUMMARY_TYPES:
if summary_type not in SUMMARY_TYPE_TO_FIELD:
SUMMARY_TYPE_TO_FIELD[summary_type] = summary_type
# Types of summaries that we may want to query for by tag.
TAG_FIELDS = list(SUMMARY_TYPE_TO_FIELD.values())
# Summaries that we want to see every instance of.
LONG_FIELDS = ['sessionlog:start', 'sessionlog:stop']
# Summaries that we only want an abridged digest of, since they would
# take too much screen real estate otherwise.
SHORT_FIELDS = ['graph', 'sessionlog:checkpoint'] + TAG_FIELDS
# All summary types that we can inspect.
TRACKED_FIELDS = SHORT_FIELDS + LONG_FIELDS
# An `Observation` contains the data within each Event file that the inspector
# cares about. The inspector accumulates Observations as it processes events.
Observation = collections.namedtuple('Observation', ['step', 'wall_time',
'tag'])
# An InspectionUnit is created for each organizational structure in the event
# files visible in the final terminal output. For instance, one InspectionUnit
# is created for each subdirectory in logdir. When asked to inspect a single
# event file, there may only be one InspectionUnit.
# The InspectionUnit contains the `name` of the organizational unit that will be
# printed to console, a `generator` that yields `Event` protos, and a mapping
# from string fields to `Observations` that the inspector creates.
InspectionUnit = collections.namedtuple('InspectionUnit', ['name', 'generator',
'field_to_obs'])
PRINT_SEPARATOR = '=' * 70 + '\n'
def get_field_to_observations_map(generator, query_for_tag=''):
  """Return a field to `Observations` dict for the event generator.

  Args:
    generator: A generator over event protos.
    query_for_tag: A string that if specified, only create observations for
      events with this tag name.

  Returns:
    A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
  """

  def increment(stat, event, tag=''):
    # Record one observation for `stat`. `field_to_obs` is a closure over
    # the dict defined just below (before any event is processed).
    assert stat in TRACKED_FIELDS
    field_to_obs[stat].append(Observation(step=event.step,
                                          wall_time=event.wall_time,
                                          tag=tag)._asdict())

  field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])

  for event in generator:
    ## Process the event
    # Graph and session-log events carry no tag, so they are skipped
    # entirely when the caller queries for a specific tag.
    if event.HasField('graph_def') and (not query_for_tag):
      increment('graph', event)
    if event.HasField('session_log') and (not query_for_tag):
      status = event.session_log.status
      if status == SessionLog.START:
        increment('sessionlog:start', event)
      elif status == SessionLog.STOP:
        increment('sessionlog:stop', event)
      elif status == SessionLog.CHECKPOINT:
        increment('sessionlog:checkpoint', event)
    elif event.HasField('summary'):
      for value in event.summary.value:
        if query_for_tag and value.tag != query_for_tag:
          continue
        # A summary value may carry several payload kinds; count each one
        # under its user-facing field name.
        for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
          if value.HasField(proto_name):
            increment(display_name, event, value.tag)
  return field_to_obs
def get_unique_tags(field_to_obs):
  """Returns a dictionary of tags that a user could query over.

  Args:
    field_to_obs: Dict that maps string field to `Observation` list.

  Returns:
    A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
    the event files. If the dict does not have any observations of the type,
    maps to an empty list so that we can render this to console.
  """
  # Only tag-bearing fields are queryable; deduplicate and sort their tags.
  return {field: sorted({obs.get('tag', '') for obs in observations})
          for field, observations in field_to_obs.items()
          if field in TAG_FIELDS}
def print_dict(d, show_missing=True):
  """Prints a shallow dict to console.

  Args:
    d: Dict to print.
    show_missing: Whether to show keys with empty values.
  """
  for key, value in sorted(d.items()):
    if not value:
      if show_missing:
        # No instances of the key, so print missing symbol.
        print('{} -'.format(key))
      continue
    if isinstance(value, list):
      # Value is a list, so print each item of the list.
      print(key)
      for element in value:
        print('  {}'.format(element))
    elif isinstance(value, dict):
      # Value is a dict, so print each (key, value) pair of the dict.
      print(key)
      for inner_key, inner_value in sorted(value.items()):
        print('  {:<20} {}'.format(inner_key, inner_value))
def get_dict_to_print(field_to_obs):
  """Transform the field-to-obs mapping into a printable dictionary.

  Args:
    field_to_obs: Dict that maps string field to `Observation` list.

  Returns:
    A dict with the keys and values to print to console.
  """

  def _compressed(steps):
    # Abridged digest for high-volume fields.
    return {'num_steps': len(set(steps)),
            'min_step': min(steps),
            'max_step': max(steps),
            'last_step': steps[-1],
            'first_step': steps[0],
            'outoforder_steps': get_out_of_order(steps)}

  def _full(steps):
    # Every observed step, for rare-but-important fields.
    return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}

  output = {}
  for field, observations in field_to_obs.items():
    if not observations:
      output[field] = None
      continue
    steps = [obs['step'] for obs in observations]
    if field in SHORT_FIELDS:
      output[field] = _compressed(steps)
    if field in LONG_FIELDS:
      output[field] = _full(steps)
  return output
def get_out_of_order(list_of_numbers):
  """Returns elements that break the monotonically non-decreasing trend.

  This is used to find instances of global step values that are "out-of-order",
  which may trigger TensorBoard event discarding logic.

  Args:
    list_of_numbers: A list of numbers.

  Returns:
    A list of tuples in which each tuple are two elements are adjacent, but the
    second element is lower than the first.
  """
  # TODO(cassandrax): Consider changing this to only check for out-of-order
  # steps within a particular tag.
  # Pair each element with its successor instead of looping over indices.
  return [(previous, current)
          for previous, current in zip(list_of_numbers, list_of_numbers[1:])
          if current < previous]
def generators_from_logdir(logdir):
  """Returns a list of event generators for subdirectories with event files.

  The number of generators returned should equal the number of directories
  within logdir that contain event files. If only logdir contains event files,
  returns a list of length one.

  Args:
    logdir: A log directory that contains event files.

  Returns:
    List of event generators for each subdirectory with event files.
  """
  subdirs = event_multiplexer.GetLogdirSubdirectories(logdir)
  # One chained generator per subdirectory, concatenating the events of
  # every TensorFlow events file found directly inside it.
  generators = [itertools.chain(*[
      generator_from_event_file(os.path.join(subdir, f))
      for f in gfile.ListDirectory(subdir)
      if event_accumulator.IsTensorFlowEventsFile(os.path.join(subdir, f))
  ]) for subdir in subdirs]
  return generators
def generator_from_event_file(event_file):
  """Returns a generator that yields events from an event file."""
  # Thin wrapper: EventFileLoader.Load() yields Event protos lazily.
  return event_file_loader.EventFileLoader(event_file).Load()
def get_inspection_units(logdir='', event_file='', tag=''):
  """Returns a list of InspectionUnit objects given either logdir or event_file.

  If logdir is given, the number of InspectionUnits should equal the
  number of directories or subdirectories that contain event files.

  If event_file is given, the number of InspectionUnits should be 1.

  NOTE(review): if neither logdir nor event_file is supplied, the function
  falls through and implicitly returns None; inspect() validates its
  arguments before calling — confirm no other caller relies on this.

  Args:
    logdir: A log directory that contains event files.
    event_file: Or, a particular event file path.
    tag: An optional tag name to query for.

  Returns:
    A list of InspectionUnit objects.
  """
  if logdir:
    subdirs = event_multiplexer.GetLogdirSubdirectories(logdir)
    inspection_units = []
    for subdir in subdirs:
      # Chain the events of every TF events file directly inside subdir.
      generator = itertools.chain(*[
          generator_from_event_file(os.path.join(subdir, f))
          for f in gfile.ListDirectory(subdir)
          if event_accumulator.IsTensorFlowEventsFile(os.path.join(subdir, f))
      ])
      inspection_units.append(InspectionUnit(
          name=subdir,
          generator=generator,
          field_to_obs=get_field_to_observations_map(generator, tag)))
    if inspection_units:
      print('Found event files in:\n{}\n'.format('\n'.join(
          [u.name for u in inspection_units])))
    elif event_accumulator.IsTensorFlowEventsFile(logdir):
      print(
          'It seems that {} may be an event file instead of a logdir. If this '
          'is the case, use --event_file instead of --logdir to pass '
          'it in.'.format(logdir))
    else:
      print('No event files found within logdir {}'.format(logdir))
    return inspection_units
  elif event_file:
    generator = generator_from_event_file(event_file)
    return [InspectionUnit(
        name=event_file,
        generator=generator,
        field_to_obs=get_field_to_observations_map(generator, tag))]
def inspect(logdir='', event_file='', tag=''):
  """Main function for inspector that prints out a digest of event files.

  Args:
    logdir: A log directory that contains event files.
    event_file: Or, a particular event file path.
    tag: An optional tag name to query for.

  Raises:
    ValueError: If neither logdir and event_file are given, or both are given.
  """
  # The two input modes are mutually exclusive and exactly one is required.
  if logdir and event_file:
    raise ValueError(
        'Must specify either --logdir or --event_file, but not both.')
  if not (logdir or event_file):
    raise ValueError('Must specify either --logdir or --event_file.')

  print(PRINT_SEPARATOR +
        'Processing event files... (this can take a few minutes)\n' +
        PRINT_SEPARATOR)
  inspection_units = get_inspection_units(logdir, event_file, tag)

  for unit in inspection_units:
    if tag:
      print('Event statistics for tag {} in {}:'.format(tag, unit.name))
    else:
      # If the user is not inspecting a particular tag, also print the list of
      # all available tags that they can query.
      print('These tags are in {}:'.format(unit.name))
      print_dict(get_unique_tags(unit.field_to_obs))
      print(PRINT_SEPARATOR)
      print('Event statistics for {}:'.format(unit.name))
    # When querying a single tag, hide fields with no observations.
    print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
    print(PRINT_SEPARATOR)
if __name__ == '__main__':
app.run()
|
fxfitz/ansible | refs/heads/devel | test/units/utils/test_shlex.py | 197 | # (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ansible.utils.shlex import shlex_split
class TestSplit(unittest.TestCase):
    """Tests for ansible.utils.shlex.shlex_split."""

    def test_trivial(self):
        # Whitespace-separated words split into a plain list.
        self.assertEqual(shlex_split("a b c"), ["a", "b", "c"])

    def test_unicode(self):
        # Non-ASCII characters survive splitting unchanged.
        self.assertEqual(shlex_split(u"a b \u010D"), [u"a", u"b", u"\u010D"])

    def test_quoted(self):
        # Double quotes group several words into a single token.
        self.assertEqual(shlex_split('"a b" c'), ["a b", "c"])

    def test_comments(self):
        # With comments=True, everything after '#' is discarded.
        self.assertEqual(shlex_split('"a b" c # d', comments=True), ["a b", "c"])

    def test_error(self):
        # An unterminated quote must raise ValueError.
        self.assertRaises(ValueError, shlex_split, 'a "b')
|
christian-esken/cassandra | refs/heads/trunk | pylib/setup.py | 78 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from distutils.core import setup
def get_extensions():
    """Return the Cython extension modules, or [] when '--no-compile' is given."""
    compile_disabled = "--no-compile" in sys.argv
    if compile_disabled:
        return []
    # Imported lazily so '--no-compile' works even without Cython installed.
    from Cython.Build import cythonize
    return cythonize("cqlshlib/copyutil.py")
# Package metadata; cqlshlib/copyutil.py is compiled to a C extension unless
# the build was invoked with '--no-compile'.
setup(
    name="cassandra-pylib",
    description="Cassandra Python Libraries",
    packages=["cqlshlib"],
    ext_modules=get_extensions(),
)
Woerd88/script.module.hachoir | refs/heads/master | lib/hachoir_parser/common/win32_lang_id.py | 186 | """
Windows 2000 - List of Locale IDs and Language Groups
Original data table:
http://www.microsoft.com/globaldev/reference/win2k/setup/lcid.mspx
"""
# Windows locale id (LCID) -> language/locale name, per the Windows 2000
# table referenced in the module docstring. Sub-language variants share the
# low-order primary-language bits (e.g. all Arabic entries end in 0x01).
LANGUAGE_ID = {
    0x0436: u"Afrikaans",
    0x041c: u"Albanian",
    # Arabic variants (primary language 0x01)
    0x0401: u"Arabic Saudi Arabia",
    0x0801: u"Arabic Iraq",
    0x0c01: u"Arabic Egypt",
    0x1001: u"Arabic Libya",
    0x1401: u"Arabic Algeria",
    0x1801: u"Arabic Morocco",
    0x1c01: u"Arabic Tunisia",
    0x2001: u"Arabic Oman",
    0x2401: u"Arabic Yemen",
    0x2801: u"Arabic Syria",
    0x2c01: u"Arabic Jordan",
    0x3001: u"Arabic Lebanon",
    0x3401: u"Arabic Kuwait",
    0x3801: u"Arabic UAE",
    0x3c01: u"Arabic Bahrain",
    0x4001: u"Arabic Qatar",
    0x042b: u"Armenian",
    0x042c: u"Azeri Latin",
    0x082c: u"Azeri Cyrillic",
    0x042d: u"Basque",
    0x0423: u"Belarusian",
    0x0402: u"Bulgarian",
    0x0403: u"Catalan",
    # Chinese variants (primary language 0x04)
    0x0404: u"Chinese Taiwan",
    0x0804: u"Chinese PRC",
    0x0c04: u"Chinese Hong Kong",
    0x1004: u"Chinese Singapore",
    0x1404: u"Chinese Macau",
    0x041a: u"Croatian",
    0x0405: u"Czech",
    0x0406: u"Danish",
    0x0413: u"Dutch Standard",
    0x0813: u"Dutch Belgian",
    # English variants (primary language 0x09)
    0x0409: u"English United States",
    0x0809: u"English United Kingdom",
    0x0c09: u"English Australian",
    0x1009: u"English Canadian",
    0x1409: u"English New Zealand",
    0x1809: u"English Irish",
    0x1c09: u"English South Africa",
    0x2009: u"English Jamaica",
    0x2409: u"English Caribbean",
    0x2809: u"English Belize",
    0x2c09: u"English Trinidad",
    0x3009: u"English Zimbabwe",
    0x3409: u"English Philippines",
    0x0425: u"Estonian",
    0x0438: u"Faeroese",
    0x0429: u"Farsi",
    0x040b: u"Finnish",
    # French variants (primary language 0x0c)
    0x040c: u"French Standard",
    0x080c: u"French Belgian",
    0x0c0c: u"French Canadian",
    0x100c: u"French Swiss",
    0x140c: u"French Luxembourg",
    0x180c: u"French Monaco",
    0x0437: u"Georgian",
    # German variants (primary language 0x07)
    0x0407: u"German Standard",
    0x0807: u"German Swiss",
    0x0c07: u"German Austrian",
    0x1007: u"German Luxembourg",
    0x1407: u"German Liechtenstein",
    0x0408: u"Greek",
    0x040d: u"Hebrew",
    0x0439: u"Hindi",
    0x040e: u"Hungarian",
    0x040f: u"Icelandic",
    0x0421: u"Indonesian",
    0x0410: u"Italian Standard",
    0x0810: u"Italian Swiss",
    0x0411: u"Japanese",
    0x043f: u"Kazakh",
    0x0457: u"Konkani",
    0x0412: u"Korean",
    0x0426: u"Latvian",
    0x0427: u"Lithuanian",
    0x042f: u"Macedonian",
    0x043e: u"Malay Malaysia",
    0x083e: u"Malay Brunei Darussalam",
    0x044e: u"Marathi",
    0x0414: u"Norwegian Bokmal",
    0x0814: u"Norwegian Nynorsk",
    0x0415: u"Polish",
    0x0416: u"Portuguese Brazilian",
    0x0816: u"Portuguese Standard",
    0x0418: u"Romanian",
    0x0419: u"Russian",
    0x044f: u"Sanskrit",
    0x081a: u"Serbian Latin",
    0x0c1a: u"Serbian Cyrillic",
    0x041b: u"Slovak",
    0x0424: u"Slovenian",
    # Spanish variants (primary language 0x0a)
    0x040a: u"Spanish Traditional Sort",
    0x080a: u"Spanish Mexican",
    0x0c0a: u"Spanish Modern Sort",
    0x100a: u"Spanish Guatemala",
    0x140a: u"Spanish Costa Rica",
    0x180a: u"Spanish Panama",
    0x1c0a: u"Spanish Dominican Republic",
    0x200a: u"Spanish Venezuela",
    0x240a: u"Spanish Colombia",
    0x280a: u"Spanish Peru",
    0x2c0a: u"Spanish Argentina",
    0x300a: u"Spanish Ecuador",
    0x340a: u"Spanish Chile",
    0x380a: u"Spanish Uruguay",
    0x3c0a: u"Spanish Paraguay",
    0x400a: u"Spanish Bolivia",
    0x440a: u"Spanish El Salvador",
    0x480a: u"Spanish Honduras",
    0x4c0a: u"Spanish Nicaragua",
    0x500a: u"Spanish Puerto Rico",
    0x0441: u"Swahili",
    0x041d: u"Swedish",
    0x081d: u"Swedish Finland",
    0x0449: u"Tamil",
    0x0444: u"Tatar",
    0x041e: u"Thai",
    0x041f: u"Turkish",
    0x0422: u"Ukrainian",
    0x0420: u"Urdu",
    0x0443: u"Uzbek Latin",
    0x0843: u"Uzbek Cyrillic",
    0x042a: u"Vietnamese",
}
|
KitKatXperience/platform_external_chromium_org | refs/heads/kk | chrome/common/extensions/docs/server2/build_server.py | 57 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script is used to copy all dependencies into the local directory.
# The package of files can then be uploaded to App Engine.
import os
import shutil
import stat
import sys
SRC_DIR = os.path.join(sys.path[0], os.pardir, os.pardir, os.pardir, os.pardir,
os.pardir)
THIRD_PARTY_DIR = os.path.join(SRC_DIR, 'third_party')
LOCAL_THIRD_PARTY_DIR = os.path.join(sys.path[0], 'third_party')
TOOLS_DIR = os.path.join(SRC_DIR, 'tools')
SCHEMA_COMPILER_FILES = ['memoize.py',
'model.py',
'idl_schema.py',
'schema_util.py',
'json_parse.py']
def MakeInit(path):
  """Create (or truncate) an empty __init__.py inside |path|.

  Marks |path| as a Python package; the mtime is refreshed explicitly so
  the file always looks freshly touched.
  """
  init_path = os.path.join(path, '__init__.py')
  # Open in 'w' mode to create the file (or truncate an existing one);
  # the handle itself is unused, so don't bind it to a name.
  with open(init_path, 'w'):
    pass
  os.utime(init_path, None)
def OnError(function, path, excinfo):
  # shutil.rmtree error handler: removal can fail on read-only files
  # (notably on Windows), so make the path writable and retry the
  # operation that failed.
  os.chmod(path, stat.S_IWUSR)
  function(path)
def CopyThirdParty(src, dest, files=None, make_init=True):
  """Copy a third-party dependency into LOCAL_THIRD_PARTY_DIR.

  Args:
    src: Source directory to copy from.
    dest: Destination path relative to LOCAL_THIRD_PARTY_DIR.
    files: If given, copy only these files; otherwise copy the whole tree.
    make_init: Whether to drop an __init__.py so the copy is importable.
  """
  dest_path = os.path.join(LOCAL_THIRD_PARTY_DIR, dest)
  if not files:
    shutil.copytree(src, dest_path)
    if make_init:
      MakeInit(dest_path)
    return
  try:
    os.makedirs(dest_path)
  except OSError:
    # The directory already exists — several copies may share a destination
    # (e.g. json_schema_compiler). Narrowed from a bare `except Exception`
    # so unrelated failures are no longer silently swallowed.
    pass
  if make_init:
    MakeInit(dest_path)
  for filename in files:
    shutil.copy(os.path.join(src, filename), os.path.join(dest_path, filename))
def main():
  """Rebuild the local third_party/ bundle from the Chromium source tree."""
  if os.path.isdir(LOCAL_THIRD_PARTY_DIR):
    # Start from a clean slate; OnError clears read-only bits so rmtree
    # can delete files that would otherwise fail (e.g. on Windows).
    try:
      shutil.rmtree(LOCAL_THIRD_PARTY_DIR, False, OnError)
    except OSError:
      print('*-------------------------------------------------------------*\n'
            '| If you are receiving an upload error, try removing |\n'
            '| chrome/common/extensions/docs/server2/third_party manually. |\n'
            '*-------------------------------------------------------------*\n')
  # Copy each dependency; several copies share the json_schema_compiler
  # destination, so its directory is created once and reused.
  CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'handlebar'), 'handlebar')
  CopyThirdParty(os.path.join(SRC_DIR, 'ppapi', 'generators'),
                 'json_schema_compiler')
  CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'ply'),
                 os.path.join('json_schema_compiler', 'ply'))
  CopyThirdParty(os.path.join(TOOLS_DIR, 'json_schema_compiler'),
                 'json_schema_compiler',
                 SCHEMA_COMPILER_FILES)
  CopyThirdParty(os.path.join(TOOLS_DIR, 'json_comment_eater'),
                 'json_schema_compiler',
                 ['json_comment_eater.py'])
  CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'simplejson'),
                 os.path.join('json_schema_compiler', 'simplejson'),
                 make_init=False)
  MakeInit(LOCAL_THIRD_PARTY_DIR)
  # To be able to use the Handlebar class we need this import in __init__.py.
  with open(os.path.join(LOCAL_THIRD_PARTY_DIR,
                         'handlebar',
                         '__init__.py'), 'a') as f:
    f.write('from handlebar import Handlebar\n')
if __name__ == '__main__':
main()
|
darmaa/odoo | refs/heads/master | addons/stock_picking_wave/__openerp__.py | 61 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Warehouse Management: Waves',
'version': '1.0',
'category': 'Stock Management',
'description': """
This module adds the picking wave option in warehouse management.
=================================================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['stock'],
'data': ['security/ir.model.access.csv',
'stock_picking_wave_view.xml',
'stock_picking_wave_sequence.xml',
'wizard/picking_to_wave_view.xml',
],
'demo': [
'stock_picking_wave_demo.xml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ekkyvalent/ep-blog | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 1417 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
# Directory containing this script; used as the working directory for the
# ninja invocation in ExecClCompile.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Dispatch |args| to a WinTool command handler; exit with its return code
  when the handler reports one."""
  rc = WinTool().Dispatch(args)
  if rc is not None:
    sys.exit(rc)
class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list."""
  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one."""
    if len(args) < 1:
      raise Exception("Not enough arguments")
    if args[0] != 'link.exe':
      return
    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break
    if endpoint_name is None:
      return
    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")
    # E.g. 'recursive-mirror' dispatches to ExecRecursiveMirror(*rest).
    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')
  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)
  def ExecStamp(self, path):
    """Simple stamp command."""
    # Creating/truncating the file updates its mtime, like `touch`.
    open(path, 'w').close()
  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)
    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)
  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    '   Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                            shell=True,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = link.communicate()
    for line in out.splitlines():
      if (not line.startswith('   Creating library ') and
          not line.startswith('Generating code') and
          not line.startswith('Finished generating code')):
        print line
    return link.returncode
  def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                            mt, rc, intermediate_manifest, *manifests):
    """A wrapper for handling creating a manifest resource and then executing
    a link command."""
    # The 'normal' way to do manifests is to have link generate a manifest
    # based on gathering dependencies from the object files, then merge that
    # manifest with other manifests supplied as sources, convert the merged
    # manifest to a resource, and then *relink*, including the compiled
    # version of the manifest resource. This breaks incremental linking, and
    # is generally overly complicated. Instead, we merge all the manifests
    # provided (along with one that includes what would normally be in the
    # linker-generated one, see msvs_emulation.py), and include that into the
    # first and only link. We still tell link to generate a manifest, but we
    # only use that to assert that our simpler process did not miss anything.
    variables = {
      'python': sys.executable,
      'arch': arch,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resname,
      'mt': mt,
      'rc': rc,
      'intermediate_manifest': intermediate_manifest,
      'manifests': ' '.join(manifests),
    }
    add_to_ld = ''
    if manifests:
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
      if embed_manifest == 'True':
        subprocess.check_call(
            '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
            ' %(out)s.manifest.rc %(resname)s' % variables)
        subprocess.check_call(
            '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
            '%(out)s.manifest.rc' % variables)
        add_to_ld = ' %(out)s.manifest.res' % variables
    subprocess.check_call(ldcmd + add_to_ld)
    # Run mt.exe on the theoretically complete manifest we generated, merging
    # it with the one the linker generated to confirm that the linker
    # generated one does not add anything. This is strictly unnecessary for
    # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
    # used in a #pragma comment.
    if manifests:
      # Merge the intermediate one with ours to .assert.manifest, then check
      # that .assert.manifest is identical to ours.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(out)s.manifest %(intermediate_manifest)s '
          '-out:%(out)s.assert.manifest' % variables)
      assert_manifest = '%(out)s.assert.manifest' % variables
      our_manifest = '%(out)s.manifest' % variables
      # Load and normalize the manifests. mt.exe sometimes removes whitespace,
      # and sometimes doesn't unfortunately.
      with open(our_manifest, 'rb') as our_f:
        with open(assert_manifest, 'rb') as assert_f:
          our_data = our_f.read().translate(None, string.whitespace)
          assert_data = assert_f.read().translate(None, string.whitespace)
      if our_data != assert_data:
        os.unlink(out)
        def dump(filename):
          sys.stderr.write('%s\n-----\n' % filename)
          with open(filename, 'rb') as f:
            sys.stderr.write(f.read() + '\n-----\n')
        dump(intermediate_manifest)
        dump(our_manifest)
        dump(assert_manifest)
        sys.stderr.write(
            'Linker generated manifest "%s" added to final manifest "%s" '
            '(result in "%s"). '
            'Were /MANIFEST switches used in #pragma statements? ' % (
              intermediate_manifest, our_manifest, assert_manifest))
        return 1
  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode
  def ExecManifestToRc(self, arch, *args):
    """Creates a resource file pointing a SxS assembly manifest.
    |args| is tuple containing path to resource file, path to manifest file
    and resource name which can be "1" (for executables) or "2" (for DLLs)."""
    manifest_path, resource_path, resource_name = args
    with open(resource_path, 'wb') as output:
      output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
        resource_name,
        os.path.abspath(manifest_path).replace('\\', '/')))
  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefixes = ('Processing ', '64 bit Processing ')
    processing = set(os.path.basename(x)
                     for x in lines if x.startswith(prefixes))
    for line in lines:
      if not line.startswith(prefixes) and line not in processing:
        print line
    return popen.returncode
  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode
  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode
  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after gyp-time. http://crbug.com/333738.
    for k, v in os.environ.iteritems():
      if k not in env:
        env[k] = v
    args = open(rspfile).read()
    dir = dir[0] if dir else None
    return subprocess.call(args, shell=True, env=env, cwd=dir)
  def ExecClCompile(self, project_dir, selected_files):
    """Executed by msvs-ninja projects when the 'ClCompile' target is used to
    build selected C/C++ files."""
    project_dir = os.path.relpath(project_dir, BASE_DIR)
    selected_files = selected_files.split(';')
    # NOTE(review): the '^^' suffix appears to select the ninja target that
    # builds each source file (msvs-ninja convention) -- confirm against the
    # generator before changing.
    ninja_targets = [os.path.join(project_dir, filename) + '^^'
                     for filename in selected_files]
    cmd = ['ninja.exe']
    cmd.extend(ninja_targets)
    return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
# Command-line entry point: forward argv (minus the program name) to main().
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
charlesvdv/servo | refs/heads/master | tests/wpt/harness/wptrunner/wptmanifest/backends/static.py | 190 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import operator
from ..node import NodeVisitor
from ..parser import parse
class Compiler(NodeVisitor):
    """Compiler backend that evaluates conditional expressions
    to give static output"""

    def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
        """Compile a raw AST into a form with conditional expressions
        evaluated.

        tree - The root node of the wptmanifest AST to compile
        expr_data - A dictionary of key / value pairs to use when
                    evaluating conditional expressions
        data_cls_getter - A function taking two parameters; the previous
                          output node and the current ast node and returning
                          the class of the output node to use for the current
                          ast node
        """
        self._kwargs = kwargs
        self.expr_data = expr_data
        # Fall back to plain ManifestItem output nodes when no getter is given.
        if data_cls_getter is not None:
            self.data_cls_getter = data_cls_getter
        else:
            self.data_cls_getter = lambda x, y: ManifestItem
        self.output_node = None
        self.visit(tree)
        return self.output_node

    def visit_DataNode(self, node):
        # Remember the node we were building so the new one can be attached.
        previous_output = self.output_node
        if previous_output is None:
            assert node.parent is None
            self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs)
        else:
            output_cls = self.data_cls_getter(previous_output, node)
            self.output_node = output_cls(node.data)
        for child in node.children:
            self.visit(child)
        if previous_output is not None:
            previous_output.append(self.output_node)
            self.output_node = self.output_node.parent

    def visit_KeyValueNode(self, node):
        # The first child producing a non-None value wins; later children are
        # not evaluated (the generator is consumed lazily).
        candidates = (self.visit(child) for child in node.children)
        chosen = next((value for value in candidates if value is not None), None)
        if chosen is not None:
            self.output_node.set(node.data, chosen)

    def visit_ValueNode(self, node):
        return node.data

    def visit_AtomNode(self, node):
        return node.data

    def visit_ListNode(self, node):
        return list(map(self.visit, node.children))

    def visit_ConditionalNode(self, node):
        assert len(node.children) == 2
        condition, value = node.children
        if not self.visit(condition):
            return None
        return self.visit(value)

    def visit_StringNode(self, node):
        # Each child (e.g. an index expression) transforms the literal in turn.
        result = node.data
        for transform in node.children:
            result = self.visit(transform)(result)
        return result

    def visit_NumberNode(self, node):
        return float(node.data) if "." in node.data else int(node.data)

    def visit_VariableNode(self, node):
        result = self.expr_data[node.data]
        for transform in node.children:
            result = self.visit(transform)(result)
        return result

    def visit_IndexNode(self, node):
        assert len(node.children) == 1
        key = self.visit(node.children[0])
        return lambda value: value[key]

    def visit_UnaryExpressionNode(self, node):
        assert len(node.children) == 2
        op_fn = self.visit(node.children[0])
        return op_fn(self.visit(node.children[1]))

    def visit_BinaryExpressionNode(self, node):
        assert len(node.children) == 3
        op_fn = self.visit(node.children[0])
        lhs = self.visit(node.children[1])
        rhs = self.visit(node.children[2])
        return op_fn(lhs, rhs)

    def visit_UnaryOperatorNode(self, node):
        ops = {"not": operator.not_}
        return ops[node.data]

    def visit_BinaryOperatorNode(self, node):
        ops = {"and": operator.and_,
               "or": operator.or_,
               "==": operator.eq,
               "!=": operator.ne}
        return ops[node.data]
class ManifestItem(object):
    """A node in the compiled manifest tree.

    Each item has a name, an ordered list of children and a private
    key/value store.  Lookups via get()/has_key() fall back to the root
    node, so values set on the root act as defaults for the whole tree.
    """

    def __init__(self, name, **kwargs):
        self.parent = None
        self.name = name
        self.children = []
        self._data = {}

    def __repr__(self):
        return "<ManifestItem %s>" % (self.name)

    def __str__(self):
        rv = [repr(self)]
        for item in self.children:
            rv.extend("  %s" % line for line in str(item).split("\n"))
        return "\n".join(rv)

    @property
    def is_empty(self):
        # An item is empty when neither it nor any descendant holds data.
        if self._data:
            return False
        return all(child.is_empty for child in self.children)

    @property
    def root(self):
        # Walk up to the topmost ancestor (self when there is no parent).
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def has_key(self, key):
        """Return True if |key| is set on this node or on the root."""
        for node in [self, self.root]:
            if key in node._data:
                return True
        return False

    def get(self, key):
        """Return the value for |key|, falling back to the root.

        Raises KeyError when the key is set on neither node.
        """
        for node in [self, self.root]:
            if key in node._data:
                return node._data[key]
        raise KeyError

    def set(self, name, value):
        self._data[name] = value

    def remove(self):
        """Detach this item from its parent, if any."""
        if self.parent:
            self.parent._remove_child(self)

    def _remove_child(self, child):
        self.children.remove(child)
        child.parent = None

    def iterchildren(self, name=None):
        """Yield children, optionally restricted to those named |name|."""
        for item in self.children:
            if item.name == name or name is None:
                yield item

    def _flatten(self):
        # Merge this node's data with the root's; local values win.
        # Use dict.items() (not the Python-2-only iteritems()) so this class
        # works unchanged under both Python 2 and Python 3.
        rv = {}
        for node in [self, self.root]:
            for name, value in node._data.items():
                if name not in rv:
                    rv[name] = value
        return rv

    def iteritems(self):
        for item in self._flatten().items():
            yield item

    def iterkeys(self):
        for item in self._flatten().keys():
            yield item

    def itervalues(self):
        for item in self._flatten().values():
            yield item

    def append(self, child):
        """Add |child| as the last child of this item and return it."""
        child.parent = self
        self.children.append(child)
        return child
def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
    """Compile an already-parsed wptmanifest AST using |expr_data| for
    conditional-expression evaluation."""
    compiler = Compiler()
    return compiler.compile(ast, expr_data,
                            data_cls_getter=data_cls_getter, **kwargs)
def compile(stream, expr_data, data_cls_getter=None, **kwargs):
    """Parse |stream| and compile the resulting AST (see compile_ast)."""
    ast = parse(stream)
    return compile_ast(ast, expr_data,
                       data_cls_getter=data_cls_getter, **kwargs)
|
davidbrenner/rednotebook | refs/heads/master | rednotebook/external/argparse.py | 490 | # Author: Steven J. Bethard <steven.bethard@gmail.com>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.2.1'
# Public API of the module; everything else is an implementation detail.
__all__ = [
    'ArgumentParser',
    'ArgumentError',
    'ArgumentTypeError',
    'FileType',
    'HelpFormatter',
    'ArgumentDefaultsHelpFormatter',
    'RawDescriptionHelpFormatter',
    'RawTextHelpFormatter',
    'Namespace',
    'Action',
    'ONE_OR_MORE',
    'OPTIONAL',
    'PARSER',
    'REMAINDER',
    'SUPPRESS',
    'ZERO_OR_MORE',
]
# Standard-library imports, aliased with a leading underscore to keep the
# module namespace limited to the public API above.
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
# Compatibility shims for very old interpreters and for the str/basestring
# split between Python 2 and Python 3.
try:
    set
except NameError:
    # for python < 2.4 compatibility (sets module is there since 2.3):
    from sets import Set as set
try:
    basestring
except NameError:
    basestring = str
try:
    sorted
except NameError:
    # for python < 2.4 compatibility:
    def sorted(iterable, reverse=False):
        result = list(iterable)
        result.sort()
        if reverse:
            result.reverse()
        return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
# Sentinel telling the parser/formatter to omit a value entirely.
SUPPRESS = '==SUPPRESS=='
# nargs markers: '?' = zero or one, '*' = zero or more, '+' = one or more.
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
# PARSER and REMAINDER consume the rest of the command line (sub-commands).
PARSER = 'A...'
REMAINDER = '...'
# Namespace attribute used to collect arguments parse_known_args leaves over.
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# ho nelp; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
    """Build the metavar portion of a usage string for *action*."""
    get_metavar = self._metavar_formatter(action, default_metavar)
    nargs = action.nargs
    if nargs is None:
        return '%s' % get_metavar(1)
    if nargs == OPTIONAL:
        return '[%s]' % get_metavar(1)
    if nargs == ZERO_OR_MORE:
        return '[%s [%s ...]]' % get_metavar(2)
    if nargs == ONE_OR_MORE:
        return '%s [%s ...]' % get_metavar(2)
    if nargs == REMAINDER:
        return '...'
    if nargs == PARSER:
        return '%s ...' % get_metavar(1)
    # An integer nargs: one metavar slot per consumed argument.
    return ' '.join(['%s'] * nargs) % get_metavar(nargs)
def _expand_help(self, action):
    """Interpolate %(...)s references in an action's help string."""
    params = dict(vars(action), prog=self._prog)
    # Drop suppressed entries; show readable names for callables (e.g. type=int).
    for name, value in list(params.items()):
        if value is SUPPRESS:
            del params[name]
        elif hasattr(value, '__name__'):
            params[name] = value.__name__
    if params.get('choices') is not None:
        params['choices'] = ', '.join(str(choice) for choice in params['choices'])
    return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
    """Yield any sub-actions of *action*, bumping the indent level around them."""
    try:
        get_subactions = action._get_subactions
    except AttributeError:
        # Plain actions have no sub-actions; yield nothing.
        return
    self._indent()
    for subaction in get_subactions():
        yield subaction
    self._dedent()
def _split_lines(self, text, width):
    """Collapse internal whitespace, then wrap *text* into lines of *width*."""
    collapsed = self._whitespace_matcher.sub(' ', text).strip()
    return _textwrap.wrap(collapsed, width)
def _fill_text(self, text, width, indent):
    """Collapse whitespace and re-fill *text* with the given indent."""
    collapsed = self._whitespace_matcher.sub(' ', text).strip()
    return _textwrap.fill(collapsed, width,
                          initial_indent=indent,
                          subsequent_indent=indent)
def _get_help_string(self, action):
    # Hook for subclasses (e.g. ArgumentDefaultsHelpFormatter) to augment
    # an action's help text before %-interpolation in _expand_help().
    return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
    """Help message formatter which retains any formatting in descriptions.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _fill_text(self, text, width, indent):
        # Keep the author's own line breaks; just prefix each line with the
        # indent instead of re-wrapping.
        return ''.join(indent + line for line in text.splitlines(True))
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
    """Help message formatter which retains formatting of all help text.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _split_lines(self, text, width):
        # No re-wrapping at all: honour the help string's own line breaks.
        return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
    """Help message formatter which adds default values to argument help.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _get_help_string(self, action):
        """Append ' (default: ...)' to the help text where appropriate."""
        help = action.help
        # BUG FIX: guard against actions created without a help string;
        # the 'in' test below would raise TypeError on None.
        if help is None:
            help = ''
        # Skip actions whose help already interpolates the default.
        if '%(default)' not in help:
            if action.default is not SUPPRESS:
                # Only optionals (and positionals with optional-style nargs)
                # get a default annotation.
                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    help += ' (default: %(default)s)'
        return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
    """Best-effort display name for *argument*, or None if it has none.

    Preference order: option strings, then metavar, then dest.
    """
    if argument is None:
        return None
    if argument.option_strings:
        return '/'.join(argument.option_strings)
    if argument.metavar not in (None, SUPPRESS):
        return argument.metavar
    if argument.dest not in (None, SUPPRESS):
        return argument.dest
    return None
class ArgumentError(Exception):
    """An error from creating or using an argument (optional or positional).

    The string value of this exception is the message, augmented with
    information about the argument that caused it.
    """

    def __init__(self, argument, message):
        self.argument_name = _get_action_name(argument)
        self.message = message

    def __str__(self):
        # Prefix the message with the argument's name when one is known.
        if self.argument_name is None:
            return '%s' % self.message
        return 'argument %s: %s' % (self.argument_name, self.message)
class ArgumentTypeError(Exception):
    """Raised when a type= conversion callable rejects a command line string."""
    pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.

    Action objects are used by an ArgumentParser to represent the information
    needed to parse a single argument from one or more strings from the
    command line. The keyword arguments to the Action constructor are also
    all attributes of Action instances.

    Keyword Arguments:

        - option_strings -- A list of command-line option strings which
            should be associated with this action.

        - dest -- The name of the attribute to hold the created object(s)

        - nargs -- The number of command-line arguments that should be
            consumed. By default, one argument will be consumed and a single
            value will be produced.  Other values include:
                - N (an integer) consumes N arguments (and produces a list)
                - '?' consumes zero or one arguments
                - '*' consumes zero or more arguments (and produces a list)
                - '+' consumes one or more arguments (and produces a list)
            Note that the difference between the default and nargs=1 is that
            with the default, a single value will be produced, while with
            nargs=1, a list containing a single value will be produced.

        - const -- The value to be produced if the option is specified and the
            option uses an action that takes no values.

        - default -- The value to be produced if the option is not specified.

        - type -- The type which the command-line arguments should be converted
            to, should be one of 'string', 'int', 'float', 'complex' or a
            callable object that accepts a single string argument. If None,
            'string' is assumed.

        - choices -- A container of values that should be allowed. If not None,
            after a command-line argument has been converted to the appropriate
            type, an exception will be raised if it is not a member of this
            collection.

        - required -- True if the action must always be specified at the
            command line. This is only meaningful for optional command-line
            arguments.

        - help -- The help string describing the argument.

        - metavar -- The name to be used for the option's argument with the
            help string. If None, the 'dest' value will be used as the name.
    """

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # Plain attribute storage; argument validation is done by subclasses
        # (e.g. the nargs checks in _StoreAction / _AppendAction).
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar

    def _get_kwargs(self):
        # Attributes included in _AttributeHolder's repr; 'required' is not
        # in this list.
        names = [
            'option_strings',
            'dest',
            'nargs',
            'const',
            'default',
            'type',
            'choices',
            'help',
            'metavar',
        ]
        return [(name, getattr(self, name)) for name in names]

    def __call__(self, parser, namespace, values, option_string=None):
        # Subclasses must override this to consume the converted values and
        # (typically) store something on the namespace.
        raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
    """Default action: store the (converted) value on the namespace."""

    def __init__(self, option_strings, dest, nargs=None, const=None,
                 default=None, type=None, choices=None, required=False,
                 help=None, metavar=None):
        # A store action with nothing to store makes no sense.
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        # const is only meaningful together with nargs='?'.
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=nargs,
            const=const, default=default, type=type, choices=choices,
            required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Later occurrences simply overwrite earlier ones.
        setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
    """Store a fixed constant on the namespace when the option is seen."""

    def __init__(self, option_strings, dest, const, default=None,
                 required=False, help=None, metavar=None):
        # Consumes no command-line values (nargs=0).
        super(_StoreConstAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0, const=const,
            default=default, required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
    """store_const specialised to store True (default False)."""

    def __init__(self, option_strings, dest, default=False, required=False,
                 help=None):
        super(_StoreTrueAction, self).__init__(
            option_strings=option_strings, dest=dest, const=True,
            default=default, required=required, help=help)
class _StoreFalseAction(_StoreConstAction):
    """store_const specialised to store False (default True)."""

    def __init__(self, option_strings, dest, default=True, required=False,
                 help=None):
        super(_StoreFalseAction, self).__init__(
            option_strings=option_strings, dest=dest, const=False,
            default=default, required=required, help=help)
class _AppendAction(Action):
    """Append each occurrence's value to a list on the namespace."""

    def __init__(self, option_strings, dest, nargs=None, const=None,
                 default=None, type=None, choices=None, required=False,
                 help=None, metavar=None):
        # An append action with no value to append makes no sense.
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        # const is only meaningful together with nargs='?'.
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_AppendAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=nargs,
            const=const, default=default, type=type, choices=choices,
            required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Copy first so a shared default list is never mutated in place.
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(values)
        setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
    """Append a fixed constant to a list each time the option is seen."""

    def __init__(self, option_strings, dest, const, default=None,
                 required=False, help=None, metavar=None):
        # Consumes no command-line values (nargs=0).
        super(_AppendConstAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0, const=const,
            default=default, required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Copy first so a shared default list is never mutated in place.
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(self.const)
        setattr(namespace, self.dest, items)
class _CountAction(Action):
    """Count the option's occurrences (e.g. -vvv yields 3)."""

    def __init__(self, option_strings, dest, default=None, required=False,
                 help=None):
        super(_CountAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0,
            default=default, required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        # Start from the current count (or 0) and bump it by one.
        setattr(namespace, self.dest,
                _ensure_value(namespace, self.dest, 0) + 1)
class _HelpAction(Action):
    """Print the parser's help text and exit the program."""

    def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS,
                 help=None):
        super(_HelpAction, self).__init__(
            option_strings=option_strings, dest=dest, default=default,
            nargs=0, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()
class _VersionAction(Action):
    """Print version information and exit the program."""

    def __init__(self, option_strings, version=None, dest=SUPPRESS,
                 default=SUPPRESS,
                 help="show program's version number and exit"):
        super(_VersionAction, self).__init__(
            option_strings=option_strings, dest=dest, default=default,
            nargs=0, help=help)
        self.version = version

    def __call__(self, parser, namespace, values, option_string=None):
        # Fall back to the (deprecated) parser-level version attribute.
        version = self.version
        if version is None:
            version = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(version)
        parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
    """Action that dispatches the remaining arguments to a named sub-parser."""

    class _ChoicesPseudoAction(Action):
        # Lightweight stand-in whose only purpose is to let the help
        # formatter render each sub-command's name and help string.
        def __init__(self, name, help):
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=name, help=help)

    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        self._prog_prefix = prog
        self._parser_class = parser_class
        # name -> sub-parser; also serves as the choices container so that
        # choice validation sees the registered names.
        self._name_parser_map = {}
        self._choices_actions = []

        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)

    def add_parser(self, name, **kwargs):
        """Create, register and return a new sub-parser for *name*."""
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, help)
            self._choices_actions.append(choice_action)

        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser
        return parser

    def _get_subactions(self):
        return self._choices_actions

    def __call__(self, parser, namespace, values, option_string=None):
        parser_name = values[0]
        arg_strings = values[1:]

        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)

        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            # BUG FIX: translate the message template *before* interpolating
            # the values; applying _() to the already-formatted string means
            # the gettext catalog lookup can never match.
            msg = _('unknown parser %r (choices: %s)') % tup
            raise ArgumentError(self, msg)

        # parse all the remaining options into the namespace
        # store any unrecognized options on the object, so that the top
        # level parser can decide what to do with them
        namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
        if arg_strings:
            vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
            getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# ==============
# Type classes
# ==============
class FileType(object):
    """Factory for creating file object types

    Instances of FileType are typically passed as type= arguments to the
    ArgumentParser add_argument() method.

    Keyword Arguments:
        - mode -- A string indicating how the file is to be opened. Accepts the
            same values as the builtin open() function.
        - bufsize -- The file's desired buffer size. Accepts the same values as
            the builtin open() function.
    """

    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize

    def __call__(self, string):
        """Open *string* as a file; '-' maps to stdin (read) or stdout (write)."""
        # the special argument "-" means sys.std{in,out}
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            elif 'w' in self._mode:
                return _sys.stdout
            else:
                # BUG FIX: translate the message template *before*
                # interpolating the mode; applying _() to the formatted
                # string defeats the gettext catalog lookup.
                msg = _('argument "-" with mode %r') % self._mode
                raise ValueError(msg)

        # all other arguments are used as file names
        if self._bufsize:
            return open(string, self._mode, self._bufsize)
        else:
            return open(string, self._mode)

    def __repr__(self):
        # e.g. FileType('r') or FileType('w', 1024); None bufsize is omitted.
        args = [self._mode, self._bufsize]
        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
        return '%s(%s)' % (type(self).__name__, args_str)
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
    """Simple object for storing attributes.

    Implements equality by attribute names and values, and provides a simple
    string representation.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    # Namespaces are mutable, hence unhashable.
    __hash__ = None

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self.__dict__
class _ActionsContainer(object):
    """Shared implementation behind ArgumentParser and argument groups.

    Maintains the action/type registries, the flat list of actions, the
    option-string -> action index and the parser-level defaults, and
    provides the add_argument() machinery built on top of them.
    """

    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()

        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler

        # set up registries
        self._registries = {}

        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)

        # raise an exception if the conflict handler is invalid
        self._get_handler()

        # action storage
        self._actions = []
        self._option_string_actions = {}

        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []

        # defaults storage
        self._defaults = {}

        # determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')

        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []

    # ====================
    # Registration methods
    # ====================
    def register(self, registry_name, value, object):
        """Map *value* to *object* in the named registry (e.g. 'action', 'type')."""
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object

    def _registry_get(self, registry_name, value, default=None):
        # Look up *value* in a registry, falling back to *default* when
        # it was never registered.
        return self._registries[registry_name].get(value, default)

    # ==================================
    # Namespace default accessor methods
    # ==================================
    def set_defaults(self, **kwargs):
        """Set parser-level defaults, also updating matching existing actions."""
        self._defaults.update(kwargs)

        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]

    def get_default(self, dest):
        """Return the default for *dest*: action default first, then parser-level."""
        for action in self._actions:
            if action.dest == dest and action.default is not None:
                return action.default
        return self._defaults.get(dest, None)


    # =======================
    # Adding argument actions
    # =======================
    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        """

        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            if args and 'dest' in kwargs:
                raise ValueError('dest supplied twice for positional argument')
            kwargs = self._get_positional_kwargs(*args, **kwargs)

        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)

        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default

        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        if not _callable(action_class):
            raise ValueError('unknown action "%s"' % action_class)
        action = action_class(**kwargs)

        # raise an error if the action type is not callable
        type_func = self._registry_get('type', action.type, action.type)
        if not _callable(type_func):
            raise ValueError('%r is not callable' % type_func)

        return self._add_action(action)

    def add_argument_group(self, *args, **kwargs):
        """Create, register and return a new _ArgumentGroup."""
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group

    def add_mutually_exclusive_group(self, **kwargs):
        """Create, register and return a new _MutuallyExclusiveGroup."""
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group

    def _add_action(self, action):
        # resolve any conflicts
        self._check_conflict(action)

        # add to actions list
        self._actions.append(action)
        action.container = self

        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action

        # set the flag if any option strings look like negative numbers
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)

        # return the created action
        return action

    def _remove_action(self, action):
        self._actions.remove(action)

    def _add_container_actions(self, container):
        # Merge all of *container*'s actions/groups into this container;
        # used to implement ArgumentParser's parents= feature.
        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group

        # map each action to its group
        group_map = {}
        for group in container._action_groups:

            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)

            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]

        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)

            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group

        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)

    def _get_positional_kwargs(self, dest, **kwargs):
        # Normalise add_argument() keywords for a positional argument.
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)

        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True

        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])

    def _get_optional_kwargs(self, *args, **kwargs):
        # Normalise add_argument() keywords for an optional argument.
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)

            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)

        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')

        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)

    def _pop_action_class(self, kwargs, default=None):
        # Pull the action= keyword out of kwargs and resolve it through the
        # 'action' registry (strings map to Action subclasses).
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)

    def _get_handler(self):
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)

    def _check_conflict(self, action):

        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))

        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)

    def _handle_conflict_error(self, action, conflicting_actions):
        # 'error' conflict handler: refuse to redefine an option string.
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)

    def _handle_conflict_resolve(self, action, conflicting_actions):
        # 'resolve' conflict handler: the newly-added action silently wins.
        # remove all conflicting options
        for option_string, action in conflicting_actions:

            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)

            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
    """A titled sub-grouping of a container's actions, used for help display.

    Most storage (registries, actions, option-string index, defaults) is
    shared with the parent container; only _group_actions is per-group.
    """

    def __init__(self, container, title=None, description=None, **kwargs):
        # add any missing keyword arguments by checking the container
        update = kwargs.setdefault
        update('conflict_handler', container.conflict_handler)
        update('prefix_chars', container.prefix_chars)
        update('argument_default', container.argument_default)
        super_init = super(_ArgumentGroup, self).__init__
        super_init(description=description, **kwargs)

        # group attributes
        self.title = title
        self._group_actions = []

        # share most attributes with the container
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        # Register with the shared container state, then record group
        # membership for help formatting.
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
    """Argument group whose members may not appear together on a command line."""

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        # Required arguments always appear, so they can never be mutually
        # exclusive with anything.
        if action.required:
            raise ValueError(_('mutually exclusive arguments must be optional'))
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/-help option
"""
def __init__(self,
             prog=None,
             usage=None,
             description=None,
             epilog=None,
             version=None,
             parents=[],
             formatter_class=HelpFormatter,
             prefix_chars='-',
             fromfile_prefix_chars=None,
             argument_default=None,
             conflict_handler='error',
             add_help=True):
    """Set up the parser; see the class docstring for keyword meanings.

    NOTE: parents=[] is a shared mutable default, but it is only iterated
    here, never mutated, so it is safe.
    """
    # version= is deprecated in favour of an action='version' argument.
    if version is not None:
        import warnings
        warnings.warn(
            """The "version" argument to ArgumentParser is deprecated. """
            """Please use """
            """"add_argument(..., action='version', version="N", ...)" """
            """instead""", DeprecationWarning)

    superinit = super(ArgumentParser, self).__init__
    superinit(description=description,
              prefix_chars=prefix_chars,
              argument_default=argument_default,
              conflict_handler=conflict_handler)

    # default setting for prog
    if prog is None:
        prog = _os.path.basename(_sys.argv[0])

    self.prog = prog
    self.usage = usage
    self.epilog = epilog
    self.version = version
    self.formatter_class = formatter_class
    self.fromfile_prefix_chars = fromfile_prefix_chars
    self.add_help = add_help

    add_group = self.add_argument_group
    self._positionals = add_group(_('positional arguments'))
    self._optionals = add_group(_('optional arguments'))
    self._subparsers = None

    # register types
    def identity(string):
        # Default type= conversion: leave the argument string untouched.
        return string
    self.register('type', None, identity)

    # add help and version arguments if necessary
    # (using explicit default to override global argument_default)
    if '-' in prefix_chars:
        default_prefix = '-'
    else:
        default_prefix = prefix_chars[0]
    if self.add_help:
        self.add_argument(
            default_prefix+'h', default_prefix*2+'help',
            action='help', default=SUPPRESS,
            help=_('show this help message and exit'))
    if self.version:
        self.add_argument(
            default_prefix+'v', default_prefix*2+'version',
            action='version', default=SUPPRESS,
            version=self.version,
            help=_("show program's version number and exit"))

    # add parent arguments and defaults
    for parent in parents:
        self._add_container_actions(parent)
        try:
            defaults = parent._defaults
        except AttributeError:
            pass
        else:
            self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
    """Attribute (name, value) pairs used by _AttributeHolder's repr."""
    names = ['prog', 'usage', 'description', 'version',
             'formatter_class', 'conflict_handler', 'add_help']
    return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
    """Create and return the _SubParsersAction used for sub-commands.

    Only one subparsers argument may be added per parser.
    """
    if self._subparsers is not None:
        self.error(_('cannot have multiple subparser arguments'))

    # add the parser class to the arguments if it's not present
    kwargs.setdefault('parser_class', type(self))

    # With title/description, sub-commands get their own help group;
    # otherwise they are listed with the positional arguments.
    if 'title' in kwargs or 'description' in kwargs:
        title = _(kwargs.pop('title', 'subcommands'))
        description = _(kwargs.pop('description', None))
        self._subparsers = self.add_argument_group(title, description)
    else:
        self._subparsers = self._positionals

    # prog defaults to the usage message of this parser, skipping
    # optional arguments and with no "usage:" prefix
    if kwargs.get('prog') is None:
        formatter = self._get_formatter()
        positionals = self._get_positional_actions()
        groups = self._mutually_exclusive_groups
        formatter.add_usage(self.usage, positionals, groups, '')
        kwargs['prog'] = formatter.format_help().strip()

    # create the parsers action and add it to the positionals list
    parsers_class = self._pop_action_class(kwargs, 'parsers')
    action = parsers_class(option_strings=[], **kwargs)
    self._subparsers._add_action(action)

    # return the created parsers action
    return action
def _add_action(self, action):
    """Route the action into the optionals or positionals display group."""
    group = self._optionals if action.option_strings else self._positionals
    group._add_action(action)
    return action
def _get_optional_actions(self):
    """Return the subset of actions that have option strings."""
    return [action for action in self._actions if action.option_strings]
def _get_positional_actions(self):
    """Return the subset of actions that have no option strings."""
    return [action for action in self._actions if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
    """Parse args, erroring out if any arguments are left unrecognized."""
    namespace, extras = self.parse_known_args(args, namespace)
    if extras:
        msg = _('unrecognized arguments: %s')
        self.error(msg % ' '.join(extras))
    return namespace
def parse_known_args(self, args=None, namespace=None):
    """Like parse_args(), but return (namespace, leftover_arg_strings)
    instead of erroring out on unrecognized arguments."""
    # args default to the system args
    if args is None:
        args = _sys.argv[1:]

    # default Namespace built from parser defaults
    if namespace is None:
        namespace = Namespace()

    # add any action defaults that aren't present
    for action in self._actions:
        if action.dest is not SUPPRESS:
            if not hasattr(namespace, action.dest):
                if action.default is not SUPPRESS:
                    default = action.default
                    # String defaults are run through the action's type=
                    # converter; other defaults are used as-is.
                    # NOTE(review): 'basestring' is Python 2 only; a
                    # Python 3 port would need 'str' here.
                    if isinstance(action.default, basestring):
                        default = self._get_value(action, default)
                    setattr(namespace, action.dest, default)

    # add any parser defaults that aren't present
    for dest in self._defaults:
        if not hasattr(namespace, dest):
            setattr(namespace, dest, self._defaults[dest])

    # parse the arguments and exit if there are any errors
    try:
        namespace, args = self._parse_known_args(args, namespace)
        # Unrecognized args collected by sub-parsers are stashed on the
        # namespace; move them into the returned extras list.
        if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
            args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
            delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
        return namespace, args
    except ArgumentError:
        err = _sys.exc_info()[1]
        self.error(str(err))
    def _parse_known_args(self, arg_strings, namespace):
        """Core parsing loop.

        Consumes optionals and positionals from *arg_strings*, invoking
        each matched action (which mutates *namespace*), and returns
        (namespace, extras) where extras are the strings that matched
        nothing.
        """
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)
        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])
        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):
            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')
            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)
        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate and then takes the action
        seen_actions = set()
        seen_non_default_actions = set()
        # helper closing over namespace/seen_* state: run one action
        # after checking mutual-exclusion conflicts
        def take_action(action, argument_strings, option_string=None):
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)
            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)
            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)
        # function to convert arg_strings into an optional action
        def consume_optional(start_index):
            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple
            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:
                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1
                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')
                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        char = option_string[0]
                        option_string = char + explicit_arg[0]
                        new_explicit_arg = explicit_arg[1:] or None
                        optionals_map = self._option_string_actions
                        if option_string in optionals_map:
                            action = optionals_map[option_string]
                            explicit_arg = new_explicit_arg
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)
                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break
                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)
                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break
            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop
        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()
        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)
            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)
            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index
        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:
            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)
                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index
            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index
            # consume the next optional and any arguments for it
            start_index = consume_optional(start_index)
        # consume any positionals following the last Optional
        stop_index = consume_positionals(start_index)
        # if we didn't consume all the argument strings, there were extras
        extras.extend(arg_strings[stop_index:])
        # if we didn't use all the Positional objects, there were too few
        # arg strings supplied.
        if positionals:
            self.error(_('too few arguments'))
        # make sure all required actions were present
        for action in self._actions:
            if action.required:
                if action not in seen_actions:
                    name = _get_action_name(action)
                    self.error(_('argument %s is required') % name)
        # make sure all required groups had one option present
        for group in self._mutually_exclusive_groups:
            if group.required:
                for action in group._group_actions:
                    if action in seen_non_default_actions:
                        break
                # if no actions were used, report the error
                else:
                    names = [_get_action_name(action)
                             for action in group._group_actions
                             if action.help is not SUPPRESS]
                    msg = _('one of the arguments %s is required')
                    self.error(msg % ' '.join(names))
        # return the updated namespace and the extra arguments
        return namespace, extras
    def _read_args_from_files(self, arg_strings):
        """Expand "@file"-style arguments (fromfile_prefix_chars).

        Each prefixed argument is replaced, recursively, by the
        arguments read from the named file; read errors are fatal.
        """
        # expand arguments referencing files
        new_arg_strings = []
        for arg_string in arg_strings:
            # for regular arguments, just add them back into the list
            if arg_string[0] not in self.fromfile_prefix_chars:
                new_arg_strings.append(arg_string)
            # replace arguments referencing files with the file content
            else:
                try:
                    args_file = open(arg_string[1:])
                    try:
                        arg_strings = []
                        for arg_line in args_file.read().splitlines():
                            # convert_arg_line_to_args is overridable;
                            # by default each line is one argument
                            for arg in self.convert_arg_line_to_args(arg_line):
                                arg_strings.append(arg)
                        # a file may itself contain @file references,
                        # so expand recursively
                        arg_strings = self._read_args_from_files(arg_strings)
                        new_arg_strings.extend(arg_strings)
                    finally:
                        args_file.close()
                except IOError:
                    err = _sys.exc_info()[1]
                    self.error(str(err))
        # return the modified argument list
        return new_arg_strings
    def convert_arg_line_to_args(self, arg_line):
        """Split one line of a fromfile-prefix file into arguments.

        The default treats the whole line as a single argument;
        subclasses may override (e.g. to split on whitespace).
        """
        return [arg_line]
    def _match_argument(self, action, arg_strings_pattern):
        """Return how many argument strings *action* consumes from the
        front of *arg_strings_pattern*; raise ArgumentError when the
        action's nargs cannot be satisfied."""
        # match the pattern for this action to the arg strings
        nargs_pattern = self._get_nargs_pattern(action)
        match = _re.match(nargs_pattern, arg_strings_pattern)
        # raise an exception if we weren't able to find a match
        if match is None:
            nargs_errors = {
                None: _('expected one argument'),
                OPTIONAL: _('expected at most one argument'),
                ONE_OR_MORE: _('expected at least one argument'),
            }
            default = _('expected %s argument(s)') % action.nargs
            msg = nargs_errors.get(action.nargs, default)
            raise ArgumentError(action, msg)
        # return the number of arguments matched
        return len(match.group(1))
    def _match_arguments_partial(self, actions, arg_strings_pattern):
        """Match as many leading *actions* as possible against
        *arg_strings_pattern*, dropping trailing actions until a prefix
        matches; return the per-action match lengths (possibly [])."""
        # progressively shorten the actions list by slicing off the
        # final actions until we find a match
        result = []
        for i in range(len(actions), 0, -1):
            actions_slice = actions[:i]
            pattern = ''.join([self._get_nargs_pattern(action)
                               for action in actions_slice])
            match = _re.match(pattern, arg_strings_pattern)
            if match is not None:
                result.extend([len(string) for string in match.groups()])
                break
        # return the list of arg string counts
        return result
    def _parse_optional(self, arg_string):
        """Classify *arg_string*.

        Returns None when the string should be treated as a positional,
        otherwise a (action, option_string, explicit_arg) tuple; the
        action element is None for an option-like string no action
        recognizes (it may still be valid for a subparser).
        """
        # if it's an empty string, it was meant to be a positional
        if not arg_string:
            return None
        # if it doesn't start with a prefix, it was meant to be positional
        if not arg_string[0] in self.prefix_chars:
            return None
        # if the option string is present in the parser, return the action
        if arg_string in self._option_string_actions:
            action = self._option_string_actions[arg_string]
            return action, arg_string, None
        # if it's just a single character, it was meant to be positional
        if len(arg_string) == 1:
            return None
        # if the option string before the "=" is present, return the action
        if '=' in arg_string:
            option_string, explicit_arg = arg_string.split('=', 1)
            if option_string in self._option_string_actions:
                action = self._option_string_actions[option_string]
                return action, option_string, explicit_arg
        # search through all possible prefixes of the option string
        # and all actions in the parser for possible interpretations
        option_tuples = self._get_option_tuples(arg_string)
        # if multiple actions match, the option string was ambiguous
        if len(option_tuples) > 1:
            options = ', '.join([option_string
                for action, option_string, explicit_arg in option_tuples])
            tup = arg_string, options
            self.error(_('ambiguous option: %s could match %s') % tup)
        # if exactly one action matched, this segmentation is good,
        # so return the parsed action
        elif len(option_tuples) == 1:
            option_tuple, = option_tuples
            return option_tuple
        # if it was not found as an option, but it looks like a negative
        # number, it was meant to be positional
        # unless there are negative-number-like options
        if self._negative_number_matcher.match(arg_string):
            if not self._has_negative_number_optionals:
                return None
        # if it contains a space, it was meant to be a positional
        if ' ' in arg_string:
            return None
        # it was meant to be an optional but there is no such option
        # in this parser (though it might be a valid option in a subparser)
        return None, arg_string, None
    def _get_option_tuples(self, option_string):
        """Return (action, option_string, explicit_arg) tuples for every
        registered option that *option_string* could be an abbreviation
        of."""
        result = []
        # option strings starting with two prefix characters are only
        # split at the '='
        chars = self.prefix_chars
        if option_string[0] in chars and option_string[1] in chars:
            if '=' in option_string:
                option_prefix, explicit_arg = option_string.split('=', 1)
            else:
                option_prefix = option_string
                explicit_arg = None
            for option_string in self._option_string_actions:
                if option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # single character options can be concatenated with their arguments
        # but multiple character options always have to have their argument
        # separate
        elif option_string[0] in chars and option_string[1] not in chars:
            option_prefix = option_string
            explicit_arg = None
            short_option_prefix = option_string[:2]
            short_explicit_arg = option_string[2:]
            for option_string in self._option_string_actions:
                if option_string == short_option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, short_explicit_arg
                    result.append(tup)
                elif option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # shouldn't ever get here
        else:
            self.error(_('unexpected option string: %s') % option_string)
        # return the collected option tuples
        return result
    def _get_nargs_pattern(self, action):
        """Build the regex fragment describing how many argument ('A')
        and '--' ('-') symbols *action* may consume from the argument
        pattern string."""
        # in all examples below, we have to allow for '--' args
        # which are represented as '-' in the pattern
        nargs = action.nargs
        # the default (None) is assumed to be a single argument
        if nargs is None:
            nargs_pattern = '(-*A-*)'
        # allow zero or one arguments
        elif nargs == OPTIONAL:
            nargs_pattern = '(-*A?-*)'
        # allow zero or more arguments
        elif nargs == ZERO_OR_MORE:
            nargs_pattern = '(-*[A-]*)'
        # allow one or more arguments
        elif nargs == ONE_OR_MORE:
            nargs_pattern = '(-*A[A-]*)'
        # allow any number of options or arguments
        elif nargs == REMAINDER:
            nargs_pattern = '([-AO]*)'
        # allow one argument followed by any number of options or arguments
        elif nargs == PARSER:
            nargs_pattern = '(-*A[-AO]*)'
        # all others should be integers
        else:
            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
        # if this is an optional action, -- is not allowed
        if action.option_strings:
            nargs_pattern = nargs_pattern.replace('-*', '')
            nargs_pattern = nargs_pattern.replace('-', '')
        # return the pattern
        return nargs_pattern
# ========================
# Value conversion methods
# ========================
    def _get_values(self, action, arg_strings):
        """Convert matched *arg_strings* into the action's value(s).

        Branch selection depends on action.nargs; type conversion and
        choice checking are applied per branch.
        """
        # for everything but PARSER args, strip out '--'
        if action.nargs not in [PARSER, REMAINDER]:
            arg_strings = [s for s in arg_strings if s != '--']
        # optional argument produces a default when not present
        if not arg_strings and action.nargs == OPTIONAL:
            if action.option_strings:
                value = action.const
            else:
                value = action.default
            if isinstance(value, basestring):
                value = self._get_value(action, value)
            self._check_value(action, value)
        # when nargs='*' on a positional, if there were no command-line
        # args, use the default if it is anything other than None
        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
              not action.option_strings):
            if action.default is not None:
                value = action.default
            else:
                value = arg_strings
            self._check_value(action, value)
        # single argument or optional argument produces a single value
        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
            arg_string, = arg_strings
            value = self._get_value(action, arg_string)
            self._check_value(action, value)
        # REMAINDER arguments convert all values, checking none
        elif action.nargs == REMAINDER:
            value = [self._get_value(action, v) for v in arg_strings]
        # PARSER arguments convert all values, but check only the first
        elif action.nargs == PARSER:
            value = [self._get_value(action, v) for v in arg_strings]
            self._check_value(action, value[0])
        # all other types of nargs produce a list
        else:
            value = [self._get_value(action, v) for v in arg_strings]
            for v in value:
                self._check_value(action, v)
        # return the converted value
        return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
    def format_help(self):
        """Return the full help text: usage line, description, every
        argument group, then the epilog, in that order."""
        formatter = self._get_formatter()
        # usage
        formatter.add_usage(self.usage, self._actions,
                            self._mutually_exclusive_groups)
        # description
        formatter.add_text(self.description)
        # positionals, optionals and user-defined groups
        for action_group in self._action_groups:
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()
        # epilog
        formatter.add_text(self.epilog)
        # determine help from format above
        return formatter.format_help()
    def format_version(self):
        """Deprecated: render the parser's version string via the
        formatter; kept for backward compatibility only."""
        import warnings
        warnings.warn(
            'The format_version method is deprecated -- the "version" '
            'argument to ArgumentParser is no longer supported.',
            DeprecationWarning)
        formatter = self._get_formatter()
        formatter.add_text(self.version)
        return formatter.format_help()
    def _get_formatter(self):
        # instantiate the configured help formatter for this program name
        return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
    def print_version(self, file=None):
        """Deprecated: write the version string to *file*; kept for
        backward compatibility only."""
        import warnings
        warnings.warn(
            'The print_version method is deprecated -- the "version" '
            'argument to ArgumentParser is no longer supported.',
            DeprecationWarning)
        self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
    def exit(self, status=0, message=None):
        """Optionally print *message* to stderr, then terminate the
        process with exit code *status*."""
        if message:
            self._print_message(message, _sys.stderr)
        _sys.exit(status)
    def error(self, message):
        """error(message: string)
        Prints a usage message incorporating the message to stderr and
        exits.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(_sys.stderr)
        # exit status 2 is the conventional code for command-line usage errors
        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
|
h3llrais3r/SickRage | refs/heads/master | lib/hachoir_parser/parser_list.py | 85 | import re
import types
from hachoir_core.error import error
from hachoir_core.i18n import _
from hachoir_parser import Parser, HachoirParser
import sys
### Parser list ################################################################
class ParserList(object):
    """Registry of Hachoir parser classes.

    Parsers are kept in a flat list and also indexed per tag name
    (at least "id" and "category") in self.bytag for fast lookup and
    for the listing methods.
    """
    VALID_CATEGORY = ("archive", "audio", "container", "file_system",
        "game", "image", "misc", "program", "video")
    ID_REGEX = re.compile("^[a-z0-9][a-z0-9_]{2,}$")
    def __init__(self):
        # flat list of parser classes plus per-tag lookup dictionaries
        self.parser_list = []
        self.bytag = { "id": {}, "category": {} }
    def translate(self, name, value):
        """Validate and normalize one (tag name, value) pair.

        Returns True when the tag needs no indexing ("magic",
        "min_size", "description" when valid), an error string on
        invalid input, or a normalized (name, tuple_of_values) pair.
        """
        if name in ("magic",):
            return True
        elif name == "min_size":
            return - value < 0 or "Invalid minimum size (min_size)"
        elif name == "description":
            return isinstance(value, (str, unicode)) and bool(value) or "Invalid description"
        elif name == "category":
            if value not in self.VALID_CATEGORY:
                return "Invalid category: %r" % value
        elif name == "id":
            if type(value) is not str or not self.ID_REGEX.match(value):
                return "Invalid identifier: %r" % value
            parser = self.bytag[name].get(value)
            if parser:
                return "Duplicate parser id: %s already used by %s" % \
                    (value, parser[0].__name__)
        # TODO: lists should be forbidden
        if isinstance(value, list):
            value = tuple(value)
        elif not isinstance(value, tuple):
            value = value,
        return name, value
    def validParser(self, parser, tags):
        """Return "" when *tags* describe a usable parser, otherwise a
        human-readable error message."""
        if "id" not in tags:
            return "No identifier"
        if "description" not in tags:
            return "No description"
        # TODO: Allow simple strings for file_ext/mime ?
        # (see also HachoirParser.createFilenameSuffix)
        file_ext = tags.get("file_ext", ())
        if not isinstance(file_ext, (tuple, list)):
            return "File extension is not a tuple or list"
        mimes = tags.get("mime", ())
        if not isinstance(mimes, tuple):
            return "MIME type is not a tuple"
        for mime in mimes:
            if not isinstance(mime, unicode):
                return "MIME type %r is not an unicode string" % mime
        return ""
    def add(self, parser):
        """Validate *parser* and register it in the list and the tag
        indexes; invalid parsers are skipped with an error() message."""
        tags = parser.getParserTags()
        err = self.validParser(parser, tags)
        if err:
            error("Skip parser %s: %s" % (parser.__name__, err))
            return
        _tags = []
        for tag in tags.iteritems():
            tag = self.translate(*tag)
            if isinstance(tag, tuple):
                _tags.append(tag)
            elif tag is not True:
                error("[%s] %s" % (parser.__name__, tag))
                return
        self.parser_list.append(parser)
        for name, values in _tags:
            byname = self.bytag.setdefault(name,{})
            for value in values:
                byname.setdefault(value,[]).append(parser)
    def __iter__(self):
        # iterate over the registered parser classes
        return iter(self.parser_list)
    def print_(self, title=None, out=None, verbose=False, format="one-line"):
        """Display a list of parser with its title
        * out: output file
        * title : title of the list to display
        * format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
        """
        if out is None:
            out = sys.stdout
        if format in ("file-ext", "mime"):
            # Create file extension set
            extensions = set()
            for parser in self:
                file_ext = parser.getParserTags().get(format, ())
                file_ext = list(file_ext)
                try:
                    file_ext.remove("")
                except ValueError:
                    pass
                extensions |= set(file_ext)
            # Remove empty extension
            extensions -= set(('',))
            # Convert to list and sort by ASCII order
            extensions = list(extensions)
            extensions.sort()
            # Print list
            text = ", ".join( str(item) for item in extensions )
            if format == "file-ext":
                print >>out, "File extensions: %s." % text
                print >>out
                print >>out, "Total: %s file extensions." % len(extensions)
            else:
                print >>out, "MIME types: %s." % text
                print >>out
                print >>out, "Total: %s MIME types." % len(extensions)
            return
        if format == "trac":
            print >>out, "== List of parsers =="
            print >>out
            print >>out, "Total: %s parsers" % len(self.parser_list)
            print >>out
        elif format == "one_line":
            if title:
                print >>out, title
            else:
                print >>out, _("List of Hachoir parsers.")
            print >>out
        # Create parser list sorted by module
        bycategory = self.bytag["category"]
        for category in sorted(bycategory.iterkeys()):
            if format == "one_line":
                parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ]
                parser_list.sort()
                print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list))
            else:
                if format == "rest":
                    print >>out, category.replace("_", " ").title()
                    print >>out, "-" * len(category)
                    print >>out
                elif format == "trac":
                    print >>out, "=== %s ===" % category.replace("_", " ").title()
                    print >>out
                else:
                    print >>out, "[%s]" % category
                parser_list = sorted(bycategory[category],
                    key=lambda parser: parser.PARSER_TAGS["id"])
                if format == "rest":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        print >>out, "* %s: %s" % (tags["id"], tags["description"])
                elif format == "trac":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        desc = tags["description"]
                        # escape CamelCase words so Trac does not turn
                        # them into wiki links
                        desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc)
                        print >>out, " * %s: %s" % (tags["id"], desc)
                else:
                    for parser in parser_list:
                        parser.print_(out, verbose)
                print >>out
        if format != "trac":
            print >>out, "Total: %s parsers" % len(self.parser_list)
class HachoirParserList(ParserList):
    """Singleton ParserList populated from the hachoir_parser package."""
    _instance = None
    @classmethod
    def getInstance(cls):
        """Return the shared instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
    def __init__(self):
        ParserList.__init__(self)
        self._load()
    def _load(self):
        """Register every parser class found in "hachoir_parser".

        Returns the list of loaded parsers; a no-op when the list is
        already populated.
        """
        if self.parser_list:
            return self.parser_list
        root = __import__("hachoir_parser")
        submodules = [getattr(root, attrname) for attrname in dir(root)
                      if isinstance(getattr(root, attrname), types.ModuleType)]
        for submodule in submodules:
            for name in dir(submodule):
                candidate = getattr(submodule, name)
                is_parser = (isinstance(candidate, type)
                             and issubclass(candidate, HachoirParser)
                             and candidate not in (Parser, HachoirParser))
                if is_parser:
                    self.add(candidate)
        assert 1 <= len(self.parser_list)
        return self.parser_list
|
peterwittek/concept_drifts | refs/heads/master | trackBmus.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 5 16:50:02 2015
@author: wittek
"""
from math import sqrt
# Grid-distance thresholds (in SOM map cells):
# threshold1 -- maximum BMU drift between period 1 and period 2 for a
#               keyword to be kept as a stable candidate
# threshold2 -- maximum distance between two candidates on the period-1
#               map for the pair to be reported together
threshold1 = 5
threshold2 = 11
def get_keywords(keyword_filename, bmu_filename):
    """Map each keyword to its best-matching unit (BMU) on a SOM grid.

    Parameters:
        keyword_filename: tab-separated file, one header line, with the
            keyword in the second column of each data line.
        bmu_filename: BMU file whose first line is "%n_rows n_columns",
            whose second line is skipped, and whose data lines carry the
            (row, column) coordinates in columns 2 and 3.

    Returns:
        (keywords, n_rows, n_columns) where keywords maps each keyword
        to the (row, column) tuple of the data line with the same index.
    """
    # Read best-matching-unit coordinates, one (row, column) per data line.
    # Using "with" guarantees the handles are closed (the original code
    # referenced bmu_file.close without calling it, leaking the handle).
    bmus = []
    with open(bmu_filename, 'r') as bmu_file:
        # Header looks like "%n_rows n_columns"; drop the leading '%'.
        n_rows, n_columns = bmu_file.readline()[1:].split(' ')
        bmu_file.readline()  # skip the second header line
        for line in bmu_file:
            elements = line.strip().split(' ')
            bmus.append((int(elements[1]), int(elements[2])))
    # Pair the k-th keyword with the k-th BMU.
    keywords = {}
    with open(keyword_filename, 'r') as keyword_file:
        keyword_file.readline()  # skip header
        for k, line in enumerate(keyword_file):
            elements = line.strip().split('\t')
            keywords[elements[1]] = bmus[k]
    return keywords, int(n_rows), int(n_columns)
def toroid_distance(coords1, coords2, n_rows, n_columns):
    """Euclidean distance between two grid points on a torus.

    Along each axis the shorter of the direct separation and the
    wrap-around separation is used.
    """
    row_gap = abs(coords1[0] - coords2[0])
    col_gap = abs(coords1[1] - coords2[1])
    row_gap = min(row_gap, n_rows - row_gap)
    col_gap = min(col_gap, n_columns - col_gap)
    return sqrt(row_gap ** 2 + col_gap ** 2)
# Load per-period keyword -> BMU maps for the three SOM runs; the grid
# dimensions are taken from the period-1 map.
keywords1, n_rows, n_columns = get_keywords('data/termvectorsperiod1.names',
                                            'data/termvectorsperiod1.bm')
keywords2, _, _ = get_keywords('data/termvectorsperiod2.names',
                               'data/termvectorsperiod2.bm')
keywords3, _, _ = get_keywords('data/termvectorsperiod3.names',
                               'data/termvectorsperiod3.bm')
# Keep keywords present in all three periods whose BMU moved less than
# threshold1 between period 1 and period 2.
# NOTE(review): coords3 is loaded but never used in the stability test;
# confirm whether a period-2 -> period-3 drift check was also intended.
candidates = []
for keyword in keywords1:
    if keywords2.has_key(keyword) and keywords3.has_key(keyword):
        coords1 = keywords1[keyword]
        coords2 = keywords2[keyword]
        coords3 = keywords3[keyword]
        if toroid_distance(coords1, coords2, n_rows, n_columns) < threshold1:
            candidates.append(keyword)
# Report ordered pairs of distinct stable keywords that lie closer than
# threshold2 on the period-1 map (each pair is printed in both orders).
for i, keyword1 in enumerate(candidates):
    for keyword2 in candidates:
        if keyword1 is not keyword2:
            coords1 = keywords1[keyword1]
            coords2 = keywords1[keyword2]
            if toroid_distance(coords1, coords2, n_rows, n_columns) < threshold2:
                print keyword1, keyword2, keywords1[keyword1], keywords1[keyword2], keywords2[keyword1], keywords3[keyword1]
|
coolo/packagekit | refs/heads/master | lib/python/packagekit/filter.py | 18 | #!/usr/bin/python
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright (C) 2008
# Richard Hughes <richard@hughsie.com>
# imports
from .enums import *
from .package import PackagekitPackage
import collections
class PackagekitFilter(object, PackagekitPackage):
    """Accumulate (package, info) tuples and post-process them with a
    PackageKit filter list (installed, gui, devel, free, arch, ...).

    Backends subclass this and implement the _pkg_* hooks and
    post_process().
    """
    def __init__(self, fltlist="none"):
        ''' save state '''
        # NOTE(review): the default is the literal string "none", which
        # would be iterated character by character by the filter loops;
        # callers presumably always pass a real sequence of FILTER_*
        # constants -- confirm before relying on the default.
        self.fltlist = fltlist
        self.package_list = [] #we can't do emitting as found if we are post-processing
        self.installed_unique = {}
    def add_installed(self, pkgs):
        ''' add a list of packages that are already installed '''
        for pkg in pkgs:
            self.package_list.append((pkg, INFO_INSTALLED))
    def add_available(self, pkgs):
        ''' add a list of packages that are available '''
        for pkg in pkgs:
            self.package_list.append((pkg, INFO_AVAILABLE))
    def add_custom(self, pkg, info):
        ''' add a single custom package with an explicit info value '''
        self.package_list.append((pkg, info))
    def _filter_base(self, pkg):
        ''' do extra filtering (gui, devel etc) '''
        for flt in self.fltlist:
            if flt in (FILTER_GUI, FILTER_NOT_GUI):
                if not self._do_gui_filtering(flt, pkg):
                    return False
            elif flt in (FILTER_DEVELOPMENT, FILTER_NOT_DEVELOPMENT):
                if not self._do_devel_filtering(flt, pkg):
                    return False
            elif flt in (FILTER_FREE, FILTER_NOT_FREE):
                if not self._do_free_filtering(flt, pkg):
                    return False
            elif flt in (FILTER_ARCH, FILTER_NOT_ARCH):
                if not self._do_arch_filtering(flt, pkg):
                    return False
        return True
    def _filter_installed(self, pkg):
        ''' apply only the installed/~installed filters '''
        for flt in self.fltlist:
            if flt in (FILTER_INSTALLED, FILTER_NOT_INSTALLED):
                if not self._do_installed_filtering(flt, pkg):
                    return False
        return True
    def get_package_list(self):
        '''
        do filtering we couldn't do when generating the list
        '''
        # filter common things here like architecture
        # NOTE: we can't do installed and ~installed here as we need
        # this data for the newest and downgrade checks below
        package_list = self.package_list
        self.package_list = []
        for pkg, state in package_list:
            if self._filter_base(pkg):
                self.package_list.append((pkg, state))
        # prepare lookup table of installed packages
        # NOTE(review): "state is INFO_INSTALLED" compares by identity;
        # this relies on the INFO_* constants being singletons.
        installed_dict = collections.defaultdict(list)
        for pkg, state in self.package_list:
            if state is INFO_INSTALLED:
                installed_dict[self._pkg_get_name(pkg)].append(pkg)
        # check there are not available versions in the package list
        # that are older than the installed version
        package_list = self.package_list
        self.package_list = []
        for pkg, state in package_list:
            add = True
            if state is INFO_AVAILABLE:
                for pkg_tmp in installed_dict[self._pkg_get_name(pkg)]:
                    rc = self._pkg_compare(pkg, pkg_tmp)
                    # don't add if the same as the installed package
                    # or a downgrade to the existing installed package
                    if rc == 0 or rc == -1:
                        add = False
                        break
            if add:
                self.package_list.append((pkg, state))
        # filter installed state last
        package_list = self.package_list
        self.package_list = []
        for pkg, state in package_list:
            if self._filter_installed(pkg):
                self.package_list.append((pkg, state))
        # do the backend specific filtering
        return self.post_process()
    def post_process(self):
        '''
        do filtering we couldn't do when generating the list
        Needed to be implemented in a sub class
        '''
        return self.package_list
    def _pkg_compare(self, pkg1, pkg2):
        '''
        Returns a version comparison of the packages, where:
        -2 : pkg1 not comparable with pkg2
        -1 : pkg2 is newer than pkg1
         0 : pkg1 == pkg2
         1 : pkg1 is newer than pkg2
         2 : not implemented
        Needed to be implemented in a sub class
        '''
        return 2
    def _pkg_get_name(self, pkg):
        '''
        Returns the name of the package used for duplicate filtering
        Needed to be implemented in a sub class
        '''
        return None
    def _pkg_is_installed(self, pkg):
        '''
        Return if the package is installed.
        Needed to be implemented in a sub class
        '''
        return True
    def _pkg_is_devel(self, pkg):
        '''
        Return if the package is development.
        Needed to be implemented in a sub class
        '''
        return True
    def _pkg_is_gui(self, pkg):
        '''
        Return if the package is a GUI program.
        Needed to be implemented in a sub class
        '''
        return True
    def _pkg_is_free(self, pkg):
        '''
        Return if the package is free software.
        Needed to be implemented in a sub class
        '''
        return True
    def _pkg_is_arch(self, pkg):
        '''
        Return if the package is the same architecture as the machine.
        Needed to be implemented in a sub class
        '''
        return True
    def _do_installed_filtering(self, flt, pkg):
        # keep the package when its installed state matches the filter
        is_installed = self._pkg_is_installed(pkg)
        if flt == FILTER_INSTALLED:
            want_installed = True
        else:
            want_installed = False
        return is_installed == want_installed
    def _do_devel_filtering(self, flt, pkg):
        # keep the package when its devel state matches the filter
        is_devel = self._pkg_is_devel(pkg)
        if flt == FILTER_DEVELOPMENT:
            want_devel = True
        else:
            want_devel = False
        return is_devel == want_devel
    def _do_gui_filtering(self, flt, pkg):
        # keep the package when its GUI state matches the filter
        is_gui = self._pkg_is_gui(pkg)
        if flt == FILTER_GUI:
            want_gui = True
        else:
            want_gui = False
        return is_gui == want_gui
    def _do_free_filtering(self, flt, pkg):
        # keep the package when its license state matches the filter
        is_free = self._pkg_is_free(pkg)
        if flt == FILTER_FREE:
            want_free = True
        else:
            want_free = False
        return is_free == want_free
    def _do_arch_filtering(self, flt, pkg):
        # keep the package when its architecture state matches the filter
        is_arch = self._pkg_is_arch(pkg)
        if flt == FILTER_ARCH:
            want_arch = True
        else:
            want_arch = False
        return is_arch == want_arch
|
EcmaXp/micropython | refs/heads/opencom | tests/basics/bytes_gen.py | 116 | # construct a bytes object from a generator
def gen():
    """Yield the integers 0 through 3, one per iteration."""
    yield from range(4)

print(bytes(gen()))
|
zlfben/gem5 | refs/heads/develop | src/arch/mips/MipsTLB.py | 69 | # -*- mode:python -*-
# Copyright (c) 2007 MIPS Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jaidev Patwardhan
# Korey Sewell
from m5.SimObject import SimObject
from m5.params import *
from BaseTLB import BaseTLB
class MipsTLB(BaseTLB):
    # SimObject type name plus the C++ class and header gem5's code
    # generation binds it to; these strings must stay in sync with the
    # C++ implementation.
    type = 'MipsTLB'
    cxx_class = 'MipsISA::TLB'
    cxx_header = 'arch/mips/tlb.hh'
    size = Param.Int(64, "TLB size")  # number of TLB entries
|
akretion/odoo | refs/heads/12-patch-paging-100-in-o2m | addons/auth_signup/models/res_config_settings.py | 14 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
    """Settings panel extension for the auth_signup options."""
    _inherit = 'res.config.settings'
    # Each field below is persisted as an ir.config_parameter record via
    # its config_parameter attribute.
    auth_signup_reset_password = fields.Boolean(string='Enable password reset from Login page', config_parameter='auth_signup.reset_password')
    auth_signup_uninvited = fields.Selection([
        ('b2b', 'On invitation'),
        ('b2c', 'Free sign up'),
    ], string='Customer Account', default='b2b', config_parameter='auth_signup.invitation_scope')
    auth_signup_template_user_id = fields.Many2one('res.users', string='Template user for new users created through signup',
        config_parameter='base.template_portal_user_id')
    @api.multi
    def open_template_user(self):
        """Return an act_window opening the signup template user's form view."""
        action = self.env.ref('base.action_res_users').read()[0]
        # The parameter stores the user id as a string ('False' if unset),
        # hence the literal_eval round-trip.
        action['res_id'] = literal_eval(self.env['ir.config_parameter'].sudo().get_param('base.template_portal_user_id', 'False'))
        action['views'] = [[self.env.ref('base.view_users_form').id, 'form']]
        return action
|
Nickito12/stepmania-server | refs/heads/master | test/test_models/test_song_stat.py | 1 | """ Module to test song_stat model """
import datetime
from test.factories.song_stat_factory import SongStatFactory
from test.factories.user_factory import UserFactory
from test import utils
class SongStatTest(utils.DBTest):
    """Unit tests for the SongStat model's display helpers."""

    def test_lit_difficulty(self):
        """lit_difficulty maps known codes to names and unknown codes to str."""
        expectations = [
            (0, "BEGINNER"),
            (1, "EASY"),
            (2, "MEDIUM"),
            (3, "HARD"),
            (4, "EXPERT"),
            (67, "67"),
        ]
        for difficulty, expected in expectations:
            song_stat = SongStatFactory(difficulty=difficulty)
            self.assertEqual(song_stat.lit_difficulty, expected)

    def test_full_difficulty(self):
        """full_difficulty combines the literal difficulty with the feet rating."""
        for difficulty, feet, expected in [
            (0, 4, "BEGINNER (4)"),
            (3, 78, "HARD (78)"),
        ]:
            song_stat = SongStatFactory(difficulty=difficulty, feet=feet)
            self.assertEqual(song_stat.full_difficulty, expected)

    def test_lit_grade(self):
        """lit_grade maps grade codes to letter grades."""
        for grade, expected in [
            (0, "AAAA"),
            (1, "AAA"),
            (3, "A"),
            (4, "B"),
            (6, "D"),
        ]:
            self.assertEqual(SongStatFactory(grade=grade).lit_grade, expected)

    def test_pretty_result(self):
        """pretty_result renders difficulty, player, grade, percentage and date."""
        song_stat = SongStatFactory(
            difficulty=3,  # HARD
            feet=9,
            grade=3,  # A
            user=UserFactory(name="José Prout"),
            percentage=78.327,
            created_at=datetime.datetime(2017, 10, 13, 11, 42),
        )
        self.assertEqual(
            song_stat.pretty_result(),
            r"HARD (9): José Prout A (78.33%) on 13/10/17",
        )
|
fengbaicanhe/intellij-community | refs/heads/master | python/testData/psi/CommentAtBeginningOfStatementList.py | 158 | def foo(a):
    # NOTE(review): PSI parser fixture for "comment at beginning of a
    # statement list" -- the comment below is the construct under test;
    # do not reformat or remove it.
    if a == 5:
        # a is 5
        print 'no'
foo(5) |
makerbot/ReplicatorG | refs/heads/master | skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/alteration.py | 12 | #! /usr/bin/env python
"""
This page is in the table of contents.
The alteration plugin adds the start and end files to the gcode.
This plugin also removes the alteration prefix tokens from the alteration lines. Alteration lines have a prefix token so they can go through the craft plugins without being modified. However, the tokens are not recognized by the firmware so they have to be removed before export. The alteration token is:
(<alterationDeleteThisPrefix/>)
The alteration manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Alteration
==Operation==
The default 'Activate Alteration' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
Alteration looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Alteration does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
===Name of End File===
Default is 'end.gcode'.
If there is a file with the name of the "Name of End File" setting, it will be added to the very end of the gcode.
===Name of Start File===
Default is 'start.gcode'.
If there is a file with the name of the "Name of Start File" setting, it will be added to the very beginning of the gcode.
===Remove Redundant Mcode===
Default: True
If 'Remove Redundant Mcode' is selected then M104 and M108 lines which are followed by a different value before there is a movement will be removed. For example, if there is something like:
M113 S1.0
M104 S60.0
(<layer> 0.72 )
M104 S200.0
(<skirt>)
with Remove Redundant Mcode selected, that snippet would become:
M113 S1.0
M104 S200.0
(<layer> 0.72 )
(<skirt>)
This is a relatively safe procedure, the only reason it is optional is because someone might make an alteration file which, for some unknown reason, requires the redundant mcode.
===Replace Variable with Setting===
Default: True
If 'Replace Variable with Setting' is selected and there is an alteration line with a setting token, the token will be replaced by the value.
For example, if there is an alteration line like:
M140 S<setting.chamber.BedTemperature>
the token would be replaced with the value and assuming the bed chamber was 60.0, the output would be:
M140 S60.0
==Examples==
The following examples add the alteration information to the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and alteration.py.
> python alteration.py
This brings up the alteration dialog.
> python alteration.py Screw Holder Bottom.stl
The alteration tool is parsing the file:
Screw Holder Bottom.stl
..
The alteration tool has created the file:
.. Screw Holder Bottom_alteration.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import cStringIO
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, text='', repository=None):
    'Alteration a gcode linear move text.'
    # Read the file contents only when no text was passed in.
    gcodeText = archive.getTextIfEmpty(fileName, text)
    return getCraftedTextFromText(gcodeText, repository)
def getCraftedTextFromText(gcodeText, repository=None):
    'Get gcode text with the alteration start/end files added, if enabled.'
    # Skip work if the alteration procedure already ran or the text is empty.
    if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'alteration'):
        return gcodeText
    # 'is None' instead of '== None': identity test for the None sentinel.
    if repository is None:
        repository = settings.getReadRepository(AlterationRepository())
    if not repository.activateAlteration.value:
        return gcodeText
    return AlterationSkein().getCraftedGcode(gcodeText, repository)
def getGcodeTextWithoutRedundantMcode(gcodeText):
    'Get gcode text without redundant M104 and M108.'
    lines = archive.getTextLines(gcodeText)
    # Deduplicate each mcode word in turn over the same line list.
    for duplicateWord in ('M104', 'M108'):
        lines = getLinesWithoutRedundancy(duplicateWord, lines)
    output = cStringIO.StringIO()
    gcodec.addLinesToCString(output, lines)
    return output.getvalue()
def getLinesWithoutRedundancy(duplicateWord, lines):
    'Get gcode lines without redundant first words.'
    # Index of the most recent still-pending duplicateWord line, if any.
    pendingIndex = None
    for lineIndex, line in enumerate(lines):
        firstWord = gcodec.getFirstWordFromLine(line)
        if firstWord == duplicateWord:
            if pendingIndex is None:
                pendingIndex = lineIndex
            else:
                # A newer value supersedes the pending one: move it up and
                # blank the later occurrence.
                lines[pendingIndex] = line
                lines[lineIndex] = ''
        elif firstWord.startswith('G') or firstWord == 'M101' or firstWord == 'M103':
            # A movement/extrusion command makes the pending value take
            # effect, so later occurrences are no longer redundant.
            pendingIndex = None
    return lines
def getNewRepository():
    'Get a new alteration settings repository.'
    return AlterationRepository()
def writeOutput(fileName, shouldAnalyze=True):
    'Alteration a gcode linear move file. Chain alteration the gcode if the alteration procedure has not been done.'
    # Delegates to the craft chain so earlier procedures run first if needed.
    skeinforge_craft.writeChainTextWithNounMessage(fileName, 'alteration', shouldAnalyze)
class AlterationRepository:
    "A class to handle the alteration settings."
    def __init__(self):
        "Set the default settings, execute title & settings fileName."
        # Registers this repository under the craft-type profile so the
        # settings are saved/loaded with the active profile.
        skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.alteration.html', self )
        # Legacy settings file name kept for backward compatibility.
        self.baseNameSynonym = 'bookend.csv'
        self.fileNameInput = settings.FileNameInput().getFromFileName(fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Alteration', self, '')
        self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Alteration')
        self.activateAlteration = settings.BooleanSetting().getFromValue('Activate Alteration', self, True)
        self.nameOfEndFile = settings.StringSetting().getFromValue('Name of End File:', self, 'end.gcode')
        self.nameOfStartFile = settings.StringSetting().getFromValue('Name of Start File:', self, 'start.gcode')
        self.removeRedundantMcode = settings.BooleanSetting().getFromValue('Remove Redundant Mcode', self, True)
        self.replaceVariableWithSetting = settings.BooleanSetting().getFromValue('Replace Variable with Setting', self, True)
        self.executeTitle = 'Alteration'
    def execute(self):
        'Alteration button has been clicked.'
        # Expand the dialog's file/directory selection into concrete files.
        fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
        for fileName in fileNames:
            writeOutput(fileName)
class AlterationSkein:
    "A class to alteration a skein of extrusions."
    def __init__(self):
        'Initialize.'
        self.distanceFeedRate = gcodec.DistanceFeedRate()
        self.lineIndex = 0
        # Maps lowercase '<procedure>.<name>' keys to setting values; stays
        # None when variable replacement is disabled.
        self.settingDictionary = None
    def addFromUpperLowerFile(self, fileName):
        "Add lines of text from the fileName or the lowercase fileName, if there is no file by the original fileName in the directory."
        alterationFileLines = settings.getAlterationFileLines(fileName)
        self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(alterationFileLines)
    def getCraftedGcode(self, gcodeText, repository):
        "Parse gcode text and return it with the alteration start and end files added."
        self.lines = archive.getTextLines(gcodeText)
        if repository.replaceVariableWithSetting.value:
            self.setSettingDictionary()
        self.addFromUpperLowerFile(repository.nameOfStartFile.value) # Add a start file if it exists.
        self.parseInitialization()
        # Copy the remaining lines after the initialization section verbatim.
        for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
            line = self.lines[self.lineIndex]
            self.distanceFeedRate.addLine(line)
        self.addFromUpperLowerFile(repository.nameOfEndFile.value) # Add an end file if it exists.
        gcodeText = self.getReplacedAlterationText()
        if repository.removeRedundantMcode.value:
            gcodeText = getGcodeTextWithoutRedundantMcode(gcodeText)
        return gcodeText
    def getReplacedAlterationLine(self, alterationFileLine, searchIndex=0):
        'Get the alteration file line with variables replaced with the settings.'
        # Find the next '<setting. ... >' token at or after searchIndex.
        settingIndex = alterationFileLine.find('setting.', searchIndex)
        beginIndex = settingIndex - 1
        if beginIndex < 0:
            # No further 'setting.' occurrence (find returned -1 or 0).
            return alterationFileLine
        endBracketIndex = alterationFileLine.find('>', settingIndex)
        if alterationFileLine[beginIndex] != '<' or endBracketIndex == -1:
            # Not a well-formed '<setting...>' token; leave the line alone.
            return alterationFileLine
        endIndex = endBracketIndex + 1
        # Normalize: drop '>', spaces and underscores, lowercase -- so the
        # token matches the keys built by setSettingDictionary.
        innerToken = alterationFileLine[settingIndex + len('setting.'): endIndex].replace('>', '').replace(' ', '').replace('_', '').lower()
        if innerToken in self.settingDictionary:
            replacedSetting = self.settingDictionary[innerToken]
            replacedAlterationLine = alterationFileLine[: beginIndex] + replacedSetting + alterationFileLine[endIndex :]
            # Recurse past the replacement to handle any further tokens.
            return self.getReplacedAlterationLine(replacedAlterationLine, beginIndex + len(replacedSetting))
        return alterationFileLine
    def getReplacedAlterationText(self):
        'Replace the alteration lines if there are settings.'
        if self.settingDictionary == None:
            # Replacement disabled: only strip the alteration prefix tokens.
            return self.distanceFeedRate.output.getvalue().replace('(<alterationDeleteThisPrefix/>)', '')
        lines = archive.getTextLines(self.distanceFeedRate.output.getvalue())
        distanceFeedRate = gcodec.DistanceFeedRate()
        for line in lines:
            if line.startswith('(<alterationDeleteThisPrefix/>)'):
                # Strip the prefix, then substitute any setting tokens.
                line = self.getReplacedAlterationLine(line[len('(<alterationDeleteThisPrefix/>)') :])
            distanceFeedRate.addLine(line)
        return distanceFeedRate.output.getvalue()
    def parseInitialization(self):
        'Parse gcode initialization and store the parameters.'
        for self.lineIndex in xrange(len(self.lines)):
            line = self.lines[self.lineIndex]
            splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
            firstWord = gcodec.getFirstWord(splitLine)
            self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
            if firstWord == '(</extruderInitialization>)':
                # End of the initialization section: tag this procedure and
                # leave self.lineIndex pointing at the current line.
                self.distanceFeedRate.addTagBracketedProcedure('alteration')
                return
            self.distanceFeedRate.addLine(line)
    def setSettingDictionary(self):
        'Set the setting dictionary from the gcode text.'
        for line in self.lines:
            splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
            firstWord = gcodec.getFirstWord(splitLine)
            if firstWord == '(<setting>' and self.settingDictionary != None:
                # Expect at least procedure, name, one value word and the
                # closing tag.
                if len(splitLine) > 4:
                    procedure = splitLine[1]
                    name = splitLine[2].replace('_', ' ').replace(' ', '')
                    if '(' in name:
                        name = name[: name.find('(')]
                    value = ' '.join(splitLine[3 : -1])
                    self.settingDictionary[(procedure + '.' + name).lower()] = value
            elif firstWord == '(<settings>)':
                # Entering the settings section: start collecting.
                self.settingDictionary = {}
            elif firstWord == '(</settings>)':
                return
def main():
    "Display the alteration dialog."
    arguments = sys.argv[1:]
    if arguments:
        # File names given on the command line: process them directly.
        writeOutput(' '.join(arguments))
    else:
        # No arguments: open the interactive settings dialog.
        settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == "__main__":
    main()
|
pmaunz/pyqtgraph | refs/heads/develop | doc/extensions/qt_doc.py | 28 | """
Extension for building Qt-like documentation.
- Method lists preceding the actual method documentation
- Inherited members documented separately
- Members inherited from Qt have links to qt-project documentation
- Signal documentation
"""
def setup(app):
    """Sphinx extension entry point: register nodes, directives and hooks."""
    # probably we will be making a wrapper around autodoc
    app.setup_extension('sphinx.ext.autodoc')
    # would it be useful to define a new domain?
    #app.add_domain(QtDomain)
    ## Add new configuration options
    app.add_config_value('todo_include_todos', False, False)
    ## Nodes are the basic objects representing documentation directives
    ## and roles
    app.add_node(Todolist)
    # Todo renders via the visit/depart handlers for each output format.
    app.add_node(Todo,
                 html=(visit_todo_node, depart_todo_node),
                 latex=(visit_todo_node, depart_todo_node),
                 text=(visit_todo_node, depart_todo_node))
    ## New directives like ".. todo:"
    app.add_directive('todo', TodoDirective)
    app.add_directive('todolist', TodolistDirective)
    ## Connect callbacks to specific hooks in the build process
    app.connect('doctree-resolved', process_todo_nodes)
    app.connect('env-purge-doc', purge_todos)
from docutils import nodes
from sphinx.util.compat import Directive
from sphinx.util.compat import make_admonition
# Just a general node
class Todolist(nodes.General, nodes.Element):
    """Placeholder node; replaced by the collected todos at resolve time."""
    pass
# .. and its directive
class TodolistDirective(Directive):
    """Directive emitting a Todolist placeholder node."""
    # all directives have 'run' method that returns a list of nodes
    def run(self):
        return [Todolist('')]
# Admonition classes are like notes or warnings
class Todo(nodes.Admonition, nodes.Element):
    """Node holding a single todo entry, rendered like an admonition."""
    pass
def visit_todo_node(self, node):
    """Open a Todo node using the writer's standard admonition markup."""
    self.visit_admonition(node)
def depart_todo_node(self, node):
    """Close a Todo node using the writer's standard admonition markup."""
    self.depart_admonition(node)
class TodoDirective(Directive):
    """Directive recording a todo admonition plus a link target for it."""

    # this enables content in the directive
    has_content = True

    def run(self):
        env = self.state.document.settings.env

        # Create a uniquely-named target node so todolists can link back here.
        target_id = "todo-%d" % env.new_serialno('todo')
        target_node = nodes.target('', '', ids=[target_id])

        # Build the admonition node that holds the directive's content.
        admonition = make_admonition(Todo, self.name, [('Todo')], self.options,
                                     self.content, self.lineno, self.content_offset,
                                     self.block_text, self.state, self.state_machine)

        # Record the todo in the global, per-environment list.
        if not hasattr(env, 'todo_all_todos'):
            env.todo_all_todos = []
        env.todo_all_todos.append({
            'docname': env.docname,
            'lineno': self.lineno,
            'todo': admonition[0].deepcopy(),
            'target': target_node,
        })

        # return both the linking target and the node itself
        return [target_node] + admonition
# env data is persistent across source files so we purge whenever the source file has changed.
def purge_todos(app, env, docname):
    """Drop all todos recorded for *docname* from the build environment."""
    _missing = object()
    todos = getattr(env, 'todo_all_todos', _missing)
    if todos is _missing:
        # Nothing has been collected yet; nothing to purge.
        return
    env.todo_all_todos = [todo for todo in todos
                          if todo['docname'] != docname]
# called at the end of resolving phase; we will convert temporary nodes
# into finalized nodes
def process_todo_nodes(app, doctree, fromdocname):
    """Resolve Todolist placeholders into the collected todos with backlinks."""
    if not app.config.todo_include_todos:
        # Todos are disabled: strip every Todo node from the document.
        for node in doctree.traverse(Todo):
            node.parent.remove(node)
    # Replace all todolist nodes with a list of the collected todos.
    # Augment each todo with a backlink to the original location.
    env = app.builder.env
    for node in doctree.traverse(Todolist):
        if not app.config.todo_include_todos:
            node.replace_self([])
            continue
        content = []
        for todo_info in env.todo_all_todos:
            # Paragraph pointing back to the todo's original location.
            para = nodes.paragraph()
            filename = env.doc2path(todo_info['docname'], base=None)
            description = (
                ('(The original entry is located in %s, line %d and can be found ') %
                (filename, todo_info['lineno']))
            para += nodes.Text(description, description)
            # Create a reference
            newnode = nodes.reference('', '')
            innernode = nodes.emphasis(('here'), ('here'))
            newnode['refdocname'] = todo_info['docname']
            newnode['refuri'] = app.builder.get_relative_uri(
                fromdocname, todo_info['docname'])
            # Append the anchor of the target node created by TodoDirective.
            newnode['refuri'] += '#' + todo_info['target']['refid']
            newnode.append(innernode)
            para += newnode
            para += nodes.Text('.)', '.)')
            # Insert into the todolist
            content.append(todo_info['todo'])
            content.append(para)
        node.replace_self(content)
|
jimsrc/seatos | refs/heads/master | etc/n_CR/share/load_data.py | 2 | #!/usr/bin/env ipython
import numpy as np
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class gral():
    """Empty attribute container used as a simple namespace."""
    def __init__(self):
        # Placeholder label; callers attach arbitrary data attributes later.
        self.name = ''
# Namespaces for sheath (sh), magnetic cloud (mc) and cosmic-ray (cr) data.
sh, mc = gral(), gral()
cr = gral()
cr.sh, cr.mc = gral(), gral()
# Solar-wind speed window selecting the event subset to load.
vlo, vhi = 550.0, 3000.0 #100.0, 450.0 #550.0, 3000.0
dir_inp_sh = '../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = '../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%4.1f.vhi.%4.1f' % (vlo, vhi)
#--- rmsB
fname_sh = dir_inp_sh + '/%s_rmsB.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_rmsB.txt' % fname_inp_part
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
# Columns 0 and 2 of the transposed files hold the time axis and the
# averaged profile; the other columns are unused here.
sh.t, sh.rmsB = sh.data[0], sh.data[2]
mc.t, mc.rmsB = mc.data[0], mc.data[2]
#--- B
fname_sh = dir_inp_sh + '/%s_B.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_B.txt' % fname_inp_part
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
sh.t, sh.B = sh.data[0], sh.data[2]
mc.t, mc.B = mc.data[0], mc.data[2]
#++++++++++++++++++++++++++++++++++++++++++++++++++++
# Cosmic-ray count profiles for the same events.
fname_sh = dir_inp_sh + '/%s_CRs.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_CRs.txt' % fname_inp_part
cr.sh.data = np.loadtxt(fname_sh).T
cr.mc.data = np.loadtxt(fname_mc).T
cr.sh.t, cr.sh.avr = cr.sh.data[0], cr.sh.data[2]
cr.mc.t, cr.mc.avr = cr.mc.data[0], cr.mc.data[2]
|
helixyte/TheLMA | refs/heads/master | thelma/repositories/rdb/mappers/isojobpreparationplate.py | 1 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
ISO job preparation plate mapper
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.iso import IsoJobPreparationPlate
from thelma.entities.job import IsoJob
from thelma.entities.rack import Rack
from thelma.entities.racklayout import RackLayout
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(iso_job_preparation_plate_tbl):
    "Mapper factory."
    # Relationship properties are assembled first for readability; the
    # iso_job side is bidirectional via back_populates.
    rel_properties = dict(
        rack=relationship(Rack, uselist=False),
        rack_layout=relationship(RackLayout, uselist=False),
        iso_job=relationship(IsoJob, uselist=False,
                             back_populates='iso_job_preparation_plates'),
        )
    return mapper(IsoJobPreparationPlate, iso_job_preparation_plate_tbl,
                  id_attribute='iso_job_preparation_plate_id',
                  properties=rel_properties,
                  )
|
LinuxChristian/home-assistant | refs/heads/dev | homeassistant/components/ifttt.py | 20 | """
Support to trigger Maker IFTTT recipes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ifttt/
"""
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
# Python package Home Assistant installs for this component.
REQUIREMENTS = ['pyfttt==0.3']
_LOGGER = logging.getLogger(__name__)
# Service-call data keys; value1..value3 map to IFTTT ingredient slots.
ATTR_EVENT = 'event'
ATTR_VALUE1 = 'value1'
ATTR_VALUE2 = 'value2'
ATTR_VALUE3 = 'value3'
CONF_KEY = 'key'
DOMAIN = 'ifttt'
SERVICE_TRIGGER = 'trigger'
# Schema for ifttt.trigger service calls: event is mandatory, values optional.
SERVICE_TRIGGER_SCHEMA = vol.Schema({
    vol.Required(ATTR_EVENT): cv.string,
    vol.Optional(ATTR_VALUE1): cv.string,
    vol.Optional(ATTR_VALUE2): cv.string,
    vol.Optional(ATTR_VALUE3): cv.string,
})
# configuration.yaml schema: only the Maker webhook key is required.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_KEY): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def trigger(hass, event, value1=None, value2=None, value3=None):
    """Trigger a Maker IFTTT recipe by calling the ifttt.trigger service."""
    payload = dict(zip((ATTR_EVENT, ATTR_VALUE1, ATTR_VALUE2, ATTR_VALUE3),
                       (event, value1, value2, value3)))
    hass.services.call(DOMAIN, SERVICE_TRIGGER, payload)
def setup(hass, config):
    """Set up the IFTTT service component."""
    # Maker webhook key from configuration.yaml; sent with every event.
    key = config[DOMAIN][CONF_KEY]
    def trigger_service(call):
        """Handle IFTTT trigger service calls."""
        event = call.data[ATTR_EVENT]
        value1 = call.data.get(ATTR_VALUE1)
        value2 = call.data.get(ATTR_VALUE2)
        value3 = call.data.get(ATTR_VALUE3)
        try:
            # Imported lazily so the dependency is only needed when used.
            import pyfttt as pyfttt
            pyfttt.send_event(key, event, value1, value2, value3)
        except requests.exceptions.RequestException:
            _LOGGER.exception("Error communicating with IFTTT")
    hass.services.register(DOMAIN, SERVICE_TRIGGER, trigger_service,
                           schema=SERVICE_TRIGGER_SCHEMA)
    return True
|
akesandgren/easybuild-framework | refs/heads/main | easybuild/base/generaloption.py | 1 | #
# Copyright 2011-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
A class that can be used to generated options to python scripts in a general way.
:author: Stijn De Weirdt (Ghent University)
:author: Jens Timmerman (Ghent University)
"""
import copy
import difflib
import inspect
import operator
import os
import re
import sys
import textwrap
from functools import reduce
from optparse import Option, OptionGroup, OptionParser, OptionValueError, Values
from optparse import SUPPRESS_HELP as nohelp # supported in optparse of python v2.4
from optparse import gettext as _gettext # this is gettext.gettext normally
from easybuild.base.fancylogger import getLogger, setroot, setLogLevel, getDetailsLogLevels
from easybuild.base.optcomplete import autocomplete, CompleterOption
from easybuild.tools.py2vs3 import StringIO, configparser, string_type
from easybuild.tools.utilities import mk_rst_table, nub, shell_quote
HELP_OUTPUT_FORMATS = ['', 'rst', 'short', 'config']
def set_columns(cols=None):
    """Set os.environ COLUMNS variable
        - only if it is not set already
    """
    if 'COLUMNS' in os.environ:
        # Respect an existing value; never overwrite.
        return
    if cols is None:
        # Ask the terminal via stty; ignore any failure silently.
        stty = '/usr/bin/stty'
        if os.path.exists(stty):
            try:
                size_output = os.popen('%s size 2>/dev/null' % stty).read()
                cols = int(size_output.strip().split(' ')[1])
            except (AttributeError, IndexError, OSError, ValueError):
                pass
    if cols is not None:
        os.environ['COLUMNS'] = "%s" % cols
def what_str_list_tuple(name):
    """Given name, return separator, class and helptext wrt separator.
    (Currently supports strlist, strtuple, pathlist, pathtuple)
    """
    # 'path*' types split on the OS path separator, others on comma.
    if name.startswith('path'):
        sep, helpsep = os.pathsep, 'pathsep'
    else:
        sep, helpsep = ',', 'comma'
    # The suffix selects the container class; unknown suffixes yield None.
    if name.endswith('list'):
        klass = list
    elif name.endswith('tuple'):
        klass = tuple
    else:
        klass = None
    return sep, klass, helpsep
def check_str_list_tuple(option, opt, value):  # pylint: disable=unused-argument
    """
    check function for strlist and strtuple type
    assumes value is comma-separated list
    returns list or tuple of strings
    """
    sep, klass, _ = what_str_list_tuple(option.type)
    parts = value.split(sep)
    if klass is None:
        # Unknown type suffix: report it via optparse's own error channel.
        raise OptionValueError(
            _gettext("check_strlist_strtuple: unsupported type %s" % option.type))
    return klass(parts)
def get_empty_add_flex(allvalues, self=None):
    """Return the empty element for add_flex action for allvalues"""
    # A list/tuple of strings has the empty string as its "empty" element.
    if isinstance(allvalues, (list, tuple)) and isinstance(allvalues[0], string_type):
        return ''
    # No empty element known for this type: raise, or delegate to the
    # caller's logger when one was passed in.
    message = ("get_empty_add_flex cannot determine empty element for type %s (%s)"
               % (type(allvalues), allvalues))
    if self is None:
        raise TypeError(message)
    self.log.raiseException(message, TypeError)
class ExtOption(CompleterOption):
"""Extended options class
- enable/disable support
Actions:
- shorthelp : hook for shortend help messages
- confighelp : hook for configfile-style help messages
- store_debuglog : turns on fancylogger debugloglevel
- also: 'store_infolog', 'store_warninglog'
- add : add value to default (result is default + value)
- add_first : add default to value (result is value + default)
- extend : alias for add with strlist type
- type must support + (__add__) and one of negate (__neg__) or slicing (__getslice__)
- add_flex : similar to add / add_first, but replaces the first "empty" element with the default
- the empty element is dependent of the type
- for {str,path}{list,tuple} this is the empty string
- types must support the index method to determine the location of the "empty" element
- the replacement uses +
- e.g. a strlist type with value "0,,1"` and default [3,4] and action add_flex will
use the empty string '' as "empty" element, and will result in [0,3,4,1] (not [0,[3,4],1])
(but also a strlist with value "" and default [3,4] will result in [3,4];
so you can't set an empty list with add_flex)
- date : convert into datetime.date
- datetime : convert into datetime.datetime
- regex: compile str in regexp
- store_or_None
- set default to None if no option passed,
- set to default if option without value passed,
- set to value if option with value passed
Types:
- strlist, strtuple : convert comma-separated string in a list resp. tuple of strings
- pathlist, pathtuple : using os.pathsep, convert pathsep-separated string in a list resp. tuple of strings
- the path separator is OS-dependent
"""
EXTEND_SEPARATOR = ','
ENABLE = 'enable' # do nothing
DISABLE = 'disable' # inverse action
EXTOPTION_EXTRA_OPTIONS = ('date', 'datetime', 'regex', 'add', 'add_first', 'add_flex',)
EXTOPTION_STORE_OR = ('store_or_None', 'help') # callback type
EXTOPTION_LOG = ('store_debuglog', 'store_infolog', 'store_warninglog',)
EXTOPTION_HELP = ('shorthelp', 'confighelp', 'help')
ACTIONS = Option.ACTIONS + EXTOPTION_EXTRA_OPTIONS + EXTOPTION_STORE_OR + EXTOPTION_LOG + EXTOPTION_HELP
STORE_ACTIONS = Option.STORE_ACTIONS + EXTOPTION_EXTRA_OPTIONS + EXTOPTION_LOG + ('store_or_None',)
TYPED_ACTIONS = Option.TYPED_ACTIONS + EXTOPTION_EXTRA_OPTIONS + EXTOPTION_STORE_OR
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + EXTOPTION_EXTRA_OPTIONS
TYPE_STRLIST = ['%s%s' % (name, klass) for klass in ['list', 'tuple'] for name in ['str', 'path']]
TYPE_CHECKER = dict([(x, check_str_list_tuple) for x in TYPE_STRLIST] + list(Option.TYPE_CHECKER.items()))
TYPES = tuple(TYPE_STRLIST + list(Option.TYPES))
BOOLEAN_ACTIONS = ('store_true', 'store_false',) + EXTOPTION_LOG
def __init__(self, *args, **kwargs):
"""Add logger to init"""
CompleterOption.__init__(self, *args, **kwargs)
self.log = getLogger(self.__class__.__name__)
def _set_attrs(self, attrs):
"""overwrite _set_attrs to allow store_or callbacks"""
Option._set_attrs(self, attrs)
if self.action == 'extend':
# alias
self.action = 'add'
self.type = 'strlist'
elif self.action in self.EXTOPTION_STORE_OR:
setattr(self, 'store_or', self.action)
def store_or(option, opt_str, value, parser, *args, **kwargs): # pylint: disable=unused-argument
"""Callback for supporting options with optional values."""
# see http://stackoverflow.com/questions/1229146/parsing-empty-options-in-python
# ugly code, optparse is crap
if parser.rargs and not parser.rargs[0].startswith('-'):
val = option.check_value(opt_str, parser.rargs.pop(0))
else:
val = kwargs.get('orig_default', None)
setattr(parser.values, option.dest, val)
# without the following, --x=y doesn't work; only --x y
self.nargs = 0 # allow 0 args, will also use 0 args
if self.type is None:
# set to not None, for takes_value to return True
self.type = 'string'
self.callback = store_or
self.callback_kwargs = {
'orig_default': copy.deepcopy(self.default),
}
self.action = 'callback' # act as callback
if self.store_or in self.EXTOPTION_STORE_OR:
self.default = None
else:
self.log.raiseException("_set_attrs: unknown store_or %s" % self.store_or, exception=ValueError)
def process(self, opt, value, values, parser):
"""Handle option-as-value issues before actually processing option."""
if hasattr(parser, 'is_value_a_commandline_option'):
errmsg = parser.is_value_a_commandline_option(opt, value)
if errmsg is not None:
prefix = "%s=" % self._long_opts[0] if self._long_opts else self._short_opts[0]
self.log.raiseException("%s. Use '%s%s' if the value is correct." % (errmsg, prefix, value),
exception=OptionValueError)
return Option.process(self, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
"""Extended take_action"""
orig_action = action # keep copy
# dest is None for actions like shorthelp and confighelp
if dest and getattr(parser._long_opt.get('--' + dest, ''), 'store_or', '') == 'help':
Option.take_action(self, action, dest, opt, value, values, parser)
fn = getattr(parser, 'print_%shelp' % values.help, None)
if fn is None:
self.log.raiseException("Unsupported output format for help: %s" % value.help, exception=ValueError)
else:
fn()
parser.exit()
elif action == 'shorthelp':
parser.print_shorthelp()
parser.exit()
elif action == 'confighelp':
parser.print_confighelp()
parser.exit()
elif action in ('store_true', 'store_false',) + self.EXTOPTION_LOG:
if action in self.EXTOPTION_LOG:
action = 'store_true'
if opt.startswith("--%s-" % self.ENABLE):
# keep action
pass
elif opt.startswith("--%s-" % self.DISABLE):
# reverse action
if action in ('store_true',) + self.EXTOPTION_LOG:
action = 'store_false'
elif action in ('store_false',):
action = 'store_true'
if orig_action in self.EXTOPTION_LOG and action == 'store_true':
newloglevel = orig_action.split('_')[1][:-3].upper()
logstate = ", ".join(["(%s, %s)" % (n, l) for n, l in getDetailsLogLevels()])
self.log.debug("changing loglevel to %s, current state: %s", newloglevel, logstate)
setLogLevel(newloglevel)
self.log.debug("changed loglevel to %s, previous state: %s", newloglevel, logstate)
if hasattr(values, '_logaction_taken'):
values._logaction_taken[dest] = True
Option.take_action(self, action, dest, opt, value, values, parser)
elif action in self.EXTOPTION_EXTRA_OPTIONS:
if action in ("add", "add_first", "add_flex",):
# determine type from lvalue
# set default first
default = getattr(parser.get_default_values(), dest, None)
if default is None:
default = type(value)()
# 'add*' actions require that the default value is of type list or tuple,
# which supports composing via '+' and slicing
if not isinstance(default, (list, tuple)):
msg = "Unsupported type %s for action %s (requires list)"
self.log.raiseException(msg % (type(default), action))
if action in ('add', 'add_flex'):
lvalue = default + value
elif action == 'add_first':
lvalue = value + default
if action == 'add_flex' and lvalue:
# use lvalue here rather than default to make sure there is 1 element
# to determine the type
if not hasattr(lvalue, 'index'):
msg = "Unsupported type %s for action %s (requires index method)"
self.log.raiseException(msg % (type(lvalue), action))
empty = get_empty_add_flex(lvalue, self=self)
if empty in value:
ind = value.index(empty)
lvalue = value[:ind] + default + value[ind + 1:]
else:
lvalue = value
elif action == "regex":
lvalue = re.compile(r'' + value)
else:
msg = "Unknown extended option action %s (known: %s)"
self.log.raiseException(msg % (action, self.EXTOPTION_EXTRA_OPTIONS))
setattr(values, dest, lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
# set flag to mark as passed by action (ie not by default)
# - distinguish from setting default value through option
if hasattr(values, '_action_taken'):
values._action_taken[dest] = True
class ExtOptionGroup(OptionGroup):
"""An OptionGroup with support for configfile section names"""
RESERVED_SECTIONS = [configparser.DEFAULTSECT]
NO_SECTION = ('NO', 'SECTION')
def __init__(self, *args, **kwargs):
self.log = getLogger(self.__class__.__name__)
section_name = kwargs.pop('section_name', None)
if section_name in self.RESERVED_SECTIONS:
self.log.raiseException('Cannot use reserved name %s for section name.' % section_name)
OptionGroup.__init__(self, *args, **kwargs)
self.section_name = section_name
self.section_options = []
def add_option(self, *args, **kwargs):
"""Extract configfile section info"""
option = OptionGroup.add_option(self, *args, **kwargs)
self.section_options.append(option)
return option
class ExtOptionParser(OptionParser):
"""
Make an option parser that limits the C{-h} / C{--shorthelp} to short opts only,
C{-H} / C{--help} for all options.
Pass options through environment. Like:
- C{export PROGNAME_SOMEOPTION = value} will generate {--someoption=value}
- C{export PROGNAME_OTHEROPTION = 1} will generate {--otheroption}
- C{export PROGNAME_OTHEROPTION = 0} (or no or false) won't do anything
distinction is made based on option.action in TYPED_ACTIONS allow
C{--enable-} / C{--disable-} (using eg ExtOption option_class)
"""
shorthelp = ('h', "--shorthelp",)
longhelp = ('H', "--help",)
VALUES_CLASS = Values
DESCRIPTION_DOCSTRING = False
ALLOW_OPTION_NAME_AS_VALUE = False # exact match for option name (without the '-') as value
ALLOW_OPTION_AS_VALUE = False # exact match for option as value
ALLOW_DASH_AS_VALUE = False # any value starting with a '-'
ALLOW_TYPO_AS_VALUE = True # value with similarity score from difflib.get_close_matches
def __init__(self, *args, **kwargs):
"""
Following named arguments are specific to ExtOptionParser
(the remaining ones are passed to the parent OptionParser class)
:param help_to_string: boolean, if True, the help is written
to a newly created StingIO instance
:param help_to_file: filehandle, help is written to this filehandle
:param envvar_prefix: string, specify the environment variable prefix
to use (if you don't want the default one)
:param process_env_options: boolean, if False, don't check the
environment for options (default: True)
:param error_env_options: boolean, if True, use error_env_options_method
if an environment variable with correct envvar_prefix
exists but does not correspond to an existing option
(default: False)
:param error_env_options_method: callable; method to use to report error
in used environment variables (see error_env_options);
accepts string value + additional
string arguments for formatting the message
(default: own log.error method)
"""
self.log = getLogger(self.__class__.__name__)
self.help_to_string = kwargs.pop('help_to_string', None)
self.help_to_file = kwargs.pop('help_to_file', None)
self.envvar_prefix = kwargs.pop('envvar_prefix', None)
self.process_env_options = kwargs.pop('process_env_options', True)
self.error_env_options = kwargs.pop('error_env_options', False)
self.error_env_option_method = kwargs.pop('error_env_option_method', self.log.error)
# py2.4 epilog compatibilty with py2.7 / optparse 1.5.3
self.epilog = kwargs.pop('epilog', None)
if 'option_class' not in kwargs:
kwargs['option_class'] = ExtOption
OptionParser.__init__(self, *args, **kwargs)
# redefine formatter for py2.4 compat
if not hasattr(self.formatter, 'format_epilog'):
setattr(self.formatter, 'format_epilog', self.formatter.format_description)
if self.epilog is None:
self.epilog = []
if hasattr(self.option_class, 'ENABLE') and hasattr(self.option_class, 'DISABLE'):
epilogtxt = 'Boolean options support %(disable)s prefix to do the inverse of the action,'
epilogtxt += ' e.g. option --someopt also supports --disable-someopt.'
self.epilog.append(epilogtxt % {'disable': self.option_class.DISABLE})
self.environment_arguments = None
self.commandline_arguments = None
def is_value_a_commandline_option(self, opt, value, index=None):
"""
Determine if value is/could be an option passed via the commandline.
If it is, return the reason why (can be used as message); or return None if it isn't.
opt is the option flag to which the value is passed;
index is the index of the value on the commandline (if None, it is determined from orig_rargs and rargs)
The method tests for possible ambiguity on the commandline when the parser
interprets the argument following an option as a value, whereas it is far more likely that
it is (intended as) an option; --longopt=value is never considered ambiguous, regardless of the value.
"""
# Values that are/could be options that are passed via
# only --longopt=value is not a problem.
# When processing the enviroment and/or configfile, we always set
# --longopt=value, so no issues there either.
# following checks assume that value is a string (not a store_or_None)
if not isinstance(value, string_type):
return None
cmdline_index = None
try:
cmdline_index = self.commandline_arguments.index(value)
except ValueError:
# no index found for value, so not a stand-alone value
if opt.startswith('--'):
# only --longopt=value is unambigouos
return None
if index is None:
# index of last parsed arg in commandline_arguments via remainder of rargs
index = len(self.commandline_arguments) - len(self.rargs) - 1
if cmdline_index is not None and index != cmdline_index:
# This is not the value you are looking for
return None
if not self.ALLOW_OPTION_NAME_AS_VALUE:
value_as_opt = '-%s' % value
if value_as_opt in self._short_opt or value_as_opt in self._long_opt:
return "'-%s' is a valid option" % value
if (not self.ALLOW_OPTION_AS_VALUE) and (value in self._long_opt or value in self._short_opt):
return "Value '%s' is also a valid option" % value
if not self.ALLOW_DASH_AS_VALUE and value.startswith('-'):
return "Value '%s' starts with a '-'" % value
if not self.ALLOW_TYPO_AS_VALUE:
possibilities = self._long_opt.keys() + self._short_opt.keys()
# also on optionnames, i.e. without the -- / -
possibilities.extend([x.lstrip('-') for x in possibilities])
# max 3 options; minimum score is taken from EB experience
matches = difflib.get_close_matches(value, possibilities, 3, 0.85)
if matches:
return "Value '%s' too close match to option(s) %s" % (value, ', '.join(matches))
return None
def set_description_docstring(self):
"""Try to find the main docstring and add it if description is not None"""
stack = inspect.stack()[-1]
try:
docstr = stack[0].f_globals.get('__doc__', None)
except (IndexError, ValueError, AttributeError):
self.log.debug("set_description_docstring: no docstring found in latest stack globals")
docstr = None
if docstr is not None:
indent = " "
# kwargs and ** magic to deal with width
kwargs = {
'initial_indent': indent * 2,
'subsequent_indent': indent * 2,
'replace_whitespace': False,
}
width = os.environ.get('COLUMNS', None)
if width is not None:
# default textwrap width
try:
kwargs['width'] = int(width)
except ValueError:
pass
# deal with newlines in docstring
final_docstr = ['']
for line in str(docstr).strip("\n ").split("\n"):
final_docstr.append(textwrap.fill(line, **kwargs))
final_docstr.append('')
return "\n".join(final_docstr)
def format_description(self, formatter):
"""Extend to allow docstring as description"""
description = ''
if self.description == 'NONE_AND_NOT_NONE':
if self.DESCRIPTION_DOCSTRING:
description = self.set_description_docstring()
elif self.description:
description = formatter.format_description(self.get_description())
return str(description)
def set_usage(self, usage):
"""Return usage and set try to set autogenerated description."""
usage = OptionParser.set_usage(self, usage)
if self.description is None:
self.description = 'NONE_AND_NOT_NONE'
return usage
def get_default_values(self):
"""Introduce the ExtValues class with class constant
- make it dynamic, otherwise the class constant is shared between multiple instances
- class constant is used to avoid _action_taken as option in the __dict__
- only works by using reference to object
- same for _logaction_taken
"""
values = OptionParser.get_default_values(self)
class ExtValues(self.VALUES_CLASS):
_action_taken = {}
_logaction_taken = {}
newvalues = ExtValues()
newvalues.__dict__ = values.__dict__.copy()
return newvalues
def format_help(self, formatter=None):
"""For py2.4 compatibility reasons (missing epilog). This is the py2.7 / optparse 1.5.3 code"""
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def format_epilog(self, formatter):
"""Allow multiple epilog parts"""
res = []
if not isinstance(self.epilog, (list, tuple,)):
self.epilog = [self.epilog]
for epi in self.epilog:
res.append(formatter.format_epilog(epi))
return "".join(res)
def print_shorthelp(self, fh=None):
"""Print a shortened help (no longopts)"""
for opt in self._get_all_options():
if opt._short_opts is None or len([x for x in opt._short_opts if len(x) > 0]) == 0:
opt.help = nohelp
opt._long_opts = [] # remove all long_opts
removeoptgrp = []
for optgrp in self.option_groups:
# remove all option groups that have only nohelp options
if reduce(operator.and_, [opt.help == nohelp for opt in optgrp.option_list]):
removeoptgrp.append(optgrp)
for optgrp in removeoptgrp:
self.option_groups.remove(optgrp)
self.print_help(fh)
def check_help(self, fh):
"""Checks filehandle for help functions"""
if self.help_to_string:
self.help_to_file = StringIO()
if fh is None:
fh = self.help_to_file
if hasattr(self.option_class, 'ENABLE') and hasattr(self.option_class, 'DISABLE'):
def _is_enable_disable(x):
"""Does the option start with ENABLE/DISABLE"""
_e = x.startswith("--%s-" % self.option_class.ENABLE)
_d = x.startswith("--%s-" % self.option_class.DISABLE)
return _e or _d
for opt in self._get_all_options():
# remove all long_opts with ENABLE/DISABLE naming
opt._long_opts = [x for x in opt._long_opts if not _is_enable_disable(x)]
return fh
# pylint: disable=arguments-differ
def print_help(self, fh=None):
"""Intercept print to file to print to string and remove the ENABLE/DISABLE options from help"""
fh = self.check_help(fh)
OptionParser.print_help(self, fh)
def print_rsthelp(self, fh=None):
""" Print help in rst format """
fh = self.check_help(fh)
result = []
if self.usage:
title = "Usage"
result.extend([title, '-' * len(title), '', '``%s``' % self.get_usage().replace("Usage: ", '').strip(), ''])
if self.description:
title = "Description"
result.extend([title, '-' * len(title), '', self.description, ''])
result.append(self.format_option_rsthelp())
rsthelptxt = '\n'.join(result)
if fh is None:
fh = sys.stdout
fh.write(rsthelptxt)
def format_option_rsthelp(self, formatter=None):
""" Formatting for help in rst format """
if not formatter:
formatter = self.formatter
formatter.store_option_strings(self)
res = []
titles = ["Option flag", "Option description"]
all_opts = [("Help options", self.option_list)] + \
[(group.title, group.option_list) for group in self.option_groups]
for title, opts in all_opts:
values = []
res.extend([title, '-' * len(title)])
for opt in opts:
if opt.help is not nohelp:
values.append(['``%s``' % formatter.option_strings[opt], formatter.expand_default(opt)])
res.extend(mk_rst_table(titles, map(list, zip(*values))))
res.append('')
return '\n'.join(res)
def print_confighelp(self, fh=None):
"""Print help as a configfile."""
# walk through all optiongroups
# append where necessary, keep track of sections
all_groups = {}
sections = []
for gr in self.option_groups:
section = gr.section_name
if not (section is None or section == ExtOptionGroup.NO_SECTION):
if section not in sections:
sections.append(section)
ag = all_groups.setdefault(section, [])
ag.extend(gr.section_options)
# set MAIN section first if exists
main_idx = sections.index('MAIN')
if main_idx > 0: # not needed if it main_idx == 0
sections.remove('MAIN')
sections.insert(0, 'MAIN')
option_template = "# %(help)s\n#%(option)s=\n"
txt = ''
for section in sections:
txt += "[%s]\n" % section
for option in all_groups[section]:
data = {
'help': option.help,
'option': option.get_opt_string().lstrip('-'),
}
txt += option_template % data
txt += "\n"
# overwrite the format_help to be able to use the the regular print_help
def format_help(*args, **kwargs): # pylint: disable=unused-argument
return txt
self.format_help = format_help
self.print_help(fh)
def _add_help_option(self):
"""Add shorthelp and longhelp"""
self.add_option("-%s" % self.shorthelp[0],
self.shorthelp[1], # *self.shorthelp[1:], syntax error in Python 2.4
action="shorthelp",
help=_gettext("show short help message and exit"))
self.add_option("-%s" % self.longhelp[0],
self.longhelp[1], # *self.longhelp[1:], syntax error in Python 2.4
action="help",
type="choice",
choices=HELP_OUTPUT_FORMATS,
default=HELP_OUTPUT_FORMATS[0],
metavar='OUTPUT_FORMAT',
help=_gettext("show full help message and exit"))
self.add_option("--confighelp",
action="confighelp",
help=_gettext("show help as annotated configfile"))
def _get_args(self, args):
"""Prepend the options set through the environment"""
self.commandline_arguments = OptionParser._get_args(self, args)
self.get_env_options()
return self.environment_arguments + self.commandline_arguments # prepend the environment options as longopts
def get_env_options_prefix(self):
"""Return the prefix to use for options passed through the environment"""
# sys.argv[0] or the prog= argument of the optionparser, strip possible extension
if self.envvar_prefix is None:
self.envvar_prefix = self.get_prog_name().rsplit('.', 1)[0].upper()
return self.envvar_prefix
def get_env_options(self):
"""Retrieve options from the environment: prefix_longopt.upper()"""
self.environment_arguments = []
if not self.process_env_options:
self.log.debug("Not processing environment for options")
return
if self.envvar_prefix is None:
self.get_env_options_prefix()
epilogprefixtxt = "All long option names can be passed as environment variables. "
epilogprefixtxt += "Variable name is %(prefix)s_<LONGNAME> "
epilogprefixtxt += "eg. --some-opt is same as setting %(prefix)s_SOME_OPT in the environment."
self.epilog.append(epilogprefixtxt % {'prefix': self.envvar_prefix})
candidates = dict([(k, v) for k, v in os.environ.items() if k.startswith("%s_" % self.envvar_prefix)])
for opt in self._get_all_options():
if opt._long_opts is None:
continue
for lo in opt._long_opts:
if len(lo) == 0:
continue
env_opt_name = "%s_%s" % (self.envvar_prefix, lo.lstrip('-').replace('-', '_').upper())
val = candidates.pop(env_opt_name, None)
if val is not None:
if opt.action in opt.TYPED_ACTIONS: # not all typed actions are mandatory, but let's assume so
self.environment_arguments.append("%s=%s" % (lo, val))
else:
# interpretation of values: 0/no/false means: don't set it
if ("%s" % val).lower() not in ("0", "no", "false",):
self.environment_arguments.append("%s" % lo)
else:
self.log.debug("Environment variable %s is not set" % env_opt_name)
if candidates:
msg = "Found %s environment variable(s) that are prefixed with %s but do not match valid option(s): %s"
if self.error_env_options:
logmethod = self.error_env_option_method
else:
logmethod = self.log.debug
logmethod(msg, len(candidates), self.envvar_prefix, ','.join(sorted(candidates)))
self.log.debug("Environment variable options with prefix %s: %s",
self.envvar_prefix, self.environment_arguments)
return self.environment_arguments
def get_option_by_long_name(self, name):
"""Return the option matching the long option name"""
for opt in self._get_all_options():
if opt._long_opts is None:
continue
for lo in opt._long_opts:
if len(lo) == 0:
continue
dest = lo.lstrip('-')
if name == dest:
return opt
return None
class GeneralOption(object):
"""
'Used-to-be simple' wrapper class for option parsing
Options with go_ prefix are for this class, the remainder is passed to the parser
- go_args : use these instead of of sys.argv[1:]
- go_columns : specify column width (in columns)
- go_useconfigfiles : use configfiles or not (default set by CONFIGFILES_USE)
if True, an option --configfiles will be added
- go_configfiles : list of configfiles to parse. Uses ConfigParser.read; last file wins
- go_configfiles_initenv : section dict of key/value dict; inserted before configfileparsing
As a special case, using all uppercase key in DEFAULT section with a case-sensitive
configparser can be used to set "constants" for easy interpolation in all sections.
- go_loggername : name of logger, default classname
- go_mainbeforedefault : set the main options before the default ones
- go_autocompleter : dict with named options to pass to the autocomplete call (eg arg_completer)
if is None: disable autocompletion; default is {} (ie no extra args passed)
Sections starting with the string 'raw_' in the sectionname will be parsed as raw sections,
meaning there will be no interpolation of the strings. This comes in handy if you want to configure strings
with templates in them.
Options process order (last one wins)
0. default defined with option
1. value in (last) configfile (last configfile wins)
2. options parsed by option parser
In case the ExtOptionParser is used
0. value set through environment variable
1. value set through commandline option
"""
OPTIONNAME_PREFIX_SEPARATOR = '-'
DEBUG_OPTIONS_BUILD = False # enable debug mode when building the options ?
USAGE = None
ALLOPTSMANDATORY = True
PARSER = ExtOptionParser
INTERSPERSED = True # mix args with options
CONFIGFILES_USE = True
CONFIGFILES_RAISE_MISSING = False
CONFIGFILES_INIT = [] # initial list of defaults, overwritten by go_configfiles options
CONFIGFILES_IGNORE = []
CONFIGFILES_MAIN_SECTION = 'MAIN' # sectionname that contains the non-grouped/non-prefixed options
CONFIGFILE_PARSER = configparser.SafeConfigParser
CONFIGFILE_CASESENSITIVE = True
METAVAR_DEFAULT = True # generate a default metavar
METAVAR_MAP = None # metvar, list of longopts map
OPTIONGROUP_SORTED_OPTIONS = True
PROCESSED_OPTIONS_PROPERTIES = ['type', 'default', 'action', 'opt_name', 'prefix', 'section_name']
VERSION = None # set the version (will add --version)
DEFAULTSECT = configparser.DEFAULTSECT
DEFAULT_LOGLEVEL = None
DEFAULT_CONFIGFILES = None
DEFAULT_IGNORECONFIGFILES = None
SETROOTLOGGER = False
def __init__(self, **kwargs):
go_args = kwargs.pop('go_args', None)
self.no_system_exit = kwargs.pop('go_nosystemexit', None) # unit test option
self.use_configfiles = kwargs.pop('go_useconfigfiles', self.CONFIGFILES_USE) # use or ignore config files
self.configfiles = kwargs.pop('go_configfiles', self.CONFIGFILES_INIT[:]) # configfiles to parse
configfiles_initenv = kwargs.pop('go_configfiles_initenv', None) # initial environment for configfiles to parse
prefixloggername = kwargs.pop('go_prefixloggername', False) # name of logger is same as envvar prefix
mainbeforedefault = kwargs.pop('go_mainbeforedefault', False) # Set the main options before the default ones
autocompleter = kwargs.pop('go_autocompleter', {}) # Pass these options to the autocomplete call
if self.SETROOTLOGGER:
setroot()
set_columns(kwargs.pop('go_columns', None))
kwargs.update({
'option_class': ExtOption,
'usage': kwargs.get('usage', self.USAGE),
'version': self.VERSION,
})
self.parser = self.PARSER(**kwargs)
self.parser.allow_interspersed_args = self.INTERSPERSED
self.configfile_parser = None
self.configfile_remainder = {}
loggername = self.__class__.__name__
if prefixloggername:
prefix = self.parser.get_env_options_prefix()
if prefix is not None and len(prefix) > 0:
loggername = prefix.replace('.', '_') # . indicate hierarchy in logging land
self.log = getLogger(name=loggername)
self.options = None
self.args = None
self.autocompleter = autocompleter
self.auto_prefix = None
self.auto_section_name = None
self.processed_options = {}
self.config_prefix_sectionnames_map = {}
self.set_go_debug()
if mainbeforedefault:
self.main_options()
self._default_options()
else:
self._default_options()
self.main_options()
self.parseoptions(options_list=go_args)
if self.options is not None:
# None for eg usage/help
self.configfile_parser_init(initenv=configfiles_initenv)
self.parseconfigfiles()
self._set_default_loglevel()
self.postprocess()
self.validate()
def set_go_debug(self):
"""Check if debug options are on and then set fancylogger to debug.
This is not the default way to set debug, it enables debug logging
in an earlier stage to debug generaloption itself.
"""
if self.options is None:
if self.DEBUG_OPTIONS_BUILD:
setLogLevel('DEBUG')
def _default_options(self):
"""Generate default options: debug/log and configfile"""
self._make_debug_options()
self._make_configfiles_options()
def _make_debug_options(self):
"""Add debug/logging options: debug and info"""
self._logopts = {
'debug': ("Enable debug log mode", None, "store_debuglog", False, 'd'),
'info': ("Enable info log mode", None, "store_infolog", False),
'quiet': ("Enable quiet/warning log mode", None, "store_warninglog", False),
}
descr = ['Debug and logging options', '']
self.log.debug("Add debug and logging options descr %s opts %s (no prefix)" % (descr, self._logopts))
self.add_group_parser(self._logopts, descr, prefix=None)
def _set_default_loglevel(self):
"""Set the default loglevel if no logging options are set"""
loglevel_set = sum([getattr(self.options, name, False) for name in self._logopts.keys()])
if not loglevel_set and self.DEFAULT_LOGLEVEL is not None:
setLogLevel(self.DEFAULT_LOGLEVEL)
def _make_configfiles_options(self):
"""Add configfiles option"""
opts = {
'configfiles': ("Parse (additional) configfiles", "strlist", "add", self.DEFAULT_CONFIGFILES),
'ignoreconfigfiles': ("Ignore configfiles", "strlist", "add", self.DEFAULT_IGNORECONFIGFILES),
}
descr = ['Configfile options', '']
self.log.debug("Add configfiles options descr %s opts %s (no prefix)" % (descr, opts))
self.add_group_parser(opts, descr, prefix=None, section_name=ExtOptionGroup.NO_SECTION)
def main_options(self):
"""Create the main options automatically"""
# make_init is deprecated
if hasattr(self, 'make_init'):
self.log.debug('main_options: make_init is deprecated. Rename function to main_options.')
getattr(self, 'make_init')()
else:
# function names which end with _options and do not start with main or _
reg_main_options = re.compile("^(?!_|main).*_options$")
names = [x for x in dir(self) if reg_main_options.search(x)]
if len(names) == 0:
self.log.error("main_options: no options functions implemented")
else:
for name in names:
fn = getattr(self, name)
if callable(fn): # inspect.isfunction fails beacuse this is a boundmethod
self.auto_section_name = '_'.join(name.split('_')[:-1])
self.log.debug('main_options: adding options from %s (auto_section_name %s)' %
(name, self.auto_section_name))
fn()
self.auto_section_name = None # reset it
def make_option_metavar(self, longopt, details): # pylint: disable=unused-argument
"""Generate the metavar for option longopt
@type longopt: str
@type details: tuple
"""
if self.METAVAR_MAP is not None:
for metavar, longopts in self.METAVAR_MAP.items():
if longopt in longopts:
return metavar
if self.METAVAR_DEFAULT:
return longopt.upper()
def add_group_parser(self, opt_dict, description, prefix=None, otherdefaults=None, section_name=None):
"""Make a group parser from a dict
@type opt_dict: dict
@type description: a 2 element list (short and long description)
@section_name: str, the name of the section group in the config file.
:param opt_dict: options, with the form C{"long_opt" : value}.
Value is a C{tuple} containing
C{(help,type,action,default(,optional string=short option; list/tuple=choices; dict=add_option kwargs))}
help message passed through opt_dict will be extended with type and default
If section_name is None, prefix will be used. If prefix is None or '', 'DEFAULT' is used.
"""
if opt_dict is None:
# skip opt_dict None
# if opt_dict is empty dict {}, the eg the descritionis added to the help
self.log.debug("Skipping opt_dict %s with description %s prefix %s" %
(opt_dict, description, prefix))
return
if otherdefaults is None:
otherdefaults = {}
self.log.debug("add_group_parser: passed prefix %s section_name %s" % (prefix, section_name))
self.log.debug("add_group_parser: auto_prefix %s auto_section_name %s" %
(self.auto_prefix, self.auto_section_name))
if prefix is None:
if self.auto_prefix is None:
prefix = ''
else:
prefix = self.auto_prefix
if section_name is None:
if prefix is not None and len(prefix) > 0 and not (prefix == self.auto_prefix):
section_name = prefix
elif self.auto_section_name is not None and len(self.auto_section_name) > 0:
section_name = self.auto_section_name
else:
section_name = self.CONFIGFILES_MAIN_SECTION
self.log.debug("add_group_parser: set prefix %s section_name %s" % (prefix, section_name))
# add the section name to the help output
if section_name is None or section_name == ExtOptionGroup.NO_SECTION:
section_help = ''
else:
section_help = " (configfile section %s)" % (section_name)
if description[1]:
short_description = description[0]
long_description = "%s%s" % (description[1], section_help)
else:
short_description = "%s%s" % (description[0], section_help)
long_description = description[1]
opt_grp = ExtOptionGroup(self.parser, short_description, long_description, section_name=section_name)
keys = list(opt_dict.keys())
if self.OPTIONGROUP_SORTED_OPTIONS:
keys.sort() # alphabetical
for key in keys:
completer = None
details = opt_dict[key]
hlp = details[0]
typ = details[1]
action = details[2]
default = details[3]
# easy override default with otherdefault
if key in otherdefaults:
default = otherdefaults.get(key)
extra_help = []
if typ in ExtOption.TYPE_STRLIST:
sep, klass, helpsep = what_str_list_tuple(typ)
extra_help.append("type %s-separated %s" % (helpsep, klass.__name__))
elif typ is not None:
extra_help.append("type %s" % typ)
if default is not None:
if len(str(default)) == 0:
extra_help.append("default: ''") # empty string
elif typ in ExtOption.TYPE_STRLIST:
extra_help.append("default: %s" % sep.join(default))
else:
extra_help.append("default: %s" % default)
# for boolean options enabled by default, mention that they can be disabled using --disable-*
if default is True:
extra_help.append("disable with --disable-%s" % key)
if len(extra_help) > 0:
hlp += " (%s)" % ("; ".join(extra_help))
opt_name, opt_dest = self.make_options_option_name_and_destination(prefix, key)
args = ["--%s" % opt_name]
# this has to match PROCESSED_OPTIONS_PROPERTIES
self.processed_options[opt_dest] = [typ, default, action, opt_name, prefix, section_name] # add longopt
if len(self.processed_options[opt_dest]) != len(self.PROCESSED_OPTIONS_PROPERTIES):
self.log.raiseException("PROCESSED_OPTIONS_PROPERTIES length mismatch")
nameds = {
'dest': opt_dest,
'action': action,
}
metavar = self.make_option_metavar(key, details)
if metavar is not None:
nameds['metavar'] = metavar
if default is not None:
nameds['default'] = default
if typ:
nameds['type'] = typ
passed_kwargs = {}
if len(details) >= 5:
for extra_detail in details[4:]:
if isinstance(extra_detail, (list, tuple,)):
# choices
nameds['choices'] = ["%s" % x for x in extra_detail] # force to strings
hlp += ' (choices: %s)' % ', '.join(nameds['choices'])
elif isinstance(extra_detail, string_type) and len(extra_detail) == 1:
args.insert(0, "-%s" % extra_detail)
elif isinstance(extra_detail, (dict,)):
# extract any optcomplete completer hints
completer = extra_detail.pop('completer', None)
# add remainder
passed_kwargs.update(extra_detail)
else:
self.log.raiseException("add_group_parser: unknown extra detail %s" % extra_detail)
# add help
nameds['help'] = _gettext(hlp)
if hasattr(self.parser.option_class, 'ENABLE') and hasattr(self.parser.option_class, 'DISABLE'):
if action in self.parser.option_class.BOOLEAN_ACTIONS:
args.append("--%s-%s" % (self.parser.option_class.ENABLE, opt_name))
args.append("--%s-%s" % (self.parser.option_class.DISABLE, opt_name))
# force passed_kwargs as final nameds
nameds.update(passed_kwargs)
opt = opt_grp.add_option(*args, **nameds)
if completer is not None:
opt.completer = completer
self.parser.add_option_group(opt_grp)
# map between prefix and sectionnames
prefix_section_names = self.config_prefix_sectionnames_map.setdefault(prefix, [])
if section_name not in prefix_section_names:
prefix_section_names.append(section_name)
self.log.debug("Added prefix %s to list of sectionnames for %s" % (prefix, section_name))
def default_parseoptions(self):
    """Return the default arguments to parse: everything on the command line after the program name."""
    argv = sys.argv
    return argv[1:]
def autocomplete(self):
    """Set the autocompletion magic via optcomplete"""
    # very basic for now, no special options
    if self.autocompleter is None:
        # autocompletion explicitly disabled for this instance
        self.log.debug('self.autocompleter is None, disabling autocompleter')
    else:
        self.log.debug('setting autocomplete with args %s' % self.autocompleter)
        # NOTE: the unqualified call resolves to the module-level autocomplete()
        # helper (this method of the same name shadows it only as an attribute);
        # self.autocompleter is expanded as its keyword arguments
        autocomplete(self.parser, **self.autocompleter)
def parseoptions(self, options_list=None):
    """Parse the options.

    :param options_list: list of arguments to parse; defaults to
        self.default_parseoptions() (i.e. sys.argv[1:]) when None.
    Sets self.options and self.args from the parser result.
    """
    if options_list is None:
        options_list = self.default_parseoptions()

    self.autocomplete()

    try:
        (self.options, self.args) = self.parser.parse_args(options_list)
    except SystemExit as err:
        # optparse signals errors (and --help) via SystemExit; either swallow
        # it (no_system_exit mode, e.g. for testing) or propagate the exit code
        self.log.debug("parseoptions: parse_args err %s code %s" % (err, err.code))
        if self.no_system_exit:
            return
        else:
            sys.exit(err.code)

    self.log.debug("parseoptions: options from environment %s" % (self.parser.environment_arguments))
    self.log.debug("parseoptions: options from commandline %s" % (self.parser.commandline_arguments))

    # args should be empty, since everything is optional
    if len(self.args) > 1:
        self.log.debug("Found remaining args %s" % self.args)
        if self.ALLOPTSMANDATORY:
            # strict mode: leftover positional arguments are an error
            self.parser.error("Invalid arguments args %s" % self.args)

    self.log.debug("Found options %s args %s" % (self.options, self.args))
def configfile_parser_init(self, initenv=None):
    """
    Initialise the configparser to use.

    :params initenv: insert initial environment into the configparser.
        It is a dict of dicts; the first level key is the section name;
        the 2nd level key,value is the key=value.
        All section names, keys and values are converted to strings.
    """
    self.configfile_parser = self.CONFIGFILE_PARSER()

    # make case sensitive
    if self.CONFIGFILE_CASESENSITIVE:
        # optionxform is the key-normalisation hook of ConfigParser;
        # the identity function (str) preserves case
        self.log.debug('Initialise case sensitive configparser')
        self.configfile_parser.optionxform = str
    else:
        self.log.debug('Initialise case insensitive configparser')
        self.configfile_parser.optionxform = str.lower

    # insert the initenv in the parser
    if initenv is None:
        initenv = {}

    for name, section in initenv.items():
        name = str(name)
        if name == self.DEFAULTSECT:
            # is protected/reserved (and hidden); ConfigParser rejects
            # add_section() for its default section, but set() still works below
            pass
        elif not self.configfile_parser.has_section(name):
            self.configfile_parser.add_section(name)

        for key, value in section.items():
            self.configfile_parser.set(name, str(key), str(value))
def parseconfigfiles(self):
    """Parse configfiles.

    Reads all configured config files, then re-feeds the values found there
    through the command-line parser (so type checking / actions apply), and
    finally merges the results into self.options without overriding values
    that were explicitly set on the real command line (_action_taken).
    """
    if not self.use_configfiles:
        self.log.debug('parseconfigfiles: use_configfiles False, skipping configfiles')
        return

    if self.configfiles is None:
        self.configfiles = []
    self.log.debug("parseconfigfiles: configfiles initially set %s" % self.configfiles)

    option_configfiles = self.options.__dict__.get('configfiles', [])  # empty list, will win so no defaults
    option_ignoreconfigfiles = self.options.__dict__.get('ignoreconfigfiles', self.CONFIGFILES_IGNORE)

    self.log.debug("parseconfigfiles: configfiles set through commandline %s" % option_configfiles)
    self.log.debug("parseconfigfiles: ignoreconfigfiles set through commandline %s" % option_ignoreconfigfiles)

    if option_configfiles is not None:
        self.configfiles.extend(option_configfiles)

    if option_ignoreconfigfiles is None:
        option_ignoreconfigfiles = []

    # Configparser fails on broken config files
    # - if config file doesn't exist, it's no issue
    configfiles = []
    for fn in self.configfiles:
        if not os.path.isfile(fn):
            if self.CONFIGFILES_RAISE_MISSING:
                self.log.raiseException("parseconfigfiles: configfile %s not found." % fn)
            else:
                self.log.debug("parseconfigfiles: configfile %s not found, will be skipped" % fn)

        if fn in option_ignoreconfigfiles:
            self.log.debug("parseconfigfiles: configfile %s will be ignored", fn)
        else:
            configfiles.append(fn)

    try:
        parsed_files = self.configfile_parser.read(configfiles)
    except Exception:
        self.log.raiseException("parseconfigfiles: problem during read")

    self.log.debug("parseconfigfiles: following files were parsed %s" % parsed_files)
    self.log.debug("parseconfigfiles: following files were NOT parsed %s" %
                   [x for x in configfiles if x not in parsed_files])
    self.log.debug("parseconfigfiles: sections (w/o %s) %s" %
                   (self.DEFAULTSECT, self.configfile_parser.sections()))

    # walk through list of section names
    # - look for options set though config files
    configfile_values = {}
    configfile_options_default = {}
    configfile_cmdline = []
    configfile_cmdline_dest = []  # expected destinations

    # won't parse
    cfg_sections = self.config_prefix_sectionnames_map.values()  # without DEFAULT
    for section in cfg_sections:
        # NOTE(review): section is drawn from the very mapping it is tested
        # against, so this membership check can never fail and the warning
        # never fires — presumably this was meant to iterate
        # self.configfile_parser.sections() instead; confirm upstream.
        if section not in self.config_prefix_sectionnames_map.values():
            self.log.warning("parseconfigfiles: found section %s, won't be parsed" % section)
            continue

    # add any non-option related configfile data to configfile_remainder dict
    cfg_sections_flat = [name for section_names in cfg_sections for name in section_names]
    for section in self.configfile_parser.sections():
        if section not in cfg_sections_flat:
            self.log.debug("parseconfigfiles: found section %s, adding to remainder" % section)
            remainder = self.configfile_remainder.setdefault(section, {})

            # parse the remaining options, sections starting with 'raw_'
            # as their name will be considered raw sections
            for opt, val in self.configfile_parser.items(section, raw=(section.startswith('raw_'))):
                remainder[opt] = val

    # options are passed to the commandline option parser
    for prefix, section_names in self.config_prefix_sectionnames_map.items():
        for section in section_names:
            # default section is treated separate in ConfigParser
            if not self.configfile_parser.has_section(section):
                self.log.debug('parseconfigfiles: no section %s' % str(section))
                continue
            elif section == ExtOptionGroup.NO_SECTION:
                self.log.debug('parseconfigfiles: ignoring NO_SECTION %s' % str(section))
                continue
            elif section.lower() == 'default':
                self.log.debug('parseconfigfiles: ignoring default section %s' % section)
                continue

            for opt, val in self.configfile_parser.items(section):
                self.log.debug('parseconfigfiles: section %s option %s val %s' % (section, opt, val))
                opt_name, opt_dest = self.make_options_option_name_and_destination(prefix, opt)

                actual_option = self.parser.get_option_by_long_name(opt_name)
                if actual_option is None:
                    # don't fail on DEFAULT UPPERCASE options in case-sensitive mode.
                    in_def = self.configfile_parser.has_option(self.DEFAULTSECT, opt)
                    if in_def and self.CONFIGFILE_CASESENSITIVE and opt == opt.upper():
                        self.log.debug(('parseconfigfiles: no option corresponding with '
                                        'opt %s dest %s in section %s but found all uppercase '
                                        'in DEFAULT section. Skipping.') % (opt, opt_dest, section))
                        continue
                    else:
                        self.log.raiseException(('parseconfigfiles: no option corresponding with '
                                                 'opt %s dest %s in section %s') % (opt, opt_dest, section))

                configfile_options_default[opt_dest] = actual_option.default

                # log actions require special care
                # if any log action was already taken before, it would precede the one from the configfile
                # however, multiple logactions in a configfile (or environment for that matter) have
                # undefined behaviour
                is_log_action = actual_option.action in ExtOption.EXTOPTION_LOG
                log_action_taken = getattr(self.options, '_logaction_taken', False)

                if is_log_action and log_action_taken:
                    # value set through take_action. do not modify by configfile
                    self.log.debug(('parseconfigfiles: log action %s (value %s) found,'
                                    ' but log action already taken. Ignoring.') % (opt_dest, val))
                elif actual_option.action in ExtOption.BOOLEAN_ACTIONS:
                    # booleans are read via getboolean (accepts yes/no/1/0/...),
                    # then re-expressed as --enable-X / --disable-X when supported
                    try:
                        newval = self.configfile_parser.getboolean(section, opt)
                        self.log.debug(('parseconfigfiles: getboolean for option %s value %s '
                                        'in section %s returned %s') % (opt, val, section, newval))
                    except Exception:
                        self.log.raiseException(('parseconfigfiles: failed to getboolean for option %s value %s '
                                                 'in section %s') % (opt, val, section))

                    if hasattr(self.parser.option_class, 'ENABLE') and hasattr(self.parser.option_class, 'DISABLE'):
                        if newval:
                            cmd_template = "--enable-%s"
                        else:
                            cmd_template = "--disable-%s"
                        configfile_cmdline_dest.append(opt_dest)
                        configfile_cmdline.append(cmd_template % opt_name)
                    else:
                        self.log.debug(("parseconfigfiles: no enable/disable, not trying to set boolean-valued "
                                        "option %s via cmdline, just setting value to %s" % (opt_name, newval)))
                        configfile_values[opt_dest] = newval
                else:
                    # non-boolean options are replayed verbatim as --name value
                    configfile_cmdline_dest.append(opt_dest)
                    configfile_cmdline.append("--%s" % opt_name)
                    configfile_cmdline.append(val)

    # reparse
    self.log.debug('parseconfigfiles: going to parse options through cmdline %s' % configfile_cmdline)
    try:
        # can't reprocress the environment, since we are not reporcessing the commandline either
        self.parser.process_env_options = False
        (parsed_configfile_options, parsed_configfile_args) = self.parser.parse_args(configfile_cmdline)
        self.parser.process_env_options = True
    except Exception:
        self.log.raiseException('parseconfigfiles: failed to parse options through cmdline %s' %
                                configfile_cmdline)

    # re-report the options as parsed via parser
    self.log.debug("parseconfigfiles: options from configfile %s" % (self.parser.commandline_arguments))

    if len(parsed_configfile_args) > 0:
        self.log.raiseException('parseconfigfiles: not all options were parsed: %s' % parsed_configfile_args)

    for opt_dest in configfile_cmdline_dest:
        try:
            configfile_values[opt_dest] = getattr(parsed_configfile_options, opt_dest)
        except AttributeError:
            self.log.raiseException('parseconfigfiles: failed to retrieve dest %s from parsed_configfile_options' %
                                    opt_dest)

    self.log.debug('parseconfigfiles: parsed values from configfiles: %s' % configfile_values)

    for opt_dest, val in configfile_values.items():
        set_opt = False
        if not hasattr(self.options, opt_dest):
            self.log.debug('parseconfigfiles: adding new option %s with value %s' % (opt_dest, val))
            set_opt = True
        else:
            if hasattr(self.options, '_action_taken') and self.options._action_taken.get(opt_dest, None):
                # value set through take_action. do not modify by configfile
                self.log.debug('parseconfigfiles: option %s already found in _action_taken' % (opt_dest))
            else:
                self.log.debug('parseconfigfiles: option %s not found in _action_taken, setting to %s' %
                               (opt_dest, val))
                set_opt = True

        if set_opt:
            setattr(self.options, opt_dest, val)
            if hasattr(self.options, '_action_taken'):
                self.options._action_taken[opt_dest] = True
def make_options_option_name_and_destination(self, prefix, key):
    """Build the long option name and the optparse destination for key under prefix.

    Returns a (name, dest) tuple: name is 'prefix<SEP>key' (or just key for an
    empty prefix); dest is name with dashes replaced, since optparse
    destinations must be valid attribute names.
    """
    if prefix == '':
        name = key
    else:
        name = self.OPTIONNAME_PREFIX_SEPARATOR.join([prefix, key])
    dest = name.replace('-', '_')
    return name, dest
def _get_options_by_property(self, prop_type, prop_value):
"""Return all options with property type equal to value"""
if prop_type not in self.PROCESSED_OPTIONS_PROPERTIES:
self.log.raiseException('Invalid prop_type %s for PROCESSED_OPTIONS_PROPERTIES %s' %
(prop_type, self.PROCESSED_OPTIONS_PROPERTIES))
prop_idx = self.PROCESSED_OPTIONS_PROPERTIES.index(prop_type)
# get all options with prop_type
options = {}
for key in [dest for dest, props in self.processed_options.items() if props[prop_idx] == prop_value]:
options[key] = getattr(self.options, key, None) # None? isn't there always a default
return options
def get_options_by_prefix(self, prefix):
    """Get all options that were set with prefix. Return a dict whose keys
    have the 'prefix<SEP>' part stripped off."""
    if prefix:
        strip = len(prefix) + len(self.OPTIONNAME_PREFIX_SEPARATOR)
    else:
        # empty prefix: keys are returned unchanged
        strip = 0
    selected = self._get_options_by_property('prefix', prefix)
    return dict((dest[strip:], value) for dest, value in selected.items())
def get_options_by_section(self, section):
    """Get all options from section. Return a dict."""
    # thin delegate to the generic property-based lookup
    return self._get_options_by_property('section_name', section)
def postprocess(self):
    """Some additional processing.

    Hook for subclasses, called after option parsing; default is a no-op.
    """
    pass
def validate(self):
    """Final step, allows for validating the options and/or args.

    Hook for subclasses; default is a no-op.
    """
    pass
def dict_by_prefix(self, merge_empty_prefix=False):
    """Break the options dict by prefix; return nested dict.

    :param merge_empty_prefix : boolean (default False) also (try to) merge the empty
        prefix in the root of the dict. If there is a non-prefixed optionname
        that matches a prefix, it will be rejected and error will be logged.
    """
    subdict = {}

    # nub (module-level helper) deduplicates the prefixes while keeping order
    prefix_idx = self.PROCESSED_OPTIONS_PROPERTIES.index('prefix')
    for prefix in nub([props[prefix_idx] for props in self.processed_options.values()]):
        subdict[prefix] = self.get_options_by_prefix(prefix)

    if merge_empty_prefix and '' in subdict:
        self.log.debug("dict_by_prefix: merge_empty_prefix set")
        for opt, val in subdict[''].items():
            if opt in subdict:
                # a non-prefixed option clashing with a prefix name would be
                # silently shadowed; refuse and log instead
                self.log.error("dict_by_prefix: non-prefixed option %s conflicts with prefix of same name." % opt)
            else:
                subdict[opt] = val

    self.log.debug("dict_by_prefix: subdict %s" % subdict)
    return subdict
def generate_cmd_line(self, ignore=None, add_default=None):
    """Create the commandline options that would create the current self.options.
    The result is sorted on the destination names.

    :param ignore : regex on destination
    :param add_default : print value that are equal to default

    Returns a list of '--name[=value]' strings; values are shell-quoted.
    """
    if ignore is not None:
        self.log.debug("generate_cmd_line ignore %s" % ignore)
        ignore = re.compile(ignore)
    else:
        self.log.debug("generate_cmd_line no ignore")

    args = []
    opt_dests = sorted(self.options.__dict__)

    for opt_dest in opt_dests:
        # help is store_or_None, but is not a processed option, so skip it
        if opt_dest in ExtOption.EXTOPTION_HELP:
            continue
        opt_value = self.options.__dict__[opt_dest]
        # this is the action as parsed by the class, not the actual action set in option
        # (eg action store_or_None is shown here as store_or_None, not as callback)
        typ = self.processed_options[opt_dest][self.PROCESSED_OPTIONS_PROPERTIES.index('type')]
        default = self.processed_options[opt_dest][self.PROCESSED_OPTIONS_PROPERTIES.index('default')]
        action = self.processed_options[opt_dest][self.PROCESSED_OPTIONS_PROPERTIES.index('action')]
        opt_name = self.processed_options[opt_dest][self.PROCESSED_OPTIONS_PROPERTIES.index('opt_name')]

        if ignore is not None and ignore.search(opt_dest):
            self.log.debug("generate_cmd_line adding %s value %s matches ignore. Not adding to args." %
                           (opt_name, opt_value))
            continue

        if opt_value == default:
            # do nothing
            # except for store_or_None and friends
            msg = ''
            if not (add_default or action in ('store_or_None',)):
                msg = ' Not adding to args.'
            self.log.debug("generate_cmd_line adding %s value %s default found.%s" %
                           (opt_name, opt_value, msg))
            if not (add_default or action in ('store_or_None',)):
                continue

        if opt_value is None:
            # do nothing
            self.log.debug("generate_cmd_line adding %s value %s. None found. not adding to args." %
                           (opt_name, opt_value))
            continue

        # dispatch per action class: store_or_*, booleans/log, add*, lists, append, regex, plain store
        if action in ExtOption.EXTOPTION_STORE_OR:
            if opt_value == default:
                # bare flag: --name without value triggers the store_or default
                self.log.debug("generate_cmd_line %s adding %s (value is default value %s)" %
                               (action, opt_name, opt_value))
                args.append("--%s" % (opt_name))
            else:
                self.log.debug("generate_cmd_line %s adding %s non-default value %s" %
                               (action, opt_name, opt_value))
                if typ in ExtOption.TYPE_STRLIST:
                    sep, _, _ = what_str_list_tuple(typ)
                    args.append("--%s=%s" % (opt_name, shell_quote(sep.join(opt_value))))
                else:
                    args.append("--%s=%s" % (opt_name, shell_quote(opt_value)))
        elif action in ("store_true", "store_false",) + ExtOption.EXTOPTION_LOG:
            # not default!
            self.log.debug("generate_cmd_line adding %s value %s. store action found" %
                           (opt_name, opt_value))
            if (action in ('store_true',) + ExtOption.EXTOPTION_LOG and default is True and opt_value is False) or \
               (action in ('store_false',) and default is False and opt_value is True):
                # the inverse of the default can only be expressed via --disable-name
                if hasattr(self.parser.option_class, 'ENABLE') and hasattr(self.parser.option_class, 'DISABLE'):
                    args.append("--%s-%s" % (self.parser.option_class.DISABLE, opt_name))
                else:
                    self.log.error(("generate_cmd_line: %s : can't set inverse of default %s with action %s "
                                    "with missing ENABLE/DISABLE in option_class") %
                                   (opt_name, default, action))
            else:
                if opt_value == default and ((action in ('store_true',) +
                                              ExtOption.EXTOPTION_LOG and default is False) or
                                             (action in ('store_false',) and default is True)):
                    if hasattr(self.parser.option_class, 'ENABLE') and \
                       hasattr(self.parser.option_class, 'DISABLE'):
                        args.append("--%s-%s" % (self.parser.option_class.DISABLE, opt_name))
                    else:
                        self.log.debug(("generate_cmd_line: %s : action %s can only set to inverse of default %s "
                                        "and current value is default. Not adding to args.") %
                                       (opt_name, action, default))
                else:
                    args.append("--%s" % opt_name)
        elif action in ("add", "add_first", "add_flex"):
            if default is not None:
                # strip the default part from the value so only the added part is emitted
                if action == 'add_flex' and default:
                    for ind, elem in enumerate(opt_value):
                        if elem == default[0] and opt_value[ind:ind + len(default)] == default:
                            empty = get_empty_add_flex(opt_value, self=self)
                            # TODO: this will only work for tuples and lists
                            opt_value = opt_value[:ind] + type(opt_value)([empty]) + opt_value[ind + len(default):]
                            # only the first occurence
                            break
                elif hasattr(opt_value, '__neg__'):
                    # numeric-like values: subtract the default
                    if action == 'add_first':
                        opt_value = opt_value + -default
                    else:
                        opt_value = -default + opt_value
                elif hasattr(opt_value, '__getslice__'):
                    # sequence-like values: slice the default off the proper end
                    if action == 'add_first':
                        opt_value = opt_value[:-len(default)]
                    else:
                        opt_value = opt_value[len(default):]

            if typ in ExtOption.TYPE_STRLIST:
                sep, klass, helpsep = what_str_list_tuple(typ)
                restype = '%s-separated %s' % (helpsep, klass.__name__)
                value = sep.join(opt_value)
            else:
                restype = 'string'
                value = opt_value

            if not opt_value:
                # empty strings, empty lists, 0
                self.log.debug('generate_cmd_line no value left, skipping.')
                continue

            self.log.debug("generate_cmd_line adding %s value %s. %s action, return as %s" %
                           (opt_name, opt_value, action, restype))
            args.append("--%s=%s" % (opt_name, shell_quote(value)))
        elif typ in ExtOption.TYPE_STRLIST:
            sep, _, _ = what_str_list_tuple(typ)
            args.append("--%s=%s" % (opt_name, shell_quote(sep.join(opt_value))))
        elif action in ("append",):
            # add multiple times
            self.log.debug("generate_cmd_line adding %s value %s. append action, return as multiple args" %
                           (opt_name, opt_value))
            args.extend(["--%s=%s" % (opt_name, shell_quote(v)) for v in opt_value])
        elif action in ("regex",):
            self.log.debug("generate_cmd_line adding %s regex pattern %s" % (opt_name, opt_value.pattern))
            args.append("--%s=%s" % (opt_name, shell_quote(opt_value.pattern)))
        else:
            self.log.debug("generate_cmd_line adding %s value %s" % (opt_name, opt_value))
            args.append("--%s=%s" % (opt_name, shell_quote(opt_value)))

    self.log.debug("commandline args %s" % args)
    return args
class SimpleOptionParser(ExtOptionParser):
    """ExtOptionParser variant that includes the main script's docstring in the generated help."""
    DESCRIPTION_DOCSTRING = True
class SimpleOption(GeneralOption):
    """Single-level GeneralOption: all options live in one non-prefixed group."""
    PARSER = SimpleOptionParser
    SETROOTLOGGER = True

    def __init__(self, go_dict=None, descr=None, short_groupdescr=None, long_groupdescr=None, config_files=None):
        """Initialisation
        :param go_dict : General Option option dict
        :param short_descr : short description of main options
        :param long_descr : longer description of main options
        :param config_files : list of configfiles to read options from

        a general options dict has as key the long option name, and is followed by a list/tuple
        mandatory are 4 elements : option help, type, action, default
        a 5th element is optional and is the short help name (if any)

        the generated help will include the docstring
        """
        self.go_dict = go_dict
        if short_groupdescr is None:
            short_groupdescr = 'Main options'
        if long_groupdescr is None:
            long_groupdescr = ''
        self.descr = [short_groupdescr, long_groupdescr]

        kwargs = {
            'go_prefixloggername': True,
            'go_mainbeforedefault': True,
        }
        if config_files is not None:
            # only pass go_configfiles when explicitly given, so the
            # GeneralOption default is preserved otherwise
            kwargs['go_configfiles'] = config_files

        super(SimpleOption, self).__init__(**kwargs)

        if descr is not None:
            # TODO: as there is no easy/clean way to access the version of the vsc-base package,
            # this is equivalent to a warning
            self.log.deprecated('SimpleOption descr argument', '2.5.0', '3.0.0')

    def main_options(self):
        """Add the go_dict options as a single non-prefixed group."""
        if self.go_dict is not None:
            prefix = None
            self.add_group_parser(self.go_dict, self.descr, prefix=prefix)
def simple_option(go_dict=None, descr=None, short_groupdescr=None, long_groupdescr=None, config_files=None):
    """A function that returns a single level GeneralOption option parser

    :param go_dict : General Option option dict
    :param short_descr : short description of main options
    :param long_descr : longer description of main options
    :param config_files : list of configfiles to read options from

    a general options dict has as key the long option name, and is followed by a list/tuple
    mandatory are 4 elements : option help, type, action, default
    a 5th element is optional and is the short help name (if any)

    the generated help will include the docstring
    """
    # plain convenience wrapper around the SimpleOption constructor
    parser = SimpleOption(go_dict=go_dict,
                          descr=descr,
                          short_groupdescr=short_groupdescr,
                          long_groupdescr=long_groupdescr,
                          config_files=config_files)
    return parser
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/mobile/tatooine_npc/shared_capt_loftus.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/tatooine_npc/shared_capt_loftus.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
soundcloud/essentia | refs/heads/master | src/examples/python/streaming_extractor/tuningfrequency.py | 10 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
#! /usr/bin/env python
import sys, os
import essentia, essentia.standard, essentia.streaming
from essentia.streaming import *
# analysis frame/hop length in samples used by the tonal descriptor chain below
tonalFrameSize = 4096
tonalHopSize = 2048
class TuningFrequencyExtractor(essentia.streaming.CompositeBase):
    """Composite streaming network: audio signal -> estimated tuning frequency.

    Chain: FrameCutter -> Windowing (blackmanharris62) -> Spectrum ->
    SpectralPeaks -> TuningFrequency.
    """

    def __init__(self, frameSize=tonalFrameSize, hopSize=tonalHopSize):
        super(TuningFrequencyExtractor, self).__init__()

        fc = FrameCutter(frameSize=frameSize,
                         hopSize=hopSize,
                         silentFrames='noise')
        w = Windowing(type='blackmanharris62')
        spec = Spectrum()
        # peaks restricted to 40-5000 Hz, ordered by frequency as
        # TuningFrequency's inputs expect
        peaks = SpectralPeaks(maxPeaks=10000,
                              magnitudeThreshold=0.00001,
                              minFrequency=40,
                              maxFrequency=5000,
                              orderBy='frequency');
        tuning = TuningFrequency()

        # wire the streaming graph (>> connects source to sink ports)
        fc.frame >> w.frame >> spec.frame
        spec.spectrum >> peaks.spectrum
        peaks.magnitudes >> tuning.magnitudes
        peaks.frequencies >> tuning.frequencies
        # tuningCents output is discarded
        tuning.tuningCents >> None

        # define inputs:
        self.inputs['signal'] = fc.signal

        # define outputs:
        self.outputs['tuningFrequency'] = tuning.tuningFrequency
# command-line usage string shared with parse_args below
usage = 'tuningfrequency.py [options] <inputfilename> <outputfilename>'


def parse_args():
    """Parse command-line options (-c/--cpp, -d/--dot); return (options, args)."""
    import numpy
    # --version output: essentia, python and numpy versions
    essentia_version = '%s\n'\
    'python version: %s\n'\
    'numpy version: %s' % (essentia.__version__, # full version
                           sys.version.split()[0], # python major version
                           numpy.__version__) # numpy version

    from optparse import OptionParser
    parser = OptionParser(usage=usage, version=essentia_version)

    parser.add_option("-c","--cpp", action="store_true", dest="generate_cpp",
        help="generate cpp code from CompositeBase algorithm")

    parser.add_option("-d", "--dot", action="store_true", dest="generate_dot",
        help="generate dot and cpp code from CompositeBase algorithm")

    (options, args) = parser.parse_args()

    return options, args
if __name__ == '__main__':

    opts, args = parse_args()

    # exactly <inputfilename> and <outputfilename> are required; otherwise show help
    if len(args) != 2:
        cmd = './'+os.path.basename(sys.argv[0])+ ' -h'
        os.system(cmd)
        sys.exit(1)

    # optionally emit dot/cpp translations of the composite algorithm
    if opts.generate_dot:
        essentia.translate(TuningFrequencyExtractor, 'streaming_extractortuningfrequency', dot_graph=True)
    elif opts.generate_cpp:
        essentia.translate(TuningFrequencyExtractor, 'streaming_extractortuningfrequency', dot_graph=False)

    # build the streaming network: audio file -> extractor -> pool
    pool = essentia.Pool()
    loader = essentia.streaming.MonoLoader(filename=args[0])
    tuning = TuningFrequencyExtractor()
    loader.audio >> tuning.signal
    tuning.tuningFrequency >> (pool, 'tuning_frequency')
    essentia.run(loader)

    # aggregate per-frame values and write YAML to the output file
    stats = ['mean', 'var', 'min', 'max', 'dmean', 'dmean2', 'dvar', 'dvar2']
    poolStats = essentia.standard.PoolAggregator(defaultStats=stats)(pool)

    essentia.standard.YamlOutput(filename=args[1])(poolStats)
|
les69/calvin-base | refs/heads/master | calvin/actorstore/systemactors/exception/ExceptionHandler.py | 3 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.runtime.north.calvin_token import EOSToken, ExceptionToken
class ExceptionHandler(Actor):

    """
    Scan tokens for Exceptions.

    Any non-exception or EOS is simply passed on. Exceptions other than EOS are replaced
    with an EOS token on the ouput 'token' port unless optional 'replace' argument is true,
    in which case 'replacement' argument (defaults to null) is produced.
    Any exception (including EOS) are produces its reason on the 'status' output port.

    Inputs:
      token  : any token
    Outputs:
      token  : input token or EOS/replacement on exception
      status : reason for any exception tokens encountered (including EOS)
    """

    def exception_handler(self, action, args, context):
        # called by the framework when an input token is an exception token;
        # stash the exception and queue an EOS for the next production
        try:
            e = args[context['exceptions']['token'][0]]
        except:
            # broad fallback: if the exception token can't be located,
            # substitute a generic ExceptionToken
            e = ExceptionToken()
        self.status = e
        self.token = EOSToken()
        return ActionResult()

    @manage(['status', 'token', 'replace', 'replacement'])
    def init(self, replace=False, replacement=None):
        self.replace = replace
        self.replacement = replacement
        self.status = None
        self.token = None

    # fires when both a pending token and a pending exception status exist
    @condition([], ['token', 'status'])
    @guard(lambda self: self.token and self.status)
    def produce_with_exception(self):
        tok = self.replacement if self.replace else self.token
        status = self.status
        self.token = None
        self.status = None
        return ActionResult(production=(tok, status.value))

    # fires when a plain (non-exception) token is pending
    @condition([], ['token'])
    @guard(lambda self: self.token and not self.status)
    def produce(self):
        tok = self.token
        self.token = None
        return ActionResult(production=(tok,))

    # accepts the next input token only when nothing is pending
    @condition(['token'])
    @guard(lambda self, tok: not self.status)
    def consume(self, tok):
        self.token = tok
        self.status = None
        return ActionResult()

    action_priority = (produce_with_exception, produce, consume)

    # framework self-test vectors: per case, 'in' tokens and expected 'out' per port
    test_set = [
        {  # normal token
            'in': {'token': 42},
            'out': {'token': [42], 'status':[]}
        },
        {  # Exception
            'in': {'token': ExceptionToken()},
            'out': {'token': ['End of stream'], 'status':['Exception']}
        },
        {  # Exception
            'in': {'token': EOSToken()},
            'out': {'token': ['End of stream'], 'status':['End of stream']}
        },
        {  # Exception with replace (default)
            'setup': [lambda self: self.init(replace=True)],
            'in': {'token': EOSToken()},
            'out': {'token': [None], 'status':['End of stream']}
        },
        {  # Exception with replace
            'setup': [lambda self: self.init(replace=True, replacement={})],
            'in': {'token': EOSToken()},
            'out': {'token': [{}], 'status':['End of stream']}
        },
        {  # Exception with replace
            'setup': [lambda self: self.init(replace=True, replacement={})],
            'in': {'token': ExceptionToken()},
            'out': {'token': [{}], 'status':['Exception']}
        },
    ]
|
mvpcom/CyberHandsFaraz | refs/heads/master | ROS/chugv_ws/src/chugv_control/scripts/key_teleop.py | 1 | #! /usr/bin/env python
# rosrun chugv_control key_teleop.py
import rospy, math
import numpy as np
import sys, termios, tty, select, os
from geometry_msgs.msg import Twist
class KeyTeleop(object):
    """Keyboard teleoperation node: reads single keystrokes from a raw
    terminal and publishes geometry_msgs/Twist on /cmd_vel.

    Bindings map a key to a [linear, angular] multiplier (applied to
    self.speed); set_bindings scale the speeds themselves.
    """

    # key -> [linear_sign, angular_sign] motion command
    cmd_bindings = {'q':np.array([1,1]),
                    'w':np.array([1,0]),
                    'e':np.array([1,-1]),
                    'a':np.array([0,1]),
                    'd':np.array([0,-1]),
                    'z':np.array([-1,-1]),
                    'x':np.array([-1,0]),
                    'c':np.array([-1,1])
                    }
    # key -> [linear, angular] speed adjustment direction (+/- 10% per press)
    set_bindings = { 't':np.array([1,1]),
                     'b':np.array([-1,-1]),
                     'y':np.array([1,0]),
                     'n':np.array([-1,0]),
                     'u':np.array([0,1]),
                     'm':np.array([0,-1])
                     }

    def init(self):
        # Save terminal settings
        self.settings = termios.tcgetattr(sys.stdin)
        # Initial values
        self.inc_ratio = 0.1
        self.speed = np.array([0.5, 1.0])  # [linear, angular] max speeds
        self.command = np.array([0, 0])    # current [linear, angular] command
        self.update_rate = 10  # Hz
        self.alive = True
        # Setup publisher
        self.pub_twist = rospy.Publisher('/cmd_vel', Twist)

    def fini(self):
        # Restore terminal settings
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)

    def run(self):
        """Main loop: poll a key, update state and publish, at update_rate Hz."""
        try:
            self.init()
            self.print_usage()
            r = rospy.Rate(self.update_rate) # Hz
            while not rospy.is_shutdown():
                ch = self.get_key()
                self.process_key(ch)
                self.update()
                r.sleep()
        except rospy.exceptions.ROSInterruptException:
            pass
        finally:
            # always restore the terminal, even on error/shutdown
            self.fini()

    def print_usage(self):
        msg = """
 Keyboard Teleop that Publish to /cmd_vel (geometry_msgs/Twist)
 Copyright (C) 2013
 Released under BSD License
 --------------------------------------------------
 H: Print this menu
 Moving around:
 Q W E
 A S D
 Z X Z
 T/B : increase/decrease max speeds 10%
 Y/N : increase/decrease only linear speed 10%
 U/M : increase/decrease only angular speed 10%
 anything else : stop

 G : Quit
 --------------------------------------------------
 """
        self.loginfo(msg)
        self.show_status()

    # Used to print items to screen, while terminal is in funky mode
    def loginfo(self, str):
        # temporarily restore cooked mode so print() renders normally,
        # then switch back to raw mode for keystroke capture
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
        print(str)
        tty.setraw(sys.stdin.fileno())

    # Used to print teleop status
    def show_status(self):
        msg = 'Status:\tlinear %.2f\tangular %.2f' % (self.speed[0],self.speed[1])
        self.loginfo(msg)

    # For everything that can't be a binding, use if/elif instead
    def process_key(self, ch):
        if ch == 'h':
            self.print_usage()
        elif ch in self.cmd_bindings.keys():
            self.command = self.cmd_bindings[ch]
        elif ch in self.set_bindings.keys():
            self.speed = self.speed * (1 + self.set_bindings[ch]*self.inc_ratio)
            self.show_status()
        elif ch == 'g':
            self.loginfo('Quitting')
            # Stop the robot
            twist = Twist()
            self.pub_twist.publish(twist)
            rospy.signal_shutdown('Shutdown')
        else:
            # unknown key: stop
            self.command = np.array([0, 0])

    def update(self):
        """Publish the current speed*command as a Twist message."""
        if rospy.is_shutdown():
            return
        twist = Twist()
        cmd = self.speed*self.command
        twist.linear.x = cmd[0]
        twist.angular.z = cmd[1]
        self.pub_twist.publish(twist)

    # Get input from the terminal
    def get_key(self):
        tty.setraw(sys.stdin.fileno())
        select.select([sys.stdin], [], [], 0)
        key = sys.stdin.read(1)
        return key.lower()
if __name__ == '__main__':
    # start the teleop node; run() blocks until ROS shutdown
    rospy.init_node('keyboard_teleop')
    teleop = KeyTeleop()
    teleop.run()
|
ptphp/PyLib | refs/heads/master | src/dev/web/publish/pub58.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from database import *
import urllib,urllib2,json,re,time
from BeautifulSoup import BeautifulSoup
import MySQLdb
from urlparse import urlparse
import threading
import random
import time
from Queue import Queue
import traceback
# city short code used for publishing — presumably 'sh' = Shanghai; confirm
citycode = "sh"
# module-level MySQL connection and cursor shared by all functions below
conn = MySQLdb.Connect(host="localhost",user="dbuser",passwd="201108",db="house",charset="utf8")
cr = conn.cursor()
# shared work list
Q = []
#===============================================================================
# Ganji (ganji.com) deposit / payment-term options
#===============================================================================
def depositDict():
    """Parse the deposit <select> markup and iterate its options (debug helper; output disabled)."""
    dHtml = """
<select name="pay_type_int" id="pay_type_int">
<option value="1">押一付三</option>
<option value="2">面议</option>
<option value="3">押一付一</option>
<option value="4">押一付二</option>
<option value="5">押二付一</option>
<option value="9">押二付三</option>
<option value="6">半年付不押</option>
<option value="7">年付不押</option>
<option value="8">押一付半年</option>
<option value="10">押一付年</option>
<option value="11">押二付年</option>
<option value="12">押三付年</option>
</select>
"""
    soup = BeautifulSoup(dHtml)
    options = soup("option")
    for row in options:
        pass#print '"%s":"%s",' % (row['value'],row.string)

# value -> label mapping generated from the markup above (deposit/payment terms)
option_deposit = {
    "1":"押一付三",
    "2":"面议",
    "3":"押一付一",
    "4":"押一付二",
    "5":"押二付一",
    "9":"押二付三",
    "6":"半年付不押",
    "7":"年付不押",
    "8":"押一付半年",
    "10":"押一付年",
    "11":"押二付年",
    "12":"押三付年"
}
def towardDict():
    """Return the orientation ("朝向") option mapping: code -> Chinese label.

    The dead HTML parse (its loop body was ``pass``) has been removed. The
    original printed the dict as its only observable effect; that debug print
    is preserved (in the form that works on both Python 2 and 3) and the
    mapping is now also returned for callers.
    """
    option_toward = {
        "1":"东",
        "2":"南",
        "3":"西",
        "4":"北",
        "5":"东西",
        "6":"南北",
        "7":"东南",
        "8":"东北",
        "9":"西南",
        "10":"西北"
    }
    # Preserve the original debug output.
    print(option_toward)
    return option_toward
def housTypeDict():
    """Return the housing-type ("房型") option mapping: code -> Chinese label.

    Parses the hard-coded <select> snippet and prints each option (the
    original's observable debug output, kept in py2/py3-compatible form),
    then returns the mapping, which the original built but discarded.
    """
    tHtml = """
    <select name="fang_xing" id="fang_xing" autocomplete="off">
    <option value="2">平房/四合院</option>
    <option value="3">普通住宅</option>
    <option value="4">公寓</option>
    <option value="5">商住楼</option>
    <option value="7">别墅</option>
    <option value="8">其他</option>
    </select>
    """
    soup = BeautifulSoup(tHtml)
    options = soup("option")
    for row in options:
        # Debug dump of each parsed option, same format as before.
        print('"%s":"%s",' % (row['value'], row.string))
    option_housType = {
        "2":"平房/四合院",
        "3":"普通住宅",
        "4":"公寓",
        "5":"商住楼",
        "7":"别墅",
        "8":"其他",
    }
    return option_housType
def ftimentDict():
    """Return the fitment ("装修") option mapping: code -> Chinese label.

    Parses the hard-coded <select> snippet and prints each option (the
    original's observable debug output, kept in py2/py3-compatible form),
    then returns the mapping, which the original built but discarded.
    """
    fHtml = """
    <select name="zhuangxiu" id="zhuangxiu" autocomplete="off">
    <option value="0">装修情况</option>
    <option value="1">豪华装修</option>
    <option value="2">精装修</option>
    <option value="3">中等装修</option>
    <option value="4">简单装修</option>
    <option value="5">毛坯</option>
    </select>
    """
    soup = BeautifulSoup(fHtml)
    options = soup("option")
    for row in options:
        # Debug dump of each parsed option, same format as before.
        print('"%s":"%s",' % (row['value'], row.string))
    option_fitment = {
        "1":"豪华装修",
        "2":"精装修",
        "3":"中等装修",
        "4":"简单装修",
        "5":"毛坯",
    }
    return option_fitment
def cityArea(city):
    """Scrape Ganji district/street options for *city* into region/section tables.

    Fetches the publish form to read the district <select>, then posts each
    district id to the street-options AJAX endpoint and inserts the returned
    streets into ``section_gj``. Assumes the region_gj/section_gj tables from
    the DDL below already exist.
    """
    # NOTE(review): this DDL string is immediately overwritten by the INSERT
    # built in the loop below and is never executed -- it only documents the
    # expected schema. Confirm the tables are created elsewhere.
    sql = """
    DROP TABLE IF EXISTS `region_gj`;
    CREATE TABLE IF NOT EXISTS `region_gj` (
      `id` int(11) NOT NULL AUTO_INCREMENT,
      `regionname` varchar(25) NOT NULL,
      `regioncode` varchar(25) NULL,
      `regionvalue` int(11) NULL,
      `city` int(11) NULL,
      PRIMARY KEY (`id`)
    ) ENGINE=InnoDB  DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;
    DROP TABLE IF EXISTS `section_gj`;
    CREATE TABLE IF NOT EXISTS `section_gj` (
      `id` int(11) NOT NULL AUTO_INCREMENT,
      `sectionname` varchar(25) NOT NULL,
      `sectioncode` varchar(25) NULL,
      `sectionvalue` int(11) NULL,
      `region` int(11) NULL,
      PRIMARY KEY (`id`)
    ) ENGINE=InnoDB  DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;
    DROP TABLE IF EXISTS `borough_gj`;
    CREATE TABLE IF NOT EXISTS `borough_gj` (
      `id` int(11) NOT NULL AUTO_INCREMENT,
      `name` varchar(80) NOT NULL,
      `letter` varchar(80) NOT NULL,
      `addr` varchar(180) NULL,
      `region` varchar(50) NULL,
      `section` varchar(50) NULL,
      `region_id` int(11) NULL,
      `section_id` int(11) NULL,
      `url` varchar(250) NULL,
      PRIMARY KEY (`id`)
    ) ENGINE=InnoDB  DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;
    """
    pubUrl = "http://%s.ganji.com/common/pub.php?category=housing&type=5" % city
    streetUrl = "http://%s.ganji.com/ajax/streetOptions.php" % city
    #domain=sh&district_id=2&street_id=-1&with_all_option=1
    req = urllib2.Request(pubUrl)
    res = urllib2.urlopen(req).read()
    soup = BeautifulSoup(res)
    # The district <select> on the publish form: option values are "id,name".
    select = soup("select",{"name":"district_id"})
    soup = BeautifulSoup(str(select))
    options = soup("option")
    for row in options:
        if row['value']:
            t = row['value'].split(",")
            sql = """insert into region_gj values(null,'%s',null,%d,1);""" % (t[1],int(t[0]))
            # Region inserts are disabled; presumably region_gj was populated
            # on an earlier run -- confirm before re-enabling.
            #cr.execute(sql)
            #conn.commit()
    sql = "select id,regionvalue from region_gj"
    cr.execute(sql)
    r = cr.fetchall()
    for jj in r:
        # Ask the AJAX endpoint for this district's street list.
        postData = {}
        postData['domain'] = "sh"
        postData['district_id'] = jj[1]
        postData['street_id'] = "-1"
        postData['with_all_option'] = "1"
        queryData = urllib.urlencode(postData)
        #print postData
        req = urllib2.Request(streetUrl)
        res = urllib2.urlopen(req,queryData).read()
        j = json.loads(res)
        for h in j:
            print h[0],h[1]
            # h is (street_id, street_name); negative ids are placeholders.
            if h[0] >= 0:
                sql = """insert into section_gj values(null,'%s',null,%d,%d);""" % (h[1],int(h[0]),jj[0])
                print sql
                cr.execute(sql)
                conn.commit()
def getBorgoughContent(url):
    """Scrape one 58.com borough ("小区") detail page into a dict and queue it.

    Fetches ``url + "jieshao/"``, extracts name/letter/address/region/section,
    and appends the resulting dict to the shared queue ``Q`` (deduplicated).

    Fixes over the original:
    - ``del soup_area`` / ``del l`` executed even when the 板块 (area) cell was
      missing, so those names were unbound and every such page raised
      NameError and was silently dropped. The manual ``del`` statements were
      unnecessary (function locals are freed on return) and are removed.
    - An area cell with no <a> links no longer raises IndexError; region and
      section default to ''.
    """
    info = {}
    parts = url.split("/")
    info["url"] = url
    url = url + "jieshao/"
    res = urllib2.urlopen(url).read()
    # Strip whitespace so the regexes below can match across original line breaks.
    res = re.sub("\n|\r|\t| ", "", res)
    soup = BeautifulSoup(res)
    info["name"] = soup.h1.string.replace("介绍", "")
    print(url)
    # URL ends with a slash, so the second-to-last path segment is the
    # borough's pinyin slug.
    info["letter"] = parts[-2]
    info["addr"] = ''
    addr_match = re.search(">地址:</td><td width=\"300\">(.*?)<", res)
    if addr_match:
        info["addr"] = addr_match.group(1)
    info['region'] = ''
    info['section'] = ''
    area_match = re.search(">板块:</td><td>(.*?)</td>", res)
    if area_match:
        links = BeautifulSoup(area_match.group(1))("a")
        if links:
            info['region'] = links[0].string
            if len(links) > 1:
                info['section'] = links[1].string
    if info not in Q:
        Q.append(info)
def test_job(url, sleep = 0.001 ):
    """Scrape a single borough URL, swallowing any error, and echo the URL back."""
    try:
        getBorgoughContent(url)
    except:
        # Log the full traceback but keep the batch running.
        print(traceback.format_exc())
    return url
def getBoroughUrl(url):
    """Fetch one 58.com listing page and spawn a Producer thread per borough link."""
    import socket
    # Guard against hung HTTP reads in the worker threads.
    socket.setdefaulttimeout(10)
    res = urllib2.urlopen(url).read()
    soup = BeautifulSoup(res)
    # Borough detail links carry class="t" on the listing page.
    a = soup.findAll("a",{"class":"t"})
    workerArr = []
    j = 0
    for i in a:
        bUrl = i['href']
        workerArr.append(Producer(bUrl))
        # Random 0-0.9s jitter between thread starts to avoid hammering the site.
        time.sleep(random.randrange(10)/10.0)
        workerArr[j].start()
        j = j+1
def getBorough():
    """Start the DB consumer thread, then walk every 58.com borough listing page."""
    consumer = Consumer()
    consumer.start()
    for page in range(1611):
        getBoroughUrl("http://%s.58.com/xiaoqu/pn%d/" % (citycode, page))
class Producer(threading.Thread):
    """Worker thread that scrapes a single borough detail page into Q."""

    def __init__(self, url):
        """Remember the borough detail URL to fetch when the thread runs."""
        threading.Thread.__init__(self)
        self.url = url

    def run(self):
        # Delegate the actual scraping to the module-level helper.
        getBorgoughContent(self.url)
class Consumer(threading.Thread):
    """Drains the shared queue ``Q`` and inserts each borough row into MySQL.

    Runs forever, polling every 0.1s. Insert errors are logged and skipped so
    one bad row does not stop the pipeline.

    Fix over the original: the INSERT was built by string interpolation, so
    any scraped name/address containing a quote broke the statement (and was
    an SQL-injection vector). The query is now parameterized via the DB-API
    driver.
    """

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        while 1:
            time.sleep(0.1)
            if Q:
                row = Q.pop()
                try:
                    sql = ("insert into borough_58(name,letter,addr,region,section,url) "
                           "values(%s,%s,%s,%s,%s,%s)")
                    cr.execute(sql, (row['name'], row['letter'], row['addr'],
                                     row['region'], row['section'], row['url']))
                    conn.commit()
                except:
                    # Log and continue; a single bad row must not kill the consumer.
                    print(traceback.format_exc())
if __name__ == "__main__":
#getBorough("sh")
#getBorgoughContent("http://sh.58.com/xiaoqu/zhongyuansh/minxingercun/")
getBorough()
|
mioann47/mobile-app-privacy-analyzer | refs/heads/master | mypythonscripts/mysql/connector/fabric/__init__.py | 5 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""MySQL Fabric support"""
from collections import namedtuple
from .connection import (
MODE_READONLY, MODE_READWRITE,
STATUS_PRIMARY, STATUS_SECONDARY,
SCOPE_GLOBAL, SCOPE_LOCAL,
Fabric, FabricConnection,
MySQLFabricConnection,
FabricSet,
)
# Order of field_names must match how Fabric is returning the data
FabricMySQLServer = namedtuple(
    'FabricMySQLServer',
    'uuid group host port mode status weight'
)

# Order of field_names must match how Fabric is returning the data
FabricShard = namedtuple(
    'FabricShard',
    'database table column key shard shard_type group global_group'
)
def connect(**kwargs):
    """Create a MySQLFabricConnection object.

    All keyword arguments are forwarded unchanged to the
    MySQLFabricConnection constructor.
    """
    return MySQLFabricConnection(**kwargs)
# Names re-exported as the public API of mysql.connector.fabric.
__all__ = [
    'MODE_READWRITE',
    'MODE_READONLY',
    'STATUS_PRIMARY',
    'STATUS_SECONDARY',
    'SCOPE_GLOBAL',
    'SCOPE_LOCAL',
    'FabricMySQLServer',
    'FabricShard',
    'connect',
    'Fabric',
    'FabricConnection',
    'MySQLFabricConnection',
    'FabricSet',
]
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.5/django/conf/locale/gl/formats.py | 231 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Backslash-escaped characters (\d, \e, ...) render as literal text, so e.g.
# DATE_FORMAT produces "1 de xaneiro de 2024" (Galician).
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \d\e F \d\e Y \á\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y, H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
Shekharrajak/django-db-mailer | refs/heads/master | docs/conf.py | 3 | # -*- coding: utf-8 -*-
#
# django-db-mailer documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 18 14:55:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../demo'))
sys.path.append(os.path.abspath('../'))
# Django settings must be importable before dbmail can be imported below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'demo.settings'

from dbmail import get_version
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-db-mailer'
copyright = u'2014, '

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both are read from the dbmail package so the docs always match the code.
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []  # nothing excluded; every .rst under the source dir is built
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # code-highlighting colour scheme for literal blocks
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'  # Sphinx's built-in "default" theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-db-mailerdoc'
# -- Options for LaTeX output --------------------------------------------------
# Overrides for the LaTeX builder; all defaults are kept (see the commented
# keys below for the available knobs).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE(review): the author field is empty -- fill in before publishing PDFs.
latex_documents = [
  ('index', 'django-db-mailer.tex', u'django-db-mailer Documentation',
   u'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-db-mailer', u'django-db-mailer Documentation',
     [u'gotlium'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'django-db-mailer', u'django-db-mailer Documentation',
   u'gotlium', 'django-db-mailer', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote' |
punchagan/zulip | refs/heads/master | zerver/tests/test_integrations_dev_panel.py | 3 | from unittest.mock import MagicMock, patch
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, Stream, get_realm, get_user
class TestIntegrationsDevPanel(ZulipTestCase):
    """End-to-end tests for the /devtools/integrations webhook dev panel.

    Each test drives one dev-panel endpoint (send a single fixture, send all
    fixtures, list fixtures) against a real webhook view and then inspects the
    HTTP response and/or the most recently created Message rows.
    """

    # NOTE(review): this is evaluated once, when the class body is executed at
    # import time -- i.e. it hits the database before any per-test transaction
    # exists. Confirm this is intentional rather than a setUp-time lookup.
    zulip_realm = get_realm("zulip")

    def test_check_send_webhook_fixture_message_for_error(self) -> None:
        """An empty fixture body should make the webhook view fail with a 500."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/airbrake?api_key={bot.api_key}"
        target_url = "/devtools/integrations/check_send_webhook_fixture_message"
        body = "{}"  # This empty body should generate a KeyError on the webhook code side.

        data = {
            "url": url,
            "body": body,
            "custom_headers": "{}",
            "is_json": "true",
        }

        with self.assertLogs(level="ERROR") as logs:
            response = self.client_post(target_url, data)
            self.assertEqual(response.status_code, 500)  # Since the response would be forwarded.
            expected_response = {"result": "error", "msg": "Internal server error"}
            self.assertEqual(orjson.loads(response.content), expected_response)

        # The webhook view is expected to raise KeyError on the empty body;
        # assert the traceback was logged along with Django's 500 log line.
        self.assertTrue("KeyError" in logs.output[0])
        self.assertTrue("Traceback (most recent call last)" in logs.output[0])
        self.assertEqual(
            logs.output[1], "ERROR:django.request:Internal Server Error: /api/v1/external/airbrake"
        )

    def test_check_send_webhook_fixture_message_for_success_without_headers(self) -> None:
        """A valid JSON fixture is forwarded and produces the expected message."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/airbrake?api_key={bot.api_key}&stream=Denmark&topic=Airbrake Notifications"
        target_url = "/devtools/integrations/check_send_webhook_fixture_message"
        with open("zerver/webhooks/airbrake/fixtures/error_message.json") as f:
            body = f.read()
        data = {
            "url": url,
            "body": body,
            "custom_headers": "{}",
            "is_json": "true",
        }

        response = self.client_post(target_url, data)
        expected_response = {
            "responses": [{"status_code": 200, "message": {"result": "success", "msg": ""}}],
            "result": "success",
            "msg": "",
        }
        response_content = orjson.loads(response.content)
        # The inner message is a JSON string; decode it so dicts compare equal.
        response_content["responses"][0]["message"] = orjson.loads(
            response_content["responses"][0]["message"]
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response_content, expected_response)

        latest_msg = Message.objects.latest("id")
        expected_message = '[ZeroDivisionError](https://zulip.airbrake.io/projects/125209/groups/1705190192091077626): "Error message from logger" occurred.'
        self.assertEqual(latest_msg.content, expected_message)
        self.assertEqual(Stream.objects.get(id=latest_msg.recipient.type_id).name, "Denmark")
        self.assertEqual(latest_msg.topic_name(), "Airbrake Notifications")

    def test_check_send_webhook_fixture_message_for_success_with_headers(self) -> None:
        """Custom headers (here GitHub's event header) are forwarded to the view."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/github?api_key={bot.api_key}&stream=Denmark&topic=GitHub Notifications"
        target_url = "/devtools/integrations/check_send_webhook_fixture_message"
        with open("zerver/webhooks/github/fixtures/ping__organization.json") as f:
            body = f.read()
        data = {
            "url": url,
            "body": body,
            "custom_headers": orjson.dumps({"X_GITHUB_EVENT": "ping"}).decode(),
            "is_json": "true",
        }

        response = self.client_post(target_url, data)
        self.assertEqual(response.status_code, 200)

        latest_msg = Message.objects.latest("id")
        expected_message = "GitHub webhook has been successfully configured by eeshangarg."
        self.assertEqual(latest_msg.content, expected_message)
        self.assertEqual(Stream.objects.get(id=latest_msg.recipient.type_id).name, "Denmark")
        self.assertEqual(latest_msg.topic_name(), "GitHub Notifications")

    def test_check_send_webhook_fixture_message_for_success_with_headers_and_non_json_fixtures(
        self,
    ) -> None:
        """Non-JSON (form-encoded) fixtures are supported via is_json=false."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/wordpress?api_key={bot.api_key}&stream=Denmark&topic=WordPress Notifications"
        target_url = "/devtools/integrations/check_send_webhook_fixture_message"
        with open("zerver/webhooks/wordpress/fixtures/publish_post_no_data_provided.txt") as f:
            body = f.read()
        data = {
            "url": url,
            "body": body,
            "custom_headers": orjson.dumps(
                {"Content-Type": "application/x-www-form-urlencoded"}
            ).decode(),
            "is_json": "false",
        }

        response = self.client_post(target_url, data)
        self.assertEqual(response.status_code, 200)

        latest_msg = Message.objects.latest("id")
        expected_message = "New post published:\n* [New WordPress Post](WordPress Post URL)"
        self.assertEqual(latest_msg.content, expected_message)
        self.assertEqual(Stream.objects.get(id=latest_msg.recipient.type_id).name, "Denmark")
        self.assertEqual(latest_msg.topic_name(), "WordPress Notifications")

    def test_get_fixtures_for_nonexistant_integration(self) -> None:
        """Requesting fixtures for an unknown integration returns a 404."""
        target_url = "/devtools/integrations/somerandomnonexistantintegration/fixtures"
        response = self.client_get(target_url)
        expected_response = {
            "msg": '"somerandomnonexistantintegration" is not a valid webhook integration.',
            "result": "error",
        }
        self.assertEqual(response.status_code, 404)
        self.assertEqual(orjson.loads(response.content), expected_response)

    @patch("zerver.views.development.integrations.os.path.exists")
    def test_get_fixtures_for_integration_without_fixtures(
        self, os_path_exists_mock: MagicMock
    ) -> None:
        """A known integration with no fixtures directory also returns a 404."""
        os_path_exists_mock.return_value = False
        target_url = "/devtools/integrations/airbrake/fixtures"
        response = self.client_get(target_url)
        expected_response = {
            "msg": 'The integration "airbrake" does not have fixtures.',
            "result": "error",
        }
        self.assertEqual(response.status_code, 404)
        self.assertEqual(orjson.loads(response.content), expected_response)

    def test_get_fixtures_for_success(self) -> None:
        """The fixtures endpoint returns a non-empty payload for airbrake."""
        target_url = "/devtools/integrations/airbrake/fixtures"
        response = self.client_get(target_url)
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(orjson.loads(response.content)["fixtures"])

    def test_get_dev_panel_page(self) -> None:
        # Just to satisfy the test suite.
        target_url = "/devtools/integrations/"
        response = self.client_get(target_url)
        self.assertEqual(response.status_code, 200)

    def test_send_all_webhook_fixture_messages_for_success(self) -> None:
        """Sending every appfollow fixture succeeds and creates one message each."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/appfollow?api_key={bot.api_key}&stream=Denmark&topic=Appfollow Bulk Notifications"
        target_url = "/devtools/integrations/send_all_webhook_fixture_messages"
        data = {
            "url": url,
            "custom_headers": "{}",
            "integration_name": "appfollow",
        }

        response = self.client_post(target_url, data)
        expected_responses = [
            {
                "fixture_name": "sample.json",
                "status_code": 200,
                "message": {"msg": "", "result": "success"},
            },
            {
                "fixture_name": "review.json",
                "status_code": 200,
                "message": {"msg": "", "result": "success"},
            },
        ]
        responses = orjson.loads(response.content)["responses"]
        for r in responses:
            r["message"] = orjson.loads(r["message"])
        self.assertEqual(response.status_code, 200)
        for r in responses:
            # We have to use this roundabout manner since the order may vary each time.
            # This is not an issue.
            self.assertTrue(r in expected_responses)
            expected_responses.remove(r)

        new_messages = Message.objects.order_by("-id")[0:2]
        expected_messages = [
            "Webhook integration was successful.\nTest User / Acme (Google Play)",
            "Acme - Group chat\nApp Store, Acme Technologies, Inc.\n★★★★★ United States\n**Great for Information Management**\nAcme enables me to manage the flow of information quite well. I only wish I could create and edit my Acme Post files in the iOS app.\n*by* **Mr RESOLUTIONARY** *for v3.9*\n[Permalink](http://appfollow.io/permalink) · [Add tag](http://watch.appfollow.io/add_tag)",
        ]
        for msg in new_messages:
            # new_messages -> expected_messages or expected_messages -> new_messages shouldn't make
            # a difference since equality is commutative.
            self.assertTrue(msg.content in expected_messages)
            expected_messages.remove(msg.content)
            self.assertEqual(Stream.objects.get(id=msg.recipient.type_id).name, "Denmark")
            self.assertEqual(msg.topic_name(), "Appfollow Bulk Notifications")

    def test_send_all_webhook_fixture_messages_for_success_with_non_json_fixtures(self) -> None:
        """Bulk-sending form-encoded fixtures forwards each view's error response."""
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/wordpress?api_key={bot.api_key}&stream=Denmark&topic=WordPress Bulk Notifications"
        target_url = "/devtools/integrations/send_all_webhook_fixture_messages"
        data = {
            "url": url,
            "custom_headers": "{}",
            "integration_name": "wordpress",
        }

        response = self.client_post(target_url, data)
        expected_responses = [
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "user_register.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "publish_post_no_data_provided.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "unknown_action_no_data.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "publish_page.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "unknown_action_no_hook_provided.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "publish_post_type_not_provided.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "wp_login.txt",
                "status_code": 400,
            },
            {
                "message": {
                    "msg": "Unknown WordPress webhook action: WordPress Action",
                    "result": "error",
                },
                "fixture_name": "publish_post.txt",
                "status_code": 400,
            },
        ]
        responses = orjson.loads(response.content)["responses"]
        for r in responses:
            r["message"] = orjson.loads(r["message"])
        self.assertEqual(response.status_code, 200)
        for r in responses:
            # We have to use this roundabout manner since the order may vary each time. This is not
            # an issue. Basically, we're trying to compare 2 lists and since we're not resorting to
            # using sets or a sorted order, we're sticking with O(n*m) time complexity for this
            # comparison (where n and m are the lengths of the two lists respectively). But since
            # this is just a unit test and more importantly n = m = some-low-number we don't really
            # care about the time complexity being what it is.
            self.assertTrue(r in expected_responses)
            expected_responses.remove(r)

    @patch("zerver.views.development.integrations.os.path.exists")
    def test_send_all_webhook_fixture_messages_for_missing_fixtures(
        self, os_path_exists_mock: MagicMock
    ) -> None:
        """Bulk-send against an integration without fixtures returns a 404."""
        os_path_exists_mock.return_value = False
        bot = get_user("webhook-bot@zulip.com", self.zulip_realm)
        url = f"/api/v1/external/appfollow?api_key={bot.api_key}&stream=Denmark&topic=Appfollow Bulk Notifications"
        data = {
            "url": url,
            "custom_headers": "{}",
            "integration_name": "appfollow",
        }
        response = self.client_post(
            "/devtools/integrations/send_all_webhook_fixture_messages", data
        )
        expected_response = {
            "msg": 'The integration "appfollow" does not have fixtures.',
            "result": "error",
        }
        self.assertEqual(response.status_code, 404)
        self.assertEqual(orjson.loads(response.content), expected_response)
|
ZhangXinNan/tensorflow | refs/heads/master | tensorflow/contrib/gan/python/eval/python/sliced_wasserstein.py | 43 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.sliced_wasserstein_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Re-export exactly the names the implementation module declares public.
__all__ = sliced_wasserstein_impl.__all__
# Strip every other name from this module's namespace.
remove_undocumented(__name__, __all__)
|
tachang/django-storages | refs/heads/master | storages/backends/image.py | 24 |
import os
from django.core.files.storage import FileSystemStorage
from django.core.exceptions import ImproperlyConfigured
try:
    from PIL import ImageFile as PILImageFile
except ImportError:
    # The old "raise Exc, msg" form is a SyntaxError on Python 3; the call
    # form below is valid on both Python 2 and 3.
    raise ImproperlyConfigured(
        "Could not load PIL dependency.\n"
        "See http://www.pythonware.com/products/pil/")
class ImageStorage(FileSystemStorage):
    """
    A FileSystemStorage which normalizes extensions for images.

    Comes from http://www.djangosnippets.org/snippets/965/
    """

    def find_extension(self, format):
        """Normalizes PIL-returned format into a standard, lowercase extension."""
        format = format.lower()

        if format == 'jpeg':
            format = 'jpg'

        return format

    def save(self, name, content):
        """Save *content* under *name*, rewriting the extension from PIL's
        detected image format when the file can be identified.

        Fix over the original: ``im`` was only bound inside the read loop, so
        a stream PIL could not identify raised NameError. Such files now keep
        the caller-supplied name instead of crashing.
        """
        dirname = os.path.dirname(name)
        basename = os.path.basename(name)

        # Use PIL to determine filetype; stop reading as soon as the parser
        # has seen enough bytes to identify the image.
        p = PILImageFile.Parser()
        im = None
        while 1:
            data = content.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                im = p.image
                break

        if im is not None:
            extension = self.find_extension(im.format)
            # Does the basename already have an extension? If so, replace it.
            # bare as in without extension
            bare_basename, _ = os.path.splitext(basename)
            basename = bare_basename + '.' + extension
            name = os.path.join(dirname, basename)

        return super(ImageStorage, self).save(name, content)
|
bufferx/tornado | refs/heads/master | tornado/test/httpclient_test.py | 1 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
    """Returns a plain-text greeting; the `name` query arg overrides "world"."""
    def get(self):
        name = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
    """Echoes the two form-encoded POST arguments back in the body."""
    def post(self):
        self.finish("Post arg1: %s, arg2: %s" % (
            self.get_argument("arg1"), self.get_argument("arg2")))
class ChunkHandler(RequestHandler):
    """Writes the body in two chunks (flush between) to exercise chunked encoding."""
    def get(self):
        self.write("asdf")
        self.flush()  # forces "asdf" out as its own chunk
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Echoes the raw Authorization request header back to the client."""
    def get(self):
        self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
    """Redirects /countdown/N to /countdown/N-1 until reaching zero."""
    def get(self, count):
        count = int(count)
        if count > 0:
            self.redirect(self.reverse_url("countdown", count - 1))
        else:
            self.write("Zero")
class EchoPostHandler(RequestHandler):
    """Echoes the raw POST body back unchanged."""
    def post(self):
        self.write(self.request.body)
class UserAgentHandler(RequestHandler):
    """Returns the request's User-Agent header (or a fixed fallback string)."""
    def get(self):
        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
    """Returns a 304 that (non-conformingly) carries a Content-Length header."""
    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)

    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class AllMethodsHandler(RequestHandler):
    """Accepts every standard verb plus custom 'OTHER'; echoes the method name."""
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)

    def method(self):
        self.write(self.request.method)
    # One shared implementation for every supported verb.
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Implementation-independent HTTP client tests.

    These tests end up getting run redundantly: once here with the default
    HTTPClient implementation, and then again in each implementation's own
    test suite (curl, simple).
    """
    def get_app(self):
        # One route per helper handler defined above.
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
        ], gzip=True)

    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        self.assertEqual(int(response.request_time), 0)

        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")

    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)

    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_chunked(self):
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")

        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)

    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                # Hand-written chunked response; blank lines are significant
                # (end of headers, chunk terminators, final 0-chunk).
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1

1
2

0

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())

    def test_streaming_stack_context(self):
        # An exception raised by streaming_callback must be caught by the
        # surrounding ExceptionStackContext, not swallowed.
        chunks = []
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)

        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_basic_auth(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_basic_auth_explicit_mode(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()

    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)

    def test_credentials_in_url(self):
        # userinfo embedded in the URL should be translated to basic auth.
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)

    def test_body_encoding(self):
        unicode_body = u("\xe9")
        byte_body = binascii.a2b_hex(b"e9")

        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))

        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u("foo"))
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

    def test_types(self):
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes_type)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)

    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []

        def header_callback(header_line):
            if header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k] = v.strip()

        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)

        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1)
        self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])

    def test_header_callback_stack_context(self):
        # Exceptions raised by header_callback are routed to the
        # surrounding ExceptionStackContext.
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def header_callback(header_line):
            if header_line.startswith('Content-Type:'):
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent')
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        client.fetch(self.get_url('/user_agent'), callback=self.stop)
        response = self.wait()
        self.assertEqual(response.body, b'TestDefaultUserAgent')
        client.close()

    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')

    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context. We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []

        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)

    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')

    @gen_test
    def test_future_http_error(self):
        try:
            yield self.http_client.fetch(self.get_url('/notfound'))
        except HTTPError as e:
            self.assertEqual(e.code, 404)
            self.assertEqual(e.response.code, 404)

    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')

    def test_all_methods(self):
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/all_methods', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT', 'PATCH']:
            response = self.fetch('/all_methods', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
        response = self.fetch('/all_methods', method='HEAD')
        self.assertEqual(response.body, b'')
        response = self.fetch('/all_methods', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'OTHER')

    @gen_test
    def test_body(self):
        # body must be empty for GET, non-empty for POST.
        hello_url = self.get_url('/hello')
        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, body='data')

        self.assertTrue('must be empty' in str(context.exception))

        with self.assertRaises(AssertionError) as context:
            yield self.http_client.fetch(hello_url, method='POST')

        self.assertTrue('must not be empty' in str(context.exception))
class RequestProxyTest(unittest.TestCase):
    """Precedence rules of _RequestProxy: request value wins over default."""
    def test_request_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """HTTPResponse.__str__ includes the class name and status code."""
    def test_str(self):
        response = HTTPResponse(HTTPRequest('http://example.com'),
                                200, headers={}, buffer=BytesIO())
        s = str(response)
        self.assertTrue(s.startswith('HTTPResponse('))
        self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
    """Runs the blocking HTTPClient against a server on a background thread."""
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        # The server runs its own IOLoop in its own thread; the sync client
        # spins a private IOLoop in the test thread.
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        # Stop the server from inside its own loop, then join the thread
        # before closing resources.
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
|
abircse06/youtube-dl | refs/heads/master | youtube_dl/extractor/mailru.py | 81 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MailRuIE(InfoExtractor):
    """youtube-dl extractor for Видео@Mail.Ru (my.mail.ru video pages)."""
    IE_NAME = 'mailru'
    IE_DESC = 'Видео@Mail.Ru'
    # Two URL shapes: the hash-fragment style (idv1) and the
    # /<user>/<album>/video/<name>/<num>.html style (idv2prefix+idv2suffix).
    _VALID_URL = r'http://(?:www\.)?my\.mail\.ru/(?:video/.*#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|(?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html)'
    _TESTS = [
        {
            'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
            'md5': 'dea205f03120046894db4ebb6159879a',
            'info_dict': {
                'id': '46301138_76',
                'ext': 'mp4',
                'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
                'timestamp': 1393232740,
                'upload_date': '20140224',
                'uploader': 'sonypicturesrus',
                'uploader_id': 'sonypicturesrus@mail.ru',
                'duration': 184,
            },
        },
        {
            'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
            'md5': '00a91a58c3402204dcced523777b475f',
            'info_dict': {
                'id': '46843144_1263',
                'ext': 'mp4',
                'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
                'timestamp': 1397217632,
                'upload_date': '20140411',
                'uploader': 'hitech',
                'uploader_id': 'hitech@corp.mail.ru',
                'duration': 245,
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Prefer the fragment-style id; otherwise rebuild it from the
        # two html-style groups.
        video_id = mobj.group('idv1')

        if not video_id:
            video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')

        video_data = self._download_json(
            'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')

        author = video_data['author']
        uploader = author['name']
        uploader_id = author.get('id') or author.get('email')
        view_count = video_data.get('views_count')

        meta_data = video_data['meta']
        content_id = '%s_%s' % (
            meta_data.get('accId', ''), meta_data['itemId'])
        title = meta_data['title']
        # The API sometimes appends the container extension to the title.
        if title.endswith('.mp4'):
            title = title[:-4]
        thumbnail = meta_data['poster']
        duration = meta_data['duration']
        timestamp = meta_data['timestamp']

        # format keys look like '480p'; height is parsed from them —
        # NOTE(review): assumes every key ends in 'p', verify against API.
        formats = [
            {
                'url': video['url'],
                'format_id': video['key'],
                'height': int(video['key'].rstrip('p'))
            } for video in video_data['videos']
        ]
        self._sort_formats(formats)

        return {
            'id': content_id,
            'title': title,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
|
aodag/babel | refs/heads/master | babel/__init__.py | 21 | # -*- coding: utf-8 -*-
"""
babel
~~~~~
Integrated collection of utilities that assist in internationalizing and
localizing applications.
This package is basically composed of two major parts:
* tools to build and work with ``gettext`` message catalogs
* a Python interface to the CLDR (Common Locale Data Repository), providing
access to various locale display names, localized number and date
formatting, etc.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from babel.core import UnknownLocaleError, Locale, default_locale, \
negotiate_locale, parse_locale, get_locale_identifier
__version__ = '2.0-dev'
|
redeyser/IceCash2 | refs/heads/master | dbIceCash.py | 1 | #!/usr/bin/python
# -*- coding: utf-8
# version 2.000
# DataBase for IceCash
import my
import os
import re
from chIceCash import _round
from md5 import md5
import tbIceCash as tbs
import datetime
# Connection defaults.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to a config file outside version control.
DATABASE = "IceCash"
MYSQL_USER = "icecash"
MYSQL_PASSWORD = "icecash1024"

# Table names that are created/accessed generically through self.tbs
# (see dbIceCash._tbinit).
TB_BOXES_HD = "tb_boxes_hd"
TB_BOXES_CT = "tb_boxes_ct"
TB_CHECK_HEAD = "tb_check_head"
TB_CHECK_CONT = "tb_check_cont"
TB_EGAIS_PLACES = "tb_egais_places"
TB_EGAIS_OSTAT = "tb_egais_ostat"
TB_EGAIS_DOCS_HD = "tb_egais_docs_hd"
TB_EGAIS_DOCS_CT = "tb_egais_docs_ct"
TB_EGAIS_DOCS_NEED = "tb_egais_docs_need"

# Serial-port baud-rate codes (settings value -> baud).
SPEED = {\
    1:1200,\
    2:2400,\
    3:4800,\
    4:9600,\
    5:14400,\
    6:38400,\
    7:57600,\
    8:115200\
}

# User privilege levels: higher number = more rights.
# KASSIR = cashier, SELLER = sales clerk (transliterated Russian).
TYPE_USER = {\
    "ADMIN"    :100,\
    "KASSIR"   :50,\
    "SELLER"   :10,\
}
def Str2Shash(s,c,eq):
    """Parse string *s* into a dict.

    *s* is split on separator *c* and every piece is stripped of spaces.
    A piece containing *eq* becomes ``key eq value`` — split on the FIRST
    occurrence only, so values may themselves contain *eq* (the original
    ``e.split(eq)`` + ``a[1]`` silently truncated such values).  A piece
    without *eq* maps to itself.
    """
    Shash = {}
    for e in s.split(c):
        e = e.strip(" ")
        if eq in e:
            # maxsplit=1 keeps any further *eq* characters in the value.
            k, v = e.split(eq, 1)
            Shash[k.strip(" ")] = v.strip(" ")
        else:
            Shash[e] = e
    return Shash
def array4json(a):
    """Return a JSON-friendly copy of sequence *a*: timedelta and date
    values are rendered with str(); everything else passes through."""
    return [
        str(item) if type(item) in (datetime.timedelta, datetime.date) else item
        for item in a
    ]
class dbIceCash(my.db):
def __init__(self, dbname, host, user, password):
    """Open the MySQL connection (via my.db) and build the table helpers.

    Most helpers are kept as individual attributes; the TB_* tables are
    additionally registered in self.tbs through _tbinit so they can be
    created and queried generically (_gets/_getid/_insert/...).
    """
    self.tbs = {}
    my.db.__init__(self, dbname, host, user, password)
    # One helper object per statically-known table.
    self.tb_sets = tbs.tb_sets('tb_sets')
    self.tb_price = tbs.tb_price('tb_price')
    self.tb_price_shk = tbs.tb_price_shk('tb_price_shk')
    self.tb_discount = tbs.tb_discount('tb_discount')
    self.tb_users = tbs.tb_users('tb_users')
    self.tb_trsc_hd = tbs.tb_trsc_hd('tb_trsc_hd')
    self.tb_trsc_ct = tbs.tb_trsc_ct('tb_trsc_ct')
    self.tb_check_head = tbs.tb_check_head('tb_check_head')
    self.tb_check_cont = tbs.tb_check_cont('tb_check_cont')
    self.tb_types = tbs.tb_types('tb_types')
    self.tb_Zet = tbs.tb_Zet('tb_Zet')
    self.tb_Zet_cont = tbs.tb_Zet_cont('tb_Zet_cont')
    self.tb_egais_places = tbs.tb_egais_places('tb_egais_places')
    self.tb_egais_docs_hd = tbs.tb_egais_docs_hd('tb_egais_docs_hd')
    self.tb_egais_docs_ct = tbs.tb_egais_docs_ct('tb_egais_docs_ct')
    self.tb_egais_ostat = tbs.tb_egais_ostat('tb_egais_ostat')
    self.tb_actions_hd = tbs.tb_actions_hd('tb_actions_hd')
    self.tb_tlist = tbs.tb_tlist('tb_tlist')
    self.tb_egais_docs_need = tbs.tb_egais_docs_need('tb_egais_docs_need')
    # Generic registrations (keyed by table name in self.tbs).
    self._tbinit(TB_BOXES_HD)
    self._tbinit(TB_BOXES_CT)
    self._tbinit(TB_CHECK_HEAD)
    self._tbinit(TB_CHECK_CONT)
    self._tbinit(TB_EGAIS_PLACES)
    self._tbinit(TB_EGAIS_OSTAT)
    self._tbinit(TB_EGAIS_DOCS_HD)
    self._tbinit(TB_EGAIS_DOCS_CT)
    self._tbinit(TB_EGAIS_DOCS_NEED)
def _tbinit(self, name):
    """Instantiate the helper class named *name* from module tbs and
    register it in self.tbs under that same name."""
    self.tbs[name] = getattr(tbs, name)(name)
def _tbcreate(self, tn):
    # Execute the CREATE TABLE statement produced by the helper for *tn*.
    self.run(self.tbs[tn]._create())
def _recreate(self):
for name in self.tbs.keys():
if not name in self.tables:
self._tbcreate(name)
print "created table %s" % name
def _tables(self):
    """Cache in self.tables the list of table names present in the DB."""
    rows = self.get("show tables")
    self.tables = [row[0] for row in rows]
def truncate_trsc(self):
    """Destructively empty all transaction / check / Z-report tables."""
    self.run("truncate tb_Zet")
    self.run("truncate tb_Zet_cont")
    self.run("truncate tb_trsc_hd")
    self.run("truncate tb_trsc_ct")
    self.run("truncate tb_actions_hd")
    self.run("truncate tb_tlist")
    self.run("truncate tb_check_head")
    self.run("truncate tb_check_cont")
    return True
def truncate_egais(self):
    """Destructively empty all EGAIS-related tables."""
    self.run("truncate tb_egais_docs_hd")
    self.run("truncate tb_egais_docs_ct")
    self.run("truncate tb_egais_places")
    self.run("truncate tb_egais_ostat")
    return True
""" Optimize functions ----------------------- """
def _gets(self,tn,tostr=False,dttm2str=True):
self.result_order=[]
result = self.get(self.tbs[tn]._gets())
if len(result)==0:
return None
else:
res=[]
for r in result:
res.append(self.tbs[tn].result2values(r,tostr=tostr,dttm2str=dttm2str))
return res
""" Получить запись по id """
def _getid(self,tn,id,tostr=False,dttm2str=True):
res = self.get(self.tbs[tn]._getid(id))
if len(res)==0:
return None
else:
return self.tbs[tn].result2values(res[0],tostr=tostr,dttm2str=dttm2str)
""" Получить записи по idhd """
def _gethd(self,tn,id,tostr=False,dttm2str=True):
res = self.get(self.tbs[tn]._gethd(id))
if len(res)==0:
return []
else:
result=[]
for r in res:
result.append( self.tbs[tn].result2values(r,tostr=tostr,dttm2str=dttm2str) )
return result
""" Получить Заголовочную запись и подчиненные """
def _get_data_hd_ct(self,tb_hd,tb_ct,id,tostr=False,dttm2str=True):
hd = self._getid(tb_hd,id,tostr=tostr,dttm2str=dttm2str)
if hd != None:
ct = self._gethd( tb_ct,id,tostr=tostr,dttm2str=dttm2str )
else:
ct=None
return (hd,ct)
""" Очистить таблицу """
def _truncate(self,tn):
self.run("truncate %s" % tn)
return True
""" Переделать в хэш """
def _db2hash(self,r,id,val):
h={}
for rec in r:
h[rec[id]]=rec[val]
return h
""" Переделать в массив """
def _db2arr(self,r,id):
t=[]
for rec in r:
t.append(rec[id])
return t
""" Простая выборка """
def _select(self,tn,where="",fields=None,order=None,group=None,tostr=False,dttm2str=True,toarr=False,tohash=False,nofields=False):
self.result_order=[]
if where!="" and fields==None:
where= " where %s" % where
if group:
_group=" group by "+group
else:
_group=""
if order:
_order=" order by "+order
else:
_order=""
if fields==None:
result = self.get(self.tbs[tn].query_all_select()+where+_group+_order)
else:
result = self.get(self.tbs[tn].query_select(fields,where)+_group+_order)
if len(result)==0:
return []
else:
if toarr:
res=[]
for r in result:
if tostr:
s=str(r[0])
else:
s=r[0]
res.append(s)
return res
if tohash:
res={}
self.result_order=[]
for r in result:
if tostr:
s=str(r[1])
else:
s=r[1]
res[r[0]]=s
self.result_order.append(r[0])
return res
res=[]
if not nofields:
for r in result:
res.append(self.tbs[tn].result2values(r,tostr=tostr,dttm2str=dttm2str))
else:
res=result
return res
""" Добавить запись """
def _insert(self,tn,struct):
r=self.run(self.tbs[tn].query_insert(struct))
if not r:
self.lastid=0
return False
self.lastid=self.get(my.Q_LASTID)[0][0]
return True
""" Изменить запись """
def _update(self,tn,struct,where):
return self.run(self.tbs[tn].query_update(struct)+" where %s" % where)
""" Удалить запись """
def _delete(self,tn,where):
return self.run(self.tbs[tn].query_delete(where))
""" Пустая запись """
def _empty(self,tn):
return self.tbs[tn].empty_all_values()
""" Удалить запись """
def _delete(self,tn,where):
return self.run(self.tbs[tn].query_delete(where))
""" ------------------------------------------ """
def _user_gets(self, rule, _if):
    # List users filtered by privilege level: *_if* is a comparison prefix
    # (e.g. ">=") concatenated with *rule* into the SQL condition.
    return self.get(self.tb_users._gets(_if + str(rule)))
def _user_add(self, aval):
    # Insert a new user row from value list/dict *aval*.
    self.run(self.tb_users._add(aval))
def _user_upd(self, iduser, aval):
    # Update user *iduser* with values *aval*.
    self.run(self.tb_users._upd(iduser, aval))
def _user_get(self, login="", id=0):
    # Load a user by login or id into self.user; False when not found.
    user = self.get(self.tb_users._get(login=login, id=id))
    if len(user) == 0:
        return False
    else:
        user = self.tb_users.result2values(user[0])
        self.user = user
        return True
def _user_auth(self, login, password):
    # Authenticate *login*: load the row (cached on self.user even when the
    # password is wrong) and compare md5(password) with the stored hash.
    # NOTE(review): unsalted MD5 is a weak password hash — consider upgrading.
    user = self.get(self.tb_users._get(login=login))
    if len(user) == 0:
        return False
    else:
        user = self.tb_users.result2values(user[0])
        self.user = user
        if md5(password).hexdigest() == user['password']:
            return True
        else:
            return False
def _types_add(self, pref, id, name):
    # Insert a (prefix, id, name) row into the types dictionary table.
    self.run(self.tb_types._add([pref, id, name]))
def _types_get(self, pref):
    # Raw rows of the types table for prefix *pref*.
    return self.get(self.tb_types._get(pref))
def _discount_get(self, code):
    """Load the discount-card row for *code* into self.discount.

    Returns True on success; on a miss self.discount is reset to None
    and False is returned.
    """
    rows = self.get(self.tb_discount._get(code))
    if not rows:
        self.discount = None
        return False
    self.discount = self.tb_discount.result2values(rows[0])
    return True
def _read_sets(self, group=''):
    """Load settings rows into self.sets ({name: value}, SQL NULL rendered
    as the string 'None') and refresh the cached integers self.idplace /
    self.nkassa, defaulting both to 1 when missing or non-numeric.
    """
    self.sets = {}
    data = self.get(self.tb_sets._getall(group))
    for d in data:
        # column 2 is the setting name, column 3 its value.
        if d[3] == None:
            s = 'None'
        else:
            s = d[3]
        self.sets[d[2]] = s
    try:
        self.idplace = int(self.sets['idplace'])
        self.nkassa = int(self.sets['nkassa'])
    except (KeyError, ValueError, TypeError):
        # BUG FIX: the original wrote ``self.idpalce = 1`` (typo), so
        # self.idplace was never actually set on failure and later reads
        # could raise AttributeError.
        self.idplace = 1
        self.nkassa = 1
def _sets_get(self, group=''):
    """Return {name: value} for the settings rows of *group*
    (SQL NULL values are rendered as the string 'None')."""
    sets = {}
    for row in self.get(self.tb_sets._getall(group)):
        value = row[3]
        sets[row[2]] = 'None' if value == None else value
    return sets
def _sets_add(self, g, n, v):
    # Insert setting *n* = *v* in group *g*.
    self.run(self.tb_sets._add([g, n, v]))
def _sets_upd(self, n, v):
    # Update setting *n* to value *v*.
    self.run(self.tb_sets._upd(n, v))
def _search_shk(self, shk):
    """Resolve barcode *shk* into self.price.

    First tries the main price table; on a miss, falls back to the
    auxiliary barcode table (tb_price_shk), loads the referenced price
    row and overrides its name and price (koef multiplier when non-zero,
    otherwise the barcode row's own cena).
    """
    self.price = self.get(self.tb_price._find_shk(shk))
    if len(self.price) == 0:
        priceshk = self.get(self.tb_price_shk._find_shk(shk))
        if len(priceshk) == 0:
            # Unknown barcode.
            self.price = None
            self.price_shk = None
            return False
        priceshk = self.tb_price_shk.result2values(priceshk[0], False)
        if not self._price_get(priceshk['id']):
            # Barcode row points at a missing price record.
            self.price = None
            self.price_shk = None
            return False
        self.price['name'] = priceshk['name']
        if priceshk['koef'] != 0:
            # koef scales the base price (e.g. package of N units).
            self.price['cena'] = priceshk['koef'] * self.price['cena']
        else:
            self.price['cena'] = priceshk['cena']
        return True
    else:
        self.price = self.tb_price.result2values(self.price[0], False)
        return True
def _price_get(self, code=None):
    # Load price row *code* into self.price and its extra barcodes into
    # self.price_shk; both reset to None (and False returned) on a miss.
    self.price = self.get(self.tb_price._get(code))
    if len(self.price) == 0:
        self.price = None
        self.price_shk = None
        return False
    else:
        self.price = self.tb_price.result2values(self.price[0], False)
        self.price_shk = self.get(self.tb_price_shk._get(code, None))
        return True
def _price_get_group(self, parent):
    # Load into self.price the raw rows of group *parent*
    # ("" is treated as the root group "0").
    if parent == "":
        parent = "0"
    self.price = self.get(self.tb_price._get_group(parent))
    if len(self.price) == 0:
        return False
    else:
        return True
def _price_get_in_list(self, _list):
    # Load the price rows whose codes are in *_list* into self.price as a
    # {code: value-dict} mapping (values stringified).
    price = self.get(self.tb_price._get_in_list(_list))
    if len(price) == 0:
        return False
    else:
        self.price = {}
        for p in price:
            self.price[p[0]] = self.tb_price.result2values(p, tostr=True)
        return True
def _check_hd_get(self, iduser, id):
    # Load check header (iduser, id) into self.ch_head and derive the
    # display totals: summa_wod (minus discount) and summa_pay (minus
    # discount and bonus discount).
    self.ch_head = self.get(self.tb_check_head._get(iduser, id))
    if len(self.ch_head) == 0:
        self.ch_head = None
        return False
    else:
        self.ch_head = self.tb_check_head.result2values(self.ch_head[0])
        self.ch_head['summa_wod'] = self.ch_head['summa'] - self.ch_head['discount_sum']
        self.ch_head['summa_pay'] = self.ch_head['summa'] - self.ch_head['discount_sum'] - self.ch_head['bonus_discount']
        return True
def _check_get(self, iduser, id):
    # Like _check_hd_get but also loads the raw position rows into
    # self.ch_cont (left as-is even when the header is missing).
    self.ch_head = self.get(self.tb_check_head._get(iduser, id))
    self.ch_cont = self.get(self.tb_check_cont._gets(iduser, id))
    if len(self.ch_head) == 0:
        self.ch_head = None
        return False
    else:
        self.ch_head = self.tb_check_head.result2values(self.ch_head[0])
        self.ch_head['summa_wod'] = self.ch_head['summa'] - self.ch_head['discount_sum']
        self.ch_head['summa_pay'] = self.ch_head['summa'] - self.ch_head['discount_sum'] - self.ch_head['bonus_discount']
        return True
def _check_gets(self, iduser, _all):
    # Load the list of check headers for *iduser* into self.checks;
    # False when there are none.
    self.checks = self.get(self.tb_check_head._gets(iduser, _all))
    if len(self.checks) == 0:
        return False
    else:
        #replace field puttime as str for json
        d = []
        for p in range(len(self.checks)):
            pos = list(self.checks[p])
            # column 2 is puttime; stringify so the rows are JSON-safe.
            pos[2] = str(pos[2])
            d.append(pos)
        self.checks = d
        return True
def _check_json(self):
    """Render the currently loaded check (self.ch_head / self.ch_cont)
    as a JSON-ready dict {"empty", "head", "cont"}; mutates ch_head in
    place (puttime stringified, 'itogo' total added)."""
    if self.ch_head != None:
        empty = False
    else:
        empty = True
        result = {"empty": empty, "head": {}, "cont": {}}
        return result
    head = self.ch_head
    head['puttime'] = str(head['puttime'])
    # NOTE(review): this assignment is dead — overwritten below.
    result = {"head": head, }
    cont = {}
    for p in range(len(self.ch_cont)):
        pos = self.tb_check_cont.result2values(self.ch_cont[p], decode=True)
        # date/time are dropped from positions for the JSON payload.
        del pos['date']
        del pos['time']
        cont[p] = pos
    # Grand total = sum - bonus discount - discount, rounded to 2 places.
    head['itogo'] = _round(head['summa'] - head['bonus_discount'] - head['discount_sum'], 2)
    result = {"empty": empty, "head": head, "cont": cont}
    return result
def _check_update(self, iduser, id, struct):
    # Apply field updates *struct* to check header (iduser, id).
    self.run(self.tb_check_head._upd(iduser, id, struct))
def _check_load(self, iduser, id, curiduser):
    # Move saved check (iduser, id) into *curiduser*'s working slot (id 0),
    # discarding whatever was open there.
    if not self._check_get(iduser, id):
        return False
    self.run(self.tb_check_head._clear(curiduser, 0))
    self.run(self.tb_check_cont._clear(curiduser, 0))
    # Re-key the saved header/positions to (curiduser, 0).
    self.run(self.tb_check_head._upd(iduser, id, {'id': 0, 'iduser': curiduser}))
    self.run(self.tb_check_cont._updid(iduser, id, 0, iduser2=curiduser))
    return True
def _check_delete(self, iduser, id):
    # Delete check header and all its positions.
    self.run(self.tb_check_head._del(iduser, id))
    self.run(self.tb_check_cont._del(iduser, id))
def _check_save(self, iduser):
    # Persist the working check (slot 0) under a fresh id and clear the
    # working slot. Returns the new id, or 0 when there is nothing to save.
    if not self._check_get(iduser, 0):
        return 0
    if len(self.ch_cont) == 0:
        return 0
    self.run(self.tb_check_head._copyto(iduser, 0))
    id = self.get(my.Q_LASTID)[0][0]
    # Re-key the working positions to the newly created header id.
    self.run(self.tb_check_cont._updid(iduser, 0, id))
    self.run(self.tb_check_head._clear(iduser, 0))
    self.run(self.tb_check_cont._clear(iduser, 0))
    return id
def _check_create(self, iduser):
    # Start a fresh working check for *iduser*: wipe slot 0, create a new
    # header row and mark it as the current one.
    self.run(self.tb_check_head._clear(iduser, 0))
    self.run(self.tb_check_cont._clear(iduser, 0))
    self.run(self.tb_check_head._new(iduser))
    id = self.get(my.Q_LASTID)
    self.run(self.tb_check_head._setcur(iduser, id[0][0]))
def _check_pos_barcode(self, iduser, idcheck, barcode):
    """True when check (iduser, idcheck) already has a row with *barcode*."""
    rows = self.get(self.tb_check_cont._find_barcode(iduser, idcheck, barcode))
    return len(rows) > 0
def _check_pos_get(self, iduser, idcheck, id):
    # Load position *id* of the check into self.ch_cont_pos (value dict);
    # None + False when it does not exist.
    self.ch_cont_pos = self.get(self.tb_check_cont._get(iduser, idcheck, id))
    if len(self.ch_cont_pos) == 0:
        self.ch_cont_pos = None
        return False
    else:
        self.ch_cont_pos = self.tb_check_cont.result2values(self.ch_cont_pos[0])
        return True
def _check_pos_add(self, iduser, idcheck, a):
    """Insert position *a* into check (iduser, idcheck); return the new
    row's auto-increment id."""
    self.run(self.tb_check_cont._add(iduser, idcheck, a))
    return self.get(my.Q_LASTID)[0][0]
def _check_pos_upd(self, iduser, idcheck, id, struct):
    # Update a single check position with values *struct*.
    self.run(self.tb_check_cont._upd(iduser, idcheck, id, struct))
def _check_pos_upds(self, iduser, idcheck, struct):
    # Update ALL positions of the check with values *struct*.
    self.run(self.tb_check_cont._upds(iduser, idcheck, struct))
def _check_pos_dels(self, iduser, idcheck, _list):
    # Delete the positions whose ids are in *_list*.
    self.run(self.tb_check_cont._dels(iduser, idcheck, _list))
def _check_pos_info_set(self, iduser, idcheck, id, shcode=True, cena=None):
    """Copy the current self.price record's fields into check position
    *id*: price-info fields (fd_priceinfo, 'p_' prefix stripped), name,
    price (paramf1 — *cena* override wins) and the multiprice flag.
    When *shcode* is False the p_shk field is left untouched."""
    if not self._check_pos_get(iduser, idcheck, id):
        return False
    # NOTE(review): *code* is assigned but never used below.
    code = self.ch_cont_pos["code"]
    if self.price != None:
        # multiprice: the item has extra barcode rows (tb_price_shk).
        if len(self.price_shk) > 0:
            multiprice = 1
        else:
            multiprice = 0
        struct = {}
        # fd_priceinfo fields are named 'p_<price-column>'; f[2:] strips
        # the prefix to index into the price dict.
        for f in self.tb_check_cont.fd_priceinfo:
            struct[f] = self.price[f[2:]]
        if cena != None:
            struct["paramf1"] = cena
        else:
            struct["paramf1"] = self.price["cena"]
        struct["name"] = self.price["name"]
        struct["multiprice"] = multiprice
        if not shcode:
            del struct['p_shk']
        self.run(self.tb_check_cont._upd(iduser, idcheck, id, struct))
        return True
    else:
        return False
def _trsc_to_check(self, iduser, id, ro=True, _reverse=False):
    """Reconstruct a working check (slot 0) from stored transaction *id*.

    Amounts are stored sign-normalized; _koef restores the sign (type 1 =
    return → negative). With _reverse=True the transaction type is flipped
    (sale <-> return) to build a cancelling check.
    """
    self._check_create(iduser)
    if not self._trsc_get_check(id, tostr=False):
        return False
    # Fields copied verbatim vs. fields multiplied by the sign koef.
    fd_ident = ['type', 'seller', 'errors', 'discount_card', 'discount_proc',
                'bonus_card', 'bonus_proc', 'bonus_max', 'bonus_sum', 'bonus_type']
    fd_koef = ['summa', 'discount_sum', 'bonus_discount', 'bonus']
    fd_koef_ct = ['paramf2', 'paramf3', 'discount', 'dcount', 'bonus_discount', 'bonus']
    if _reverse:
        if self.trsc_hd['type'] == 1:
            self.trsc_hd['type'] = 0
            _koef = -1
        else:
            self.trsc_hd['type'] = 1
            _koef = 1
    else:
        if self.trsc_hd['type'] == 1:
            _koef = -1
        else:
            _koef = 1
    struct = {'ro': ro, 'iduser': iduser}
    for n in fd_ident:
        struct[n] = self.trsc_hd[n]
    for n in fd_koef:
        struct[n] = _koef * self.trsc_hd[n]
    self._check_update(iduser, 0, struct)
    # Rebuild every position, re-signing the amount fields.
    for p in self.trsc_ct:
        struct = {}
        for f in self.tb_check_cont.fieldsorder:
            if p.has_key(f):
                if f in fd_koef_ct and p[f] != None:
                    struct[f] = _koef * p[f]
                else:
                    struct[f] = p[f]
        self._check_pos_add(iduser, 0, struct)
    return True
def _check_to_trsc(self,ncheck,nal,bnal,ispayed,isfiscal=0):
fd_ident=['iduser','seller','type','errors','discount_card','discount_proc',\
'bonus_card','bonus_proc','bonus_max','bonus_sum','bonus_type','egais_url','egais_sign']
fd_koef=['summa','discount_sum','bonus_discount','bonus']
fd_koef_ct=['paramf2','paramf3','discount','dcount','bonus_discount','bonus']
if self.ch_head['type']==1:
_koef=-1
else:
_koef=1
struct={'ncheck':ncheck,'pay_nal':nal*_koef,'pay_bnal':bnal*_koef,'ispayed':ispayed,'isfiscal':isfiscal}
dt = my.curdate2my()
tm = my.curtime2my()
struct['date']=dt
struct['time']=tm
struct['idplace']=self.idplace
struct['nkassa']=self.nkassa
for n in fd_ident:
struct[n] = self.ch_head[n]
for n in fd_koef:
struct[n] = _koef*self.ch_head[n]
self.run(self.tb_trsc_hd._add(struct))
id=self.get(my.Q_LASTID)[0][0]
if ncheck==0:
self.run(self.tb_trsc_hd._upd(self.idplace,self.nkassa,id,{'ncheck':id}))
for pos in self.ch_cont:
p=self.tb_check_cont.result2values(pos)
struct={}
for f in self.tb_trsc_ct.record_add:
if p.has_key(f):
if f in fd_koef_ct and p[f]!=None:
struct[f]=_koef*p[f]
else:
struct[f]=p[f]
struct['idhd']=id
self.run(self.tb_trsc_ct._add(struct))
return True
    def _trsc_get_check(self,id,tostr=True):
        """Load one stored transaction (header + content rows) into
        self._trsc_hd / self.trsc_hd / self.trsc_ct.

        When id is None the row with the smallest id is used instead.
        Returns False when nothing matches, True otherwise.
        """
        if id==None:
            # fall back to the oldest transaction on record
            trsc_hd=self.get("select * from tb_trsc_hd order by id limit 1")
            if len(trsc_hd)>0:
                id=trsc_hd[0][0]
        else:
            trsc_hd=self.get(self.tb_trsc_hd._get_check(self.idplace,self.nkassa,id))
        if len(trsc_hd)==0:
            return False
        self._trsc_hd = trsc_hd[0]  # raw result row
        self.trsc_hd = self.tb_trsc_hd.result2values(trsc_hd[0],tostr=tostr)  # field dict
        trsc_ct=self.get(self.tb_trsc_ct._get_title(id))
        self.trsc_ct=[]
        for c in trsc_ct:
            d=self.tb_trsc_ct.result2values(c,tostr=tostr)
            # last column of _get_title is presumably the joined item name --
            # verify against the tb_trsc_ct query
            d['name']=c[-1]
            self.trsc_ct.append(d)
        return True
def _trsc_get(self,idzet=None,limit=1000):
if not limit:
limit=1000
if idzet:
self.Zet=self.get(self.tb_Zet._get(self.idplace,self.nkassa,idzet))
if len(self.Zet)==0:
return False
(idbegin,idend)=self.Zet[0][8:10]
chhd=self.get(self.tb_trsc_hd._get_Z(idbegin,idend))
chct=self.get(self.tb_trsc_ct._get_Z(idbegin,idend))
else:
chhd=self.get(self.tb_trsc_hd._getup(limit))
chct=self.get(self.tb_trsc_ct._getup(limit))
data=[]
curct=0
for hd in chhd:
idhd=hd[0]
_hd=array4json(hd)
_ct=[]
while curct<len(chct) and idhd==chct[curct][1]:
_c=array4json(chct[curct])
_ct.append(_c)
curct+=1
data.append({'h':_hd,'b':_ct})
return data
def _trsc_updup(self,id1,id2,up):
return self.run(self.tb_trsc_hd._updup(id1,id2,up))
def _curZet_calc(self):
if self._Zet_last():
idbegin = self.ZetLast['end_ncheck']+1
else:
idbegin=None
idend=None
calcSum = self.get(self.tb_trsc_hd._calc_sum (self.idplace,self.nkassa,idbegin,idend))[0]
calcCount = self.get(self.tb_trsc_hd._calc_count(self.idplace,self.nkassa,idbegin,None))[0]
fd=['c_sale',
'summa','summa_ret','summa_nal','summa_bnal',
'discount','bonus','bonus_discount',
'begin_ncheck','end_ncheck',
'c_nofiscal','c_saled','nf_summa','nf_discount','nf_bonus_discount']
self.curZet={}
for i in range(len(fd)):
self.curZet[fd[i]]=calcSum[i]
fd=['c_return','c_cancel','c_error']
for i in range(len(fd)):
try:
self.curZet[fd[i]]=int(calcCount[i])
except:
self.curZet[fd[i]]=0
def _trsc_calc(self,idbegin=None,idend=None,full=True):
self.run(self.tb_trsc_hd._recalc(idbegin,idend))
calcSum = self.get(self.tb_trsc_hd._calc_sum (self.idplace,self.nkassa,idbegin,idend))[0]
calcCount = self.get(self.tb_trsc_hd._calc_count (self.idplace,self.nkassa,idbegin,idend))[0]
if idend==None:
self._trsc_last()
else:
self._trsc_get_check(idend)
self.trsc=self._trsc_hd
self._trsc_get_check(idbegin)
if calcSum[0]==0:
self.Zet={}
self.Zet_ct=[]
self.Zet['vir']=0
self.Zet['c_sale']=0
self.Zet['summa']=0
self.Zet['summa_nal']=0
self.Zet['summa_bnal']=0
return False
fd=['c_sale',
'summa','summa_ret','summa_nal','summa_bnal',
'discount','bonus','bonus_discount',
'begin_ncheck','end_ncheck',
'c_nofiscal','c_saled','nf_summa','nf_discount','nf_bonus_discount']
self.Zet={ 'begin_date':self.trsc_hd['date'],
'begin_time':self.trsc_hd['time'],
'end_date':self.trsc[3],
'end_time':self.trsc[4]}
for i in range(len(fd)):
self.Zet[fd[i]]=calcSum[i]
fd=['c_return','c_cancel','c_error']
for i in range(len(fd)):
try:
self.Zet[fd[i]]=int(calcCount[i])
except:
self.Zet[fd[i]]=0
if self.Zet['summa']!=None:
self.Zet['vir']=self.Zet['summa']-self.Zet['discount']-self.Zet['bonus_discount']
self.Zet['summa_nal']=self.Zet['vir']-self.Zet['summa_bnal']
else:
self.Zet['vir']=0
if self.Zet['nf_summa']!=None:
self.Zet['nf_vir']=self.Zet['nf_summa']-self.Zet['nf_discount']-self.Zet['nf_bonus_discount']
else:
self.Zet['nf_vir']=0
#Расчитываем дату Зет отчета
t=self.sets['begin_time'].split(':')
t=datetime.timedelta(0,int(t[0])*60*60+int(t[1])*60)
if self.Zet['end_time']>t:
self.Zet['date']=self.Zet['end_date']
else:
self.Zet['date']=self.Zet['end_date']-datetime.timedelta(1,0)
for n in ('end_date','end_time','begin_date','begin_time','date'):
self.Zet[n]=str(self.Zet[n])
self.Zet_ct=[]
if not full:
return True
calcCt=self.get(self.tb_trsc_hd._calc_ct(idbegin,idend))
fd=['section','idgroup','code','alco','paramf1','paramf2','paramf3','discount','bonus','bonus_discount','isfiscal']
for d in calcCt:
ct={}
for i in range(len(d)):
ct[fd[i]]=d[i]
self.Zet_ct.append(ct)
return True
def _trsc_last(self,_if=""):
d=self.get(self.tb_trsc_hd._last(self.idplace,self.nkassa,_if))
if len(d)==0:
self.trsc=None
return 0
else:
self.trsc=d[0]
return d[0][0]
def _trsc_filter(self,dt1,dt2,tm1,tm2,ncheck,tcheck,tpay,tclose,dcard,bcard,fiscal,error,discount,bonus,summa,alco,code,group):
r=self.get(self.tb_trsc_hd._filter(self.idplace,self.nkassa,dt1,dt2,tm1,tm2,ncheck,tcheck,tpay,tclose,dcard,bcard,fiscal,error,discount,bonus,summa,alco,code,group))
self.f_checks=[]
for n in r:
data=self.tb_trsc_hd.result2values(n,tostr=True)
data['vir']=_round(float(data['summa'])-float(data['discount_sum'])-float(data['bonus_discount']),2)
self.f_checks.append(data)
def _Zet_last(self):
d=self.get(self.tb_Zet._last(self.idplace,self.nkassa))
if len(d)>0:
self.ZetLast=self.tb_Zet.result2values(d[0])
return True
else:
self.ZetLast=None
return False
def _Zet_update(self,id):
r=self.run(self.tb_Zet._upd(id,self.Zet))
if not r:
print "not update"
return False
self.ZetID=id
self.run(self.tb_Zet_cont._dels(id))
for v in self.Zet_ct:
v['idhd']=id
self.run(self.tb_Zet_cont._add(v))
return True
def _Zet_add(self):
r=self.run(self.tb_Zet._add(self.Zet))
if not r:
return False
id=self.get(my.Q_LASTID)[0][0]
self.ZetID=id
for v in self.Zet_ct:
v['idhd']=id
self.run(self.tb_Zet_cont._add(v))
return True
def _Zet_gets(self,dt1,dt2,up=None):
self.Zets=self.get(self.tb_Zet._gets(dt1,dt2,up))
if len(self.Zets)==0:
return False
Zets=[]
for v in self.Zets:
struct=self.tb_Zet.result2values(v,tostr=True)
Zets.append(struct)
self.Zets=Zets
return True
def _Zet_upd(self,id,struct):
return self.run(self.tb_Zet._upd(id,struct))
def _Zet_get(self,id):
self.Zet=self.get(self.tb_Zet._get(self.idplace,self.nkassa,id))
if len(self.Zet)==0:
return False
else:
self.Zet=self.tb_Zet.result2values(self.Zet[0],tostr=True)
Zet_ct=self.get(self.tb_Zet_cont._get(id))
self.Zet_ct=[]
for n in Zet_ct:
self.Zet_ct.append(self.tb_Zet_cont.result2values(n,tostr=True))
return True
def _Zet_get_html(self,id):
self.Zet=self.get(self.tb_Zet._get(self.idplace,self.nkassa,id))
if len(self.Zet)==0:
return False
else:
self.Zet=self.tb_Zet.result2values(self.Zet[0])
self.Zet_ct=self.get(self.tb_Zet_cont._gethtml(id))
return True
def egais_ostat_clear(self):
self.run(self.tb_egais_ostat.query_truncate())
def egais_ostat_add(self,struct):
self.run(self.tb_egais_ostat._add(struct))
def egais_places_clear(self):
self.run(self.tb_egais_places.query_truncate())
def egais_places_add(self,struct):
self.run(self.tb_egais_places._add(struct))
def egais_docs_hd_del(self,id):
if self.run(self.tb_egais_docs_hd._del(id)):
return self.run(self.tb_egais_docs_ct._del(id))
return False
def egais_docs_hd_add(self,struct):
if self.run(self.tb_egais_docs_hd._add(struct)):
return self.get(my.Q_LASTID)[0][0]
else:
return 0
def egais_docs_ct_add(self,struct):
if self.run(self.tb_egais_docs_ct._add(struct)):
return self.get(my.Q_LASTID)[0][0]
else:
return 0
def egais_docs_hd_upd(self,id,struct):
return self.run(self.tb_egais_docs_hd._upd(id,struct))
def egais_docs_del(self,id):
r=self.run(self.tb_egais_docs_hd._del(id))
self.run(self.tb_egais_docs_ct._del(id))
return r
def egais_docs_ct_getpos(self,idd,id):
r=self.get(self.tb_egais_docs_ct._getpos(idd,id))
if len(r)==0:
return False
self.egais_doc_ct=self.tb_egais_docs_ct.result2values(r[0],tostr=True)
return True
def egais_docs_ct_upd(self,idd,id,struct):
return self.run(self.tb_egais_docs_ct._upd(idd,id,struct))
def egais_docs_ct_updId(self,idd,id,struct):
return self.run(self.tb_egais_docs_ct._updId(idd,id,struct))
def egais_docs_find(self,_type,recv_RegId,send_RegId,wb_NUMBER):
self.egais_doc=self.get(self.tb_egais_docs_hd._find(_type,recv_RegId,send_RegId,wb_NUMBER))
if len(self.egais_doc)>0:
self.egais_doc=self.egais_doc[0]
return True
else:
return False
def egais_find_replyId(self,reply_id):
self.egais_doc=self.get(self.tb_egais_docs_hd._find_replyId(reply_id))
if len(self.egais_doc)>0:
self.egais_doc=self.egais_doc[0]
return True
else:
return False
def egais_find_ttn(self,ttn):
self.egais_doc=self.get(self.tb_egais_docs_hd._find_ttn(ttn))
if len(self.egais_doc)>0:
self.egais_doc=self.egais_doc[0]
return True
else:
return False
def egais_get_postav(self):
postav=self.get(self.tb_egais_docs_hd._get_postav())
return postav
def egais_get_ostat(self):
ostat=self.get(self.tb_egais_ostat._gets())
return ostat
def egais_get_mydocs(self,_type,status,postav,dt1,dt2):
postav=self.get(self.tb_egais_docs_hd._get_docs(_type,status,postav,dt1,dt2))
d=[]
if len(postav)>0:
for p in postav:
d.append(self.tb_egais_docs_hd.result2values(p,tostr=True))
return d
def egais_get_mydoc(self,idd):
hd=self.get(self.tb_egais_docs_hd._get(idd))
self.egais_doc_hd=[]
self.egais_doc_ct=[]
if len(hd)==0:
return False
self.egais_doc_hd=self.tb_egais_docs_hd.result2values(hd[0],tostr=True)
ct=self.get(self.tb_egais_docs_ct._get(idd))
for p in ct:
self.egais_doc_ct.append(self.tb_egais_docs_ct.result2values(p,tostr=True))
return True
def _actions_gets(self):
return self.get(self.tb_actions_hd._gets())
def _tlist_gets(self):
return self.get(self.tb_tlist._gets())
def boxes_get_sublink(self,id):
return self._select(TB_BOXES_HD," idhd=%s" % id,dttm2str=True)
def _zakaz_copy(self,iduser,id):
hd=self._getid(TB_BOXES_HD,id)
if hd==None:
return False
if hd['status']!=3:
return False
sub=self.boxes_get_sublink(id)
a=[str(hd['id'])]
for s in sub:
a.append(str(s['id']))
l=",".join(a)
d=self._select(TB_BOXES_CT," idhd in (%s) and storno=0" % l,dttm2str=True,fields=['idhd','code','count'])
for r in d:
idpos=self._check_pos_add(iduser,0,{'nbox':id,'code':str(r['code']),'paramf2':r['count']})
self._price_get(r['code'])
self._check_pos_info_set(iduser,0,idpos)
return True
def _zakaz_delete(self,iduser,id):
self._delete(TB_CHECK_CONT,"iduser=%s and idcheck=0 and nbox=%s" % (iduser,id))
return True
def _create(self):
if self.open():
self.run("drop user '%s'@'localhost'" % MYSQL_USER)
self.run("drop database %s" % DATABASE)
self.run("create database %s" % DATABASE)
self.run("create user '%s'@'localhost' IDENTIFIED BY '%s'" % (MYSQL_USER,MYSQL_PASSWORD))
self.run("grant all on "+DATABASE+".* to '%s'@'localhost'" % MYSQL_USER)
self.run("flush privileges")
self.run("use %s" % DATABASE)
self.run(self.tb_sets._create())
self._sets_add('server','version','2.0')
self._sets_add('server','server_port','10110')
self._sets_add('server','dtprint_ip','localhost')
self._sets_add('server','dtprint_port','10111')
self._sets_add('server','backoffice_ip','IceServ')
self._sets_add('server','backservice_port','10100')
self._sets_add('server','bonus_port','7172')
self._sets_add('server','dtprint_passwd','dtpadm')
self._sets_add('server','regid','beerkem')
self._sets_add('server','egais_ip','localhost')
self._sets_add('server','regpassword','766766')
self._sets_add('server','temperature','0')
self._sets_add('server','upgrade','0')
self._sets_add('server','prize_port','7174')
self._sets_add('client','timeout_ping','15')
self._sets_add('client','timeout_query','60')
self._sets_add('client','css','default')
self._sets_add('client','scale_prefix','21')
self._sets_add('client','user_prefix','111')
self._sets_add('client','discount_prefix','222')
self._sets_add('client','bonus_prefix','777')
self._sets_add('client','site','site')
self._sets_add('magazine','texthead','')
self._sets_add('magazine','textfoot','')
self._sets_add('magazine','orgname','')
self._sets_add('magazine','placename','')
self._sets_add('magazine','inn','4205183793')
self._sets_add('magazine','kpp','420501001')
self._sets_add('magazine','logo','')
self._sets_add('magazine','idplace','1')
self._sets_add('magazine','nkassa','1')
self._sets_add('magazine','calcost','0')
self._sets_add('magazine','pricenum','0')
self._sets_add('magazine','messagetype','default')
self._sets_add('magazine','begin_time','06:00:00')
self._sets_add('magazine','action','0')
self._sets_add('magazine','sets','0')
self._sets_add('magazine','nofiscal_proc','0')
self._sets_add('device','dev_scanner','')
self._sets_add('device','d_name','None')
self._sets_add('device','d_speed','8')
self._sets_add('device','d_devtype','KKM_FPRINT')
self._sets_add('device','d_printsh','1')
self._sets_add('device','d_autonull','1')
self._sets_add('device','d_autobox','1')
self._sets_add('device','d_summertm','1')
self._sets_add('device','d_autocut','1')
self._sets_add('device','d_ignore','0')
self._sets_add('device','d_ncheck','1')
self.run(self.tb_trsc_hd._create())
self.run(self.tb_trsc_ct._create())
self.run(self.tb_price._create())
self.run(self.tb_price_shk._create())
self.run(self.tb_discount._create())
self.run(self.tb_check_head._create())
self.run(self.tb_check_cont._create())
self.run(self.tb_users._create())
self.run(self.tb_types._create())
self.run(self.tb_Zet._create())
self.run(self.tb_Zet_cont._create())
self.run(self.tb_egais_places._create())
self.run(self.tb_egais_ostat._create())
self.run(self.tb_egais_docs_hd._create())
self.run(self.tb_egais_docs_ct._create())
self.run(self.tb_egais_docs_need._create())
self.run(self.tb_actions_hd._create())
self.run(self.tb_tlist._create())
self._types_add('us',10,u'Продавец')
self._types_add('us',50,u'Кассир')
self._types_add('us',100,u'Админ')
self._user_add(['admin' ,md5("678543").hexdigest(),TYPE_USER['ADMIN']])
self._user_add(['kassir',md5("766766").hexdigest(),TYPE_USER['KASSIR']])
self._user_add(['seller',md5("111").hexdigest() ,TYPE_USER['SELLER']])
print "created database"
else:
print "error.not_open"
def price_add(self,values):
self.run(self.tb_price._add(values))
def price_upd(self,values,_type):
code=values[0]
r=self.get(self.tb_price._get(code))
if len(r)>0:
struct=self.tb_price.result2values(values,decode=False)
if _type=='clear':
pass
elif _type=='keep' or _type=='add':
ost=struct['ostatok']
del struct['ostatok']
self.run(self.tb_price._upd(code,struct))
if _type=='add':
self.run(self.tb_price._upd_grow(code,ost))
else:
self.price_add(values)
def price_add_shk(self,values):
if not self.run(self.tb_price_shk._add(values)):
print values[0:2]
def price_upd_shk(self,code,shk,values):
r=self.get(self.tb_price_shk._get(code,shk))
if len(r)>0:
self.run(self.tb_price_shk._upd(code,shk,values))
else:
self.price_add_shk([code,shk]+values)
def price_clear(self):
self.run('truncate tb_price_shk')
return self.run('truncate tb_price')
def price_load(self,fname):
""" IceCash format like Shtrihm
1 : Код товара
2 : Штрихкод
3 : Наименование
4 : Литраж
5 : Цена
6 : Остаток
7 : *Схема скидок
8 : Признак весового товара
9 : Номер секции
10 : *Максимальная скидка
11 : *Тип номенклатуры
12 : Признак алкоголя по ЕГАИС
13 : *Минимальная цена
14 : *Максимальная цена
15 : reserved2
16 : Идентификатор группы этого товара
17 : Признак товара (не группы)
"""
try:
f = open(fname,'r')
except:
return False
line = f.readline()
line=line.rstrip("\n\r")
Shash = Str2Shash(line,";","=")
if not Shash.has_key("#IceCash"):
print "price_load: error: not #IceCash file"
f.close()
return False
if not Shash.has_key("type"):
print "price_load: error: no type param "
f.close()
return False
if Shash["type"]!="price":
print "price_load: error: type<>IceCash.price"
f.close()
return False
if not Shash.has_key("sheme_record"):
Shash["sheme_record"]="clear"
if not Shash.has_key("sheme_count"):
Shash["sheme_count"]="clear"
if Shash["sheme_record"]=="clear":
print "price_load: clear"
insert=True
self.price_clear()
else:
insert=False
for line in f.readlines():
if line=='':
continue
line=line.decode('cp1251').encode('utf8')
arr=line.split(';')
try:
arr[3]=float(arr[3])
except:
arr[3]=0
code=arr[0]
pref = code[0]
if re.match(r'^#(\d)*$',code):
if len(arr)<6:
continue
(code,shk,n1,n2,cena,koef)=arr[0:6]
code=code.lstrip('#')
if koef=="" or koef=="0":
koef=1
if not insert:
self.price_upd_shk(code,shk,[n1,cena,koef])
else:
self.price_add_shk([code,shk,n1,cena,koef])
elif re.match(r'^(\d)*$',code):
if len(arr)<17:
continue
if not insert:
self.price_upd(arr,Shash['sheme_count'])
else:
self.price_add(arr)
return True
"""
db = dbIceCash(DATABASE, "localhost", MYSQL_USER, MYSQL_PASSWORD)
if db.open():
# db.price_load("site/download/price/pos1.spr")
db._check_create(112)
db.close()
"""
|
Hubert51/ROOMr | refs/heads/master | server_part/app/manage.py | 1 |
from flask import Flask
from flask import render_template
import MySQLdb
import sys
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
app = Flask(__name__)
# This is online version of database
# db = MySQLdb.connect(host="Gengruijie.mysql.pythonanywhere-services.com",user="Gengruijie",passwd="jixiaofang123",db="Gengruijie$ROOMr")
# this is local database for testing and developing
db = MySQLdb.connect(host="127.0.0.1",user="root",passwd="gengruijie",db="ROOMR")
cur = db.cursor()
def getInfo(info):
    """Split a room descriptor string into its components.

    ``info`` is expected to look like ``"551c1"``: the room number, a
    lettered section, and a single trailing state character.  Returns a
    ``(state, room_number, room_section)`` tuple of strings, split at the
    last alphabetic character of the descriptor (matching the original
    loop, which kept overwriting the split point on every letter).

    Raises ValueError when the descriptor is empty or contains no
    alphabetic section marker.  (The original code silently fell off the
    loop in that case, leaving roomNum/roomSection unbound and crashing
    the caller with a confusing error.)
    """
    if not info:
        raise ValueError("empty room descriptor")
    state = info[-1]
    roomInfo = info[0:-1]
    roomNum = None
    roomSection = None
    for i in range(len(roomInfo)):
        if roomInfo[i].isalpha():
            roomNum = roomInfo[0:i]
            roomSection = roomInfo[i:]
    if roomNum is None:
        raise ValueError("room descriptor %r has no section letters" % (info,))
    return (state, roomNum, roomSection)
@app.route('/')
def hello_world():
    """Render the landing page.

    Queries room 551's state and derives a human-readable label, but
    currently renders main.html without passing it: the template context
    argument on the return line is commented out.
    """
    query551 = "select state from Info WHERE num = 551"
    cur.execute(query551)
    state551 = cur.fetchone()
    print(state551)
    # NOTE(review): `state` and `state1` are computed but unused because the
    # context argument below is commented out.
    state = state551[0]
    if(state551[0] == 1 ):
        state1 = "Occupied"
    else :
        state1 = "empty"
    return render_template("main.html")#,state = state1)
@app.route("/<info>")
def update(info):
    """Parse a room descriptor from the URL and echo back its room number."""
    state, room_number, room_section = getInfo(info)
    return room_number
@app.route("/551c1")
def dominate():
    """Mark room 551 as occupied (state = 1), commit, and acknowledge."""
    query = "UPDATE Info SET state = 1 WHERE num=551 "
    cur.execute(query)
    db.commit()
    return "dominate"
if __name__ == '__main__':
app.run(debug=True);
|
joh12041/quac | refs/heads/master | lib/confidence_ellipse.py | 3 | '''This module contains functions to compute confidence ellipses for
2-dimensional Gaussians and Gaussian mixture models. (Note that by
"ellipse" we mean "polygon approximating an ellipse".) See
<http://sites.stat.psu.edu/~ajw13/stat505/fa06/06_multnorm/06_multnorm_revist.html>.'''
# Copyright (c) Los Alamos National Security, LLC, and others.
import math
import numpy as np
from django.contrib.gis import geos
import planar
# Number of edges in the polygons which approximate an ellipse. This should be
# divisible by 4, which places a polygon vertex at each ellipse vertex.
POLYGON_EDGES = 32
class My_Polygon(planar.Polygon):
    '''A planar.Polygon that knows how to convert itself to a geos.Polygon,
    repeating the first vertex at the end to close the ring as GEOS
    requires.'''
    @property
    def as_geos(self):
        # Two basic tricks here:
        # 1. planar.Polygon is sort of like a sequence of sequences, but not
        # quite enough apparently.
        # 2. geos.Polygon requires a closed ring (i.e., a[0] == a[-1]).
        return geos.Polygon([tuple(i) for i in self] + [tuple(self[0])])
def chisq_crit2(confidence):
    '''Return the critical value of a chi-squared distribution with 2 degrees
    of freedom at a given confidence level (e.g., 0.95).

    Typically, you would look this up in a table, but when DOF = 2 the CDF
    inverts in closed form: crit = -2 ln(1 - confidence).  math.log1p(-c)
    is used instead of math.log(1 - c) to avoid precision loss when the
    confidence level is very close to 1.'''
    return -2.0 * math.log1p(-confidence)
def ellipse_1(mean, covar, confidence):
    '''Return an ellipse of the given confidence for the given Gaussian.

    mean and covar are the 2-D Gaussian's mean and covariance matrix, and
    confidence is a level such as 0.95.  The result is a geos.Polygon with
    POLYGON_EDGES vertices: a unit circle scaled along the covariance
    eigenvectors by sqrt(crit * eigenvalue), rotated to match, and
    translated to the mean.'''
    # Draw a circle of radius 1 around the origin.
    cir = My_Polygon.regular(POLYGON_EDGES, radius=1)
    # Compute ellipse parameters.
    (eivals, eivecs) = np.linalg.eigh(covar)
    crit = chisq_crit2(confidence)
    # Semi-axis lengths: sqrt of critical value times each eigenvalue.
    scale0 = math.sqrt(crit * eivals[0])
    scale1 = math.sqrt(crit * eivals[1])
    angle = planar.Vec2(eivecs[0][0], eivecs[0][1]).angle
    # Transform the circle into an ellipse. NOTE: Originally, I set this up to
    # build a full transformation matrix and then multiply the polygon by that
    # composite matrix. However, the order of operations was really wierd --
    # e.g., t * s * r works, even though the transformation order is s, r, t. I
    # think this is due to some mismatch between the associativity of the
    # multiplication operator and calls to the __mult__() special method.
    # Anyway, this way is a lot easier to follow.
    return (planar.Affine.translation(mean)
            * (planar.Affine.rotation(angle)
               * (planar.Affine.scale((scale0, scale1)) * cir))).as_geos
def ellipses_n(gmm, confidence):
    '''Return a (perhaps noncontiguous) confidence region for the given mixture
    model at the given confidence. To do this, we compute the union of the
    confidence ellipses for each component. Thus, the confidence of a given
    component region is *not* proportional to its area.'''
    # FIXME: need to consider weight of each component!
    # NOTE(review): deliberately disabled (and the FIXME above unresolved);
    # remove this assert only once weighting is handled and the code tested.
    assert False, 'untested'
    ells = []
    for i in range(gmm.n_components):
        ells.append(ellipse_1(gmm.means_[i], gmm.covars_[i], confidence))
    return geos.MultiPolygon(ells).cascaded_union
# If run as a script, do a simple test.
if (__name__ == '__main__'):
import matplotlib.pyplot as plt
import sklearn.mixture
data = [(0,0), (1,0), (1,1), (1,2), (2,2), (2,3)]
gmm = sklearn.mixture.GMM(1, 'full')
gmm.fit(data)
mean = gmm.means_[0]
covar = gmm.covars_[0]
fig = plt.figure(1)
plt.axis('equal')
plt.scatter([i[0] for i in data], [i[1] for i in data])
points95 = ellipse_1(mean, covar, 0.95).coords[0]
plt.scatter([i[0] for i in points95], [i[1] for i in points95], c='red')
points99 = ellipse_1(mean, covar, 0.99).coords[0]
plt.scatter([i[0] for i in points99], [i[1] for i in points99], c='green')
plt.show()
|
TelekomCloud/pony-express | refs/heads/master | ponyexpress/models/__init__.py | 1 | from ponyexpress.models.repository import Repository
from ponyexpress.models.repo_history import RepoHistory
from ponyexpress.models.package import Package
from ponyexpress.models.package_history import PackageHistory
from ponyexpress.models.node import Node
|
kawasaki2013/python-for-android-x86 | refs/heads/master | python-modules/twisted/twisted/test/test_persisted.py | 60 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# System Imports
import sys
from twisted.trial import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.persisted import styles, aot, crefutil
class VersionTestCase(unittest.TestCase):
def testNullVersionUpgrade(self):
global NullVersioned
class NullVersioned:
ok = 0
pkcl = pickle.dumps(NullVersioned())
class NullVersioned(styles.Versioned):
persistenceVersion = 1
def upgradeToVersion1(self):
self.ok = 1
mnv = pickle.loads(pkcl)
styles.doUpgrade()
assert mnv.ok, "initial upgrade not run!"
def testVersionUpgrade(self):
global MyVersioned
class MyVersioned(styles.Versioned):
persistenceVersion = 2
persistenceForgets = ['garbagedata']
v3 = 0
v4 = 0
def __init__(self):
self.somedata = 'xxx'
self.garbagedata = lambda q: 'cant persist'
def upgradeToVersion3(self):
self.v3 += 1
def upgradeToVersion4(self):
self.v4 += 1
mv = MyVersioned()
assert not (mv.v3 or mv.v4), "hasn't been upgraded yet"
pickl = pickle.dumps(mv)
MyVersioned.persistenceVersion = 4
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3, "didn't do version 3 upgrade"
assert obj.v4, "didn't do version 4 upgrade"
pickl = pickle.dumps(obj)
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3 == 1, "upgraded unnecessarily"
assert obj.v4 == 1, "upgraded unnecessarily"
def testNonIdentityHash(self):
global ClassWithCustomHash
class ClassWithCustomHash(styles.Versioned):
def __init__(self, unique, hash):
self.unique = unique
self.hash = hash
def __hash__(self):
return self.hash
v1 = ClassWithCustomHash('v1', 0)
v2 = ClassWithCustomHash('v2', 0)
pkl = pickle.dumps((v1, v2))
del v1, v2
ClassWithCustomHash.persistenceVersion = 1
ClassWithCustomHash.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
v1, v2 = pickle.loads(pkl)
styles.doUpgrade()
self.assertEquals(v1.unique, 'v1')
self.assertEquals(v2.unique, 'v2')
self.failUnless(v1.upgraded)
self.failUnless(v2.upgraded)
def testUpgradeDeserializesObjectsRequiringUpgrade(self):
global ToyClassA, ToyClassB
class ToyClassA(styles.Versioned):
pass
class ToyClassB(styles.Versioned):
pass
x = ToyClassA()
y = ToyClassB()
pklA, pklB = pickle.dumps(x), pickle.dumps(y)
del x, y
ToyClassA.persistenceVersion = 1
def upgradeToVersion1(self):
self.y = pickle.loads(pklB)
styles.doUpgrade()
ToyClassA.upgradeToVersion1 = upgradeToVersion1
ToyClassB.persistenceVersion = 1
ToyClassB.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
x = pickle.loads(pklA)
styles.doUpgrade()
self.failUnless(x.y.upgraded)
class MyEphemeral(styles.Ephemeral):
    """An Ephemeral subclass holding a single value; EphemeralTestCase checks
    that unpickling degrades instances to plain styles.Ephemeral."""
    def __init__(self, x):
        self.x = x
class EphemeralTestCase(unittest.TestCase):
def testEphemeral(self):
o = MyEphemeral(3)
self.assertEquals(o.__class__, MyEphemeral)
self.assertEquals(o.x, 3)
pickl = pickle.dumps(o)
o = pickle.loads(pickl)
self.assertEquals(o.__class__, styles.Ephemeral)
self.assert_(not hasattr(o, 'x'))
class Pickleable:
    """A trivially picklable class wrapping a single value."""

    def __init__(self, x):
        self.x = x

    def getX(self):
        """Return the wrapped value."""
        return self.x
class A:
"""
dummy class
"""
def amethod(self):
pass
class B:
"""
dummy class
"""
def bmethod(self):
pass
def funktion():
pass
class PicklingTestCase(unittest.TestCase):
"""Test pickling of extra object types."""
def testModule(self):
pickl = pickle.dumps(styles)
o = pickle.loads(pickl)
self.assertEquals(o, styles)
def testClassMethod(self):
pickl = pickle.dumps(Pickleable.getX)
o = pickle.loads(pickl)
self.assertEquals(o, Pickleable.getX)
def testInstanceMethod(self):
obj = Pickleable(4)
pickl = pickle.dumps(obj.getX)
o = pickle.loads(pickl)
self.assertEquals(o(), 4)
self.assertEquals(type(o), type(obj.getX))
def testStringIO(self):
f = StringIO.StringIO()
f.write("abc")
pickl = pickle.dumps(f)
o = pickle.loads(pickl)
self.assertEquals(type(o), type(f))
self.assertEquals(f.getvalue(), "abc")
class EvilSourceror:
    """An object whose attribute graph contains cycles back to itself;
    used to exercise circular-reference handling when jellying."""

    def __init__(self, x):
        # Both a and b point back at this instance (the original wrote
        # self.a.b = self, which is the same assignment since a is self).
        self.a = self
        self.b = self
        self.c = x
class NonDictState:
    """A class whose pickle state is an arbitrary object rather than its
    __dict__, via the __getstate__/__setstate__ protocol."""

    def __getstate__(self):
        return self.state

    def __setstate__(self, value):
        self.state = value
class AOTTestCase(unittest.TestCase):
def testSimpleTypes(self):
obj = (1, 2.0, 3j, True, slice(1, 2, 3), 'hello', u'world', sys.maxint + 1, None, Ellipsis)
rtObj = aot.unjellyFromSource(aot.jellyToSource(obj))
self.assertEquals(obj, rtObj)
def testMethodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = aot.unjellyFromSource(aot.jellyToSource(b)).a.bmethod
self.assertEquals(im_.im_class, im_.im_self.__class__)
def test_methodNotSelfIdentity(self):
"""
If a class change after an instance has been created,
L{aot.unjellyFromSource} shoud raise a C{TypeError} when trying to
unjelly the instance.
"""
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
savedbmethod = B.bmethod
del B.bmethod
try:
self.assertRaises(TypeError, aot.unjellyFromSource,
aot.jellyToSource(b))
finally:
B.bmethod = savedbmethod
def test_unsupportedType(self):
"""
L{aot.jellyToSource} should raise a C{TypeError} when trying to jelly
an unknown type.
"""
try:
set
except:
from sets import Set as set
self.assertRaises(TypeError, aot.jellyToSource, set())
def testBasicIdentity(self):
# Anyone wanting to make this datastructure more complex, and thus this
# test more comprehensive, is welcome to do so.
aj = aot.AOTJellier().jellyToAO
d = {'hello': 'world', "method": aj}
l = [1, 2, 3,
"he\tllo\n\n\"x world!",
u"goodbye \n\t\u1010 world!",
1, 1.0, 100 ** 100l, unittest, aot.AOTJellier, d,
funktion
]
t = tuple(l)
l.append(l)
l.append(t)
l.append(t)
uj = aot.unjellyFromSource(aot.jellyToSource([l, l]))
assert uj[0] is uj[1]
assert uj[1][0:5] == l[0:5]
def testNonDictState(self):
a = NonDictState()
a.state = "meringue!"
assert aot.unjellyFromSource(aot.jellyToSource(a)).state == a.state
def testCopyReg(self):
s = "foo_bar"
sio = StringIO.StringIO()
sio.write(s)
uj = aot.unjellyFromSource(aot.jellyToSource(sio))
# print repr(uj.__dict__)
assert uj.getvalue() == s
def testFunkyReferences(self):
o = EvilSourceror(EvilSourceror([]))
j1 = aot.jellyToAOT(o)
oj = aot.unjellyFromAOT(j1)
assert oj.a is oj
assert oj.a.b is oj.b
assert oj.c is not oj.c.c
class CrefUtilTestCase(unittest.TestCase):
    """
    Tests for L{crefutil}.
    """
    def test_dictUnknownKey(self):
        """
        L{crefutil._DictKeyAndValue} only supports keys C{0} and C{1};
        any other key raises C{RuntimeError}.
        """
        d = crefutil._DictKeyAndValue({})
        self.assertRaises(RuntimeError, d.__setitem__, 2, 3)

    def test_deferSetMultipleTimes(self):
        """
        L{crefutil._Defer} can be assigned a key only one time; a second
        assignment raises C{RuntimeError}.
        """
        d = crefutil._Defer()
        d[0] = 1
        self.assertRaises(RuntimeError, d.__setitem__, 0, 1)
testCases = [VersionTestCase, EphemeralTestCase, PicklingTestCase]
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/smtpd.py | 174 | #! /usr/bin/env python
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
    """A file-like sink that silently discards everything written to it
    (used as the default DEBUGSTREAM)."""

    def write(self, msg):
        """Discard msg."""
        pass

    def flush(self):
        """No-op; nothing is ever buffered."""
        pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
    # Print the module docstring (the usage text) with module globals
    # interpolated, optionally followed by an error message, then exit
    # with the given status code.  (Python 2 ``print >>`` syntax.)
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)
class SMTPChannel(asynchat.async_chat):
    """One SMTP conversation with a single connected client.

    Implements the server side of the minimal RFC 821 command set as an
    asynchat line-oriented state machine: input is collected until the
    current terminator fires, then handled either as a command line
    (COMMAND state) or as the message payload (DATA state).
    """
    # Protocol states: parsing command lines vs. collecting message body.
    COMMAND = 0
    DATA = 1
    def __init__(self, server, conn, addr):
        asynchat.async_chat.__init__(self, conn)
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []            # accumulated chunks of the current line
        self.__state = self.COMMAND
        self.__greeting = 0         # set to the HELO argument once greeted
        self.__mailfrom = None      # envelope sender of the open transaction
        self.__rcpttos = []         # envelope recipients of the open transaction
        self.__data = ''
        self.__fqdn = socket.getfqdn()
        try:
            self.__peer = conn.getpeername()
        except socket.error, err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            if err[0] != errno.ENOTCONN:
                raise
            return
        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
        # Greet the client immediately with the 220 service-ready banner.
        self.push('220 %s %s' % (self.__fqdn, __version__))
        self.set_terminator('\r\n')
    # Overrides base class for convenience: appends CRLF to every reply.
    def push(self, msg):
        asynchat.async_chat.push(self, msg + '\r\n')
    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        self.__line.append(data)
    # Implementation of base class abstract method.  Called once per
    # complete line (COMMAND) or complete message body (DATA).
    def found_terminator(self):
        line = EMPTYSTRING.join(self.__line)
        print >> DEBUGSTREAM, 'Data:', repr(line)
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Dispatch to the smtp_<COMMAND> handler; unknown verbs get 502.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            # Hand the complete message to the server backend; a false
            # (None/empty) status means success.
            status = self.__server.process_message(self.__peer,
                                                   self.__mailfrom,
                                                   self.__rcpttos,
                                                   self.__data)
            # Reset the envelope for the next transaction on this channel.
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator('\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)
    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)
    def smtp_NOOP(self, arg):
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')
    def smtp_QUIT(self, arg):
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()
    # factored: parse an address out of a 'FROM:<...>'/'TO:<...>' argument
    def __getaddr(self, keyword, arg):
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <person@dom.com> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address
    def smtp_MAIL(self, arg):
        print >> DEBUGSTREAM, '===> MAIL', arg
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
        self.push('250 Ok')
    def smtp_RCPT(self, arg):
        print >> DEBUGSTREAM, '===> RCPT', arg
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
        self.push('250 Ok')
    def smtp_RSET(self, arg):
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')
    def smtp_DATA(self, arg):
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        # Switch to DATA state; the body ends at the lone-dot terminator.
        self.__state = self.DATA
        self.set_terminator('\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
    """Asyncore listener that spawns an SMTPChannel per incoming connection.

    Abstract base: subclasses implement process_message() to decide what
    to do with each fully-received message.
    """
    def __init__(self, localaddr, remoteaddr):
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            print >> DEBUGSTREAM, \
                  '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr)
    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
            # The channel registers itself with the asyncore loop; no
            # reference needs to be kept here.
            channel = SMTPChannel(self, conn, addr)
    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Override this abstract method to handle messages from the client.

        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.

        mailfrom is the raw address the client claims the message is coming
        from.

        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.

        data is a string containing the entire full text of the message,
        headers (if supplied) and all. It has been `de-transparencied'
        according to RFC 821, Section 4.5.2. In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.

        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.
        """
        raise NotImplementedError
class DebuggingServer(SMTPServer):
    """Backend that simply prints each received message to stdout."""
    # Do something with the gathered message
    def process_message(self, peer, mailfrom, rcpttos, data):
        inheaders = 1
        lines = data.split('\n')
        print '---------- MESSAGE FOLLOWS ----------'
        for line in lines:
            # headers first: inject an X-Peer header at the end of the
            # header block (the first empty line).
            if inheaders and not line:
                print 'X-Peer:', peer[0]
                inheaders = 0
            print line
        print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
    """Backend that relays every message to the real smtpd at remoteaddr."""
    def process_message(self, peer, mailfrom, rcpttos, data):
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        # Record the original client in an X-Peer header before relaying.
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print >> DEBUGSTREAM, 'we got some refusals:', refused
    def _deliver(self, mailfrom, rcpttos, data):
        # Relay via smtplib; returns a dict mapping each refused
        # recipient to an (error code, error message) pair.
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                s.quit()
        except smtplib.SMTPRecipientsRefused, e:
            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
            refused = e.recipients
        except (socket.error, smtplib.SMTPException), e:
            print >> DEBUGSTREAM, 'got', e.__class__
            # All recipients were refused. If the exception had an associated
            # error code, use it. Otherwise, fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
class MailmanProxy(PureProxy):
    """Backend that delivers list-destined mail directly to GNU Mailman.

    Recipients that match an existing Mailman list (or one of its
    -admin/-owner/-request/-join/-leave aliases) are enqueued via the
    Mailman API; everything else is relayed like PureProxy.
    """
    def process_message(self, peer, mailfrom, rcpttos, data):
        from cStringIO import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left, forward them on.
        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print >> DEBUGSTREAM, 'we got refusals:', refused
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.getheader('from'):
            msg['From'] = mailfrom
        if not msg.getheader('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print >> DEBUGSTREAM, 'sending message to', rcpt
            # Cache opened (unlocked) MailList objects per listname.
            mlist = mlists.get(listname)
            if not mlist:
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
class Options:
    """Holder for command-line option defaults.

    parseargs() overwrites these class attributes (and adds the
    local/remote host and port attributes) from the parsed flags.
    """

    # Drop privileges to user 'nobody' after binding, unless -n/--nosetuid.
    setuid = 1
    # Name of the concrete SMTPServer subclass to run (-c/--class).
    classname = 'PureProxy'
def parseargs():
    # Parse sys.argv into an Options instance; exits through usage() on
    # any error.  Side effect: -d/--debug rebinds the global DEBUGSTREAM
    # to sys.stderr so the debug prints become visible.
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:d',
            ['class=', 'nosetuid', 'version', 'help', 'debug'])
    except getopt.error, e:
        usage(1, e)
    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print >> sys.stderr, __version__
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = 0
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            DEBUGSTREAM = sys.stderr
    # parse the rest of the arguments: [localhost:localport [remotehost:remoteport]]
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
    # split into host/port pairs
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
if __name__ == '__main__':
    options = parseargs()
    # Resolve the proxy class: either a dotted path to import, or a
    # name defined in this module.
    classname = options.classname
    if "." in classname:
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport))
    # Become nobody: drop root privileges after binding the port.
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                  'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                  'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    # Run the asyncore event loop until interrupted.
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
|
TangHao1987/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/us/forms.py | 229 | """
USA-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, CharField
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
    """Form field validating a U.S. ZIP code (XXXXX or XXXXX-XXXX)."""

    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
    }

    def __init__(self, *args, **kwargs):
        # Five digits, optionally followed by a hyphen and four more.
        zip_pattern = r'^\d{5}(?:-\d{4})?$'
        super(USZipCodeField, self).__init__(
            zip_pattern, max_length=None, min_length=None, *args, **kwargs)
class USPhoneNumberField(CharField):
    """Form field that validates and normalizes U.S. phone numbers.

    Accepts common punctuation (parentheses, spaces, dots, dashes) and
    normalizes the value to ``XXX-XXX-XXXX``.
    """

    default_error_messages = {
        'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
    }

    def clean(self, value):
        super(USPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Drop parentheses and whitespace before matching the digits.
        stripped = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
        match = phone_digits_re.search(stripped)
        if match is None:
            raise ValidationError(self.error_messages['invalid'])
        return u'%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
class USSocialSecurityNumberField(Field):
    """A United States Social Security number.

    A value is accepted (and normalized to XXX-XX-XXXX) only if:

    * it conforms to the XXX-XX-XXXX format (separators optional),
    * no group consists entirely of zeroes,
    * the leading group is not "666" (that block is never allocated),
    * it is not in the permanently invalid promotional block
      987-65-4320 through 987-65-4329, and
    * it is not a number known to be invalid through widespread
      promotional use (the Woolworth's number and the 1962
      promotional number).
    """

    default_error_messages = {
        'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
    }

    def clean(self, value):
        super(USSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = ssn_re.match(value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        area = match.group('area')
        group = match.group('group')
        serial = match.group('serial')
        # First pass: reject any number containing an all-zero group.
        if area == '000' or group == '00' or serial == '0000':
            raise ValidationError(self.error_messages['invalid'])
        # Second pass: promotional and otherwise permanently invalid numbers.
        promotional = (area == '987' and group == '65'
                       and 4320 <= int(serial) <= 4329)
        if (area == '666' or promotional or
                value == '078-05-1120' or value == '219-09-9999'):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s-%s-%s' % (area, group, serial)
class USStateField(Field):
    """Form field accepting a U.S. state name or abbreviation.

    Normalizes valid input to the standard two-letter postal service
    abbreviation for the state.
    """

    default_error_messages = {
        'invalid': _('Enter a U.S. state or territory.'),
    }

    def clean(self, value):
        from us_states import STATES_NORMALIZED
        super(USStateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            normalized = value.strip().lower()
        except AttributeError:
            # Non-string input falls through to the invalid error below.
            pass
        else:
            try:
                # strip().lower() is idempotent, so the normalized value
                # is used directly as the lookup key.
                return STATES_NORMALIZED[normalized].decode('ascii')
            except KeyError:
                pass
        raise ValidationError(self.error_messages['invalid'])
class USStateSelect(Select):
    """Select widget whose choices are the U.S. states and territories."""

    def __init__(self, attrs=None):
        # Imported lazily so the choice table is only loaded when used.
        from us_states import STATE_CHOICES
        choices = STATE_CHOICES
        super(USStateSelect, self).__init__(attrs, choices=choices)
class USPSSelect(Select):
    """Select widget whose choices are the US Postal Service codes."""

    def __init__(self, attrs=None):
        # Imported lazily so the choice table is only loaded when used.
        from us_states import USPS_CHOICES
        choices = USPS_CHOICES
        super(USPSSelect, self).__init__(attrs, choices=choices)
|
jamestwebber/scipy | refs/heads/master | scipy/optimize/_hessian_update_strategy.py | 2 | """Hessian update strategies for quasi-Newton optimization methods."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from scipy.linalg import get_blas_funcs
from warnings import warn
__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
class HessianUpdateStrategy(object):
    """Interface for implementing Hessian update strategies.

    Quasi-Newton methods (BFGS, SR1, L-BFGS, ...) maintain an
    approximation to the Hessian or to its inverse.  Some of these
    approximations do not need to store the full matrix, or can apply
    the matrix to a vector very cheaply.  This abstract interface sits
    between the optimization algorithm and the update strategy, so each
    strategy is free to store and update its internal matrix as
    efficiently as possible.  Different initialization and update
    procedures yield different quasi-Newton strategies.

    Derived classes must implement four methods: ``initialize``,
    ``update``, ``dot`` and ``get_matrix``.

    Notes
    -----
    Any instance of a class implementing this interface can be accepted
    by the method ``minimize`` and used by the compatible solvers to
    approximate the Hessian (or inverse Hessian) used by the
    optimization algorithms.
    """

    def initialize(self, n, approx_type):
        """Allocate internal storage for an ``n``-dimensional problem.

        Parameters
        ----------
        n : int
            Problem dimension.
        approx_type : {'hess', 'inv_hess'}
            'hess' means the Hessian itself is stored and updated;
            'inv_hess' means its inverse is used instead.
        """
        raise NotImplementedError("The method ``initialize(n, approx_type)``"
                                  " is not implemented.")

    def update(self, delta_x, delta_grad):
        """Update the internal matrix from the last two evaluated points.

        Parameters
        ----------
        delta_x : ndarray
            Difference between the two points the gradient was evaluated
            at: ``delta_x = x2 - x1``.
        delta_grad : ndarray
            Difference between the gradients:
            ``delta_grad = grad(x2) - grad(x1)``.
        """
        raise NotImplementedError("The method ``update(delta_x, delta_grad)``"
                                  " is not implemented.")

    def dot(self, p):
        """Return the product of the internal matrix with vector ``p``.

        Parameters
        ----------
        p : array_like
            1-D array representing a vector.

        Returns
        -------
        Hp : array
            1-D array: the approximation matrix multiplied by ``p``.
        """
        raise NotImplementedError("The method ``dot(p)``"
                                  " is not implemented.")

    def get_matrix(self):
        """Return the current internal matrix as a dense array.

        Returns
        -------
        H : ndarray, shape (n, n)
            Dense matrix containing either the Hessian or its inverse
            (depending on how 'approx_type' is defined).
        """
        raise NotImplementedError("The method ``get_matrix(p)``"
                                  " is not implemented.")
class FullHessianUpdateStrategy(HessianUpdateStrategy):
    """Hessian update strategy with full dimensional internal representation.

    Stores the approximation as a dense symmetric n x n array (``B`` for
    the Hessian, ``H`` for its inverse) and updates it in place through
    symmetric BLAS routines, which only reference one triangle of the
    array.
    """
    _syr = get_blas_funcs('syr', dtype='d')  # Symmetric rank 1 update
    _syr2 = get_blas_funcs('syr2', dtype='d')  # Symmetric rank 2 update
    # Symmetric matrix-vector product
    _symv = get_blas_funcs('symv', dtype='d')
    def __init__(self, init_scale='auto'):
        self.init_scale = init_scale
        # Until initialize is called we can't really use the class,
        # so it makes sense to set everything to None.
        self.first_iteration = None
        self.approx_type = None
        self.B = None
        self.H = None
    def initialize(self, n, approx_type):
        """Initialize internal matrix.

        Allocate internal memory for storing and updating
        the Hessian or its inverse.

        Parameters
        ----------
        n : int
            Problem dimension.
        approx_type : {'hess', 'inv_hess'}
            Selects either the Hessian or the inverse Hessian.
            When set to 'hess' the Hessian will be stored and updated.
            When set to 'inv_hess' its inverse will be used instead.
        """
        self.first_iteration = True
        self.n = n
        self.approx_type = approx_type
        if approx_type not in ('hess', 'inv_hess'):
            raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
        # Create matrix (identity; rescaled on the first update).
        if self.approx_type == 'hess':
            self.B = np.eye(n, dtype=float)
        else:
            self.H = np.eye(n, dtype=float)
    def _auto_scale(self, delta_x, delta_grad):
        # Heuristic to scale matrix at first iteration.
        # Described in Nocedal and Wright "Numerical Optimization"
        # p.143 formula (6.20).
        s_norm2 = np.dot(delta_x, delta_x)
        y_norm2 = np.dot(delta_grad, delta_grad)
        ys = np.abs(np.dot(delta_grad, delta_x))
        # Degenerate step/gradient difference: fall back to unit scale.
        if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
            return 1
        if self.approx_type == 'hess':
            return y_norm2 / ys
        else:
            return ys / y_norm2
    def _update_implementation(self, delta_x, delta_grad):
        # Hook implemented by concrete strategies (BFGS, SR1, ...).
        raise NotImplementedError("The method ``_update_implementation``"
                                  " is not implemented.")
    def update(self, delta_x, delta_grad):
        """Update internal matrix.

        Update Hessian matrix or its inverse (depending on how 'approx_type'
        is defined) using information about the last evaluated points.

        Parameters
        ----------
        delta_x : ndarray
            The difference between two points the gradient
            function have been evaluated at: ``delta_x = x2 - x1``.
        delta_grad : ndarray
            The difference between the gradients:
            ``delta_grad = grad(x2) - grad(x1)``.
        """
        # A zero step carries no curvature information; nothing to do.
        if np.all(delta_x == 0.0):
            return
        if np.all(delta_grad == 0.0):
            warn('delta_grad == 0.0. Check if the approximated '
                 'function is linear. If the function is linear '
                 'better results can be obtained by defining the '
                 'Hessian as zero instead of using quasi-Newton '
                 'approximations.', UserWarning)
            return
        if self.first_iteration:
            # Get user specific scale
            if self.init_scale == "auto":
                scale = self._auto_scale(delta_x, delta_grad)
            else:
                scale = float(self.init_scale)
            # Scale initial matrix with ``scale * np.eye(n)``
            if self.approx_type == 'hess':
                self.B *= scale
            else:
                self.H *= scale
            self.first_iteration = False
        self._update_implementation(delta_x, delta_grad)
    def dot(self, p):
        """Compute the product of the internal matrix with the given vector.

        Parameters
        ----------
        p : array_like
            1-D array representing a vector.

        Returns
        -------
        Hp : array
            1-D represents the result of multiplying the approximation matrix
            by vector p.
        """
        # symv only reads the stored triangle of the symmetric matrix.
        if self.approx_type == 'hess':
            return self._symv(1, self.B, p)
        else:
            return self._symv(1, self.H, p)
    def get_matrix(self):
        """Return the current internal matrix.

        Returns
        -------
        M : ndarray, shape (n, n)
            Dense matrix containing either the Hessian or its inverse
            (depending on how `approx_type` was defined).
        """
        if self.approx_type == 'hess':
            M = np.copy(self.B)
        else:
            M = np.copy(self.H)
        # Mirror the upper triangle into the lower one, since the BLAS
        # updates only maintain one triangle of the symmetric matrix.
        li = np.tril_indices_from(M, k=-1)
        M[li] = M.T[li]
        return M
class BFGS(FullHessianUpdateStrategy):
    """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.

    Parameters
    ----------
    exception_strategy : {'skip_update', 'damp_update'}, optional
        Define how to proceed when the curvature condition is violated.
        Set it to 'skip_update' to just skip the update. Or, alternatively,
        set it to 'damp_update' to interpolate between the actual BFGS
        result and the unmodified matrix. Both exception strategies
        are explained in [1]_, p.536-537.
    min_curvature : float
        This number, scaled by a normalization factor, defines the
        minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
        unaffected by the exception strategy. By default is equal to
        1e-8 when ``exception_strategy = 'skip_update'`` and equal
        to 0.2 when ``exception_strategy = 'damp_update'``.
    init_scale : {float, 'auto'}
        Matrix scale at first iteration. At the first
        iteration the Hessian matrix or its inverse will be initialized
        with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
        Set it to 'auto' in order to use an automatic heuristic for choosing
        the initial scale. The heuristic is described in [1]_, p.143.
        By default uses 'auto'.

    Notes
    -----
    The update is based on the description in [1]_, p.140.

    References
    ----------
    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """
    def __init__(self, exception_strategy='skip_update', min_curvature=None,
                 init_scale='auto'):
        # Pick the default minimum curvature appropriate to the chosen
        # exception strategy when the user did not supply one.
        if exception_strategy == 'skip_update':
            if min_curvature is not None:
                self.min_curvature = min_curvature
            else:
                self.min_curvature = 1e-8
        elif exception_strategy == 'damp_update':
            if min_curvature is not None:
                self.min_curvature = min_curvature
            else:
                self.min_curvature = 0.2
        else:
            raise ValueError("`exception_strategy` must be 'skip_update' "
                             "or 'damp_update'.")
        super(BFGS, self).__init__(init_scale)
        self.exception_strategy = exception_strategy
    def _update_inverse_hessian(self, ys, Hy, yHy, s):
        """Update the inverse Hessian matrix.

        BFGS update using the formula:

            ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
                     - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``

        where ``s = delta_x`` and ``y = delta_grad``. This formula is
        equivalent to (6.17) in [1]_ written in a more efficient way
        for implementation.

        References
        ----------
        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
               Second Edition (2006).
        """
        self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
        self.H = self._syr((ys+yHy)/ys**2, s, a=self.H)
    def _update_hessian(self, ys, Bs, sBs, y):
        """Update the Hessian matrix.

        BFGS update using the formula:

            ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``

        where ``s`` is short for ``delta_x`` and ``y`` is short
        for ``delta_grad``. Formula (6.19) in [1]_.

        References
        ----------
        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
               Second Edition (2006).
        """
        self.B = self._syr(1.0 / ys, y, a=self.B)
        self.B = self._syr(-1.0 / sBs, Bs, a=self.B)
    def _update_implementation(self, delta_x, delta_grad):
        # Auxiliary variables w and z: the same code path updates either
        # B (Hessian) or H (inverse Hessian); only the roles swap.
        if self.approx_type == 'hess':
            w = delta_x
            z = delta_grad
        else:
            w = delta_grad
            z = delta_x
        # Do some common operations
        wz = np.dot(w, z)
        Mw = self.dot(w)
        wMw = Mw.dot(w)
        # Guarantee that wMw > 0 by reinitializing matrix.
        # While this is always true in exact arithmetics,
        # indefinite matrix may appear due to roundoff errors.
        if wMw <= 0.0:
            scale = self._auto_scale(delta_x, delta_grad)
            # Reinitialize matrix
            if self.approx_type == 'hess':
                self.B = scale * np.eye(self.n, dtype=float)
            else:
                self.H = scale * np.eye(self.n, dtype=float)
            # Do common operations for new matrix
            Mw = self.dot(w)
            wMw = Mw.dot(w)
        # Check if curvature condition is violated
        if wz <= self.min_curvature * wMw:
            # If the option 'skip_update' is set
            # we just skip the update when the condition
            # is violated.
            if self.exception_strategy == 'skip_update':
                return
            # If the option 'damp_update' is set we
            # interpolate between the actual BFGS
            # result and the unmodified matrix.
            elif self.exception_strategy == 'damp_update':
                update_factor = (1-self.min_curvature) / (1 - wz/wMw)
                z = update_factor*z + (1-update_factor)*Mw
                wz = np.dot(w, z)
        # Update matrix
        if self.approx_type == 'hess':
            self._update_hessian(wz, Mw, wMw, z)
        else:
            self._update_inverse_hessian(wz, Mw, wMw, z)
class SR1(FullHessianUpdateStrategy):
    """Symmetric-rank-1 Hessian update strategy.

    Parameters
    ----------
    min_denominator : float
        This number, scaled by a normalization factor, defines the
        minimum denominator magnitude allowed in the update.  The update
        is skipped when the condition is violated.  Defaults to ``1e-8``.
    init_scale : {float, 'auto'}, optional
        Matrix scale at the first iteration, where the Hessian (or its
        inverse) is initialized to ``init_scale*np.eye(n)`` for problem
        dimension ``n``.  'auto' (the default) selects the scale with an
        automatic heuristic described in [1]_, p.143.

    Notes
    -----
    The update is based on the description in [1]_, p.144-146.

    References
    ----------
    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """

    def __init__(self, min_denominator=1e-8, init_scale='auto'):
        self.min_denominator = min_denominator
        super(SR1, self).__init__(init_scale)

    def _update_implementation(self, delta_x, delta_grad):
        # The same rank-1 formula updates either B (Hessian) or H
        # (inverse Hessian); only the roles of the two vectors swap.
        if self.approx_type == 'hess':
            vec, target = delta_x, delta_grad
        else:
            vec, target = delta_grad, delta_x
        residual = target - self.dot(vec)
        denominator = np.dot(vec, residual)
        # Skip the update when the denominator is tiny relative to the
        # norms involved, to keep the update numerically stable.
        if np.abs(denominator) <= self.min_denominator*norm(vec)*norm(residual):
            return
        # Rank-1 symmetric update of the stored matrix.
        if self.approx_type == 'hess':
            self.B = self._syr(1/denominator, residual, a=self.B)
        else:
            self.H = self._syr(1/denominator, residual, a=self.H)
|
lisael/pg-django | refs/heads/master | tests/modeltests/str/tests.py | 34 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.test import TestCase
from .models import Article, InternationalArticle
class SimpleTests(TestCase):
    # Verifies Article __str__/__repr__ output.  (Python 2 semantics:
    # str() of a model returns the UTF-8 encoded bytes of __unicode__().)
    def test_basic(self):
        a = Article.objects.create(
            headline='Area man programs in Python',
            pub_date=datetime.datetime(2005, 7, 28)
        )
        self.assertEqual(str(a), 'Area man programs in Python')
        self.assertEqual(repr(a), '<Article: Area man programs in Python>')
    def test_international(self):
        a = InternationalArticle.objects.create(
            headline=u'Girl wins €12.500 in lottery',
            pub_date=datetime.datetime(2005, 7, 28)
        )
        # The default str() output will be the UTF-8 encoded output of __unicode__().
        self.assertEqual(str(a), 'Girl wins \xe2\x82\xac12.500 in lottery') |
vladimir-smirnov-sociomantic/graphite-api | refs/heads/master | graphite_api/intervals.py | 14 | INFINITY = float('inf')
NEGATIVE_INFINITY = -INFINITY
class IntervalSet(object):
    """An ordered collection of disjoint Interval objects.

    Overlapping members are merged on construction unless the caller
    asserts the input is already disjoint.  ``size`` is the total
    covered length.
    """
    __slots__ = ('intervals', 'size')

    def __init__(self, intervals, disjoint=False):
        if disjoint:
            self.intervals = intervals
        else:
            self.intervals = union_overlapping(intervals)
        self.size = sum(member.size for member in self.intervals)

    def __repr__(self):
        return repr(self.intervals)

    def __eq__(self, other):
        return self.intervals == other.intervals

    def __iter__(self):
        return iter(self.intervals)

    def __bool__(self):
        # The set is truthy iff it covers a non-zero total length.
        return self.size != 0

    __nonzero__ = __bool__  # python 2

    def __sub__(self, other):
        # Set difference: everything in self that is not in other.
        return self.intersect(other.complement())

    def complement(self):
        """Return the IntervalSet covering everything this one does not."""
        gaps = []
        cursor = NEGATIVE_INFINITY
        for member in self.intervals:
            if cursor < member.start:
                gaps.append(Interval(cursor, member.start))
            cursor = member.end
        if cursor < INFINITY:
            gaps.append(Interval(cursor, INFINITY))
        return IntervalSet(gaps, disjoint=True)

    def intersect(self, other):
        # XXX The last major bottleneck. Factorial-time hell.
        # Then again, this function is entirely unused...
        if not self or not other:
            return IntervalSet([])
        overlaps = []
        for mine in self.intervals:
            for theirs in other.intervals:
                common = mine.intersect(theirs)
                if common:
                    overlaps.append(common)
        return IntervalSet(overlaps, disjoint=True)

    def intersect_interval(self, interval):
        """Return the parts of this set that fall inside *interval*."""
        pieces = []
        for member in self.intervals:
            common = member.intersect(interval)
            if common:
                pieces.append(common)
        return IntervalSet(pieces, disjoint=True)

    def union(self, other):
        """Return the IntervalSet covering both sets."""
        return IntervalSet(sorted(self.intervals + other.intervals))
class Interval(object):
    """A numeric interval [start, end] supporting set-style operations.

    Endpoints may be +/-inf.  ``size`` is ``end - start``; a zero-size
    interval is falsy.  Instances are hashable and compare equal when
    their (start, end) tuples are equal.
    """
    __slots__ = ('start', 'end', 'tuple', 'size')

    def __init__(self, start, end):
        """Create the interval; raise ValueError when end precedes start."""
        if end - start < 0:
            raise ValueError("Invalid interval start=%s end=%s" % (start, end))
        self.start = start
        self.end = end
        self.tuple = (start, end)
        self.size = self.end - self.start

    def __eq__(self, other):
        return self.tuple == other.tuple

    def __hash__(self):
        return hash(self.tuple)

    def __lt__(self, other):
        # BUG FIX: the previous implementation returned a cmp-style
        # (a < b) - (a > b) value of -1/0/1.  Both -1 and 1 are truthy,
        # so ``a < b`` held whenever the starts differed in EITHER
        # direction, which made sorting intervals (e.g. in
        # IntervalSet.union) unreliable.  __lt__ must return a bool.
        return self.start < other.start

    def __len__(self):
        raise TypeError("len() doesn't support infinite values, use the "
                        "'size' attribute instead")

    def __bool__(self):
        return self.size != 0

    __nonzero__ = __bool__  # python 2

    def __repr__(self):
        return '<Interval: %s>' % str(self.tuple)

    def intersect(self, other):
        """Return the overlapping Interval, or None if they are disjoint."""
        start = max(self.start, other.start)
        end = min(self.end, other.end)
        if end > start:
            return Interval(start, end)

    def overlaps(self, other):
        """Return True if the intervals share a point (touching counts)."""
        earlier = self if self.start <= other.start else other
        later = self if earlier is other else other
        return earlier.end >= later.start

    def union(self, other):
        """Return one Interval covering both; raises if they are disjoint."""
        if not self.overlaps(other):
            raise TypeError("Union of disjoint intervals is not an interval")
        start = min(self.start, other.start)
        end = max(self.end, other.end)
        return Interval(start, end)
def union_overlapping(intervals):
    """Union any overlapping intervals in the given set.

    Expects *intervals* ordered by start; consecutive overlapping intervals
    are merged so the result is a list of disjoint intervals.
    """
    merged = []
    for current in intervals:
        if merged and merged[-1].overlaps(current):
            # Extend the last merged interval instead of appending.
            merged[-1] = merged[-1].union(current)
        else:
            merged.append(current)
    return merged
|
xombiemp/CouchPotatoServer | refs/heads/master | libs/pyutil/test/current/json_tests/test_default.py | 106 | from unittest import TestCase
from pyutil import jsonutil as json
class TestDefault(TestCase):
    def test_default(self):
        # dumps(obj, default=repr) must serialize a non-serializable object
        # (here: the `type` builtin) via its repr, producing exactly the same
        # JSON as dumping repr(obj) directly.
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(
            json.dumps(type, default=repr),
            json.dumps(repr(type)))
|
Bysmyyr/chromium-crosswalk | refs/heads/master | tools/usb_gadget/echo_gadget.py | 43 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""USB echo gadget module.
This gadget has pairs of IN/OUT endpoints that echo packets back to the host.
"""
import uuid
import composite_gadget
import usb_constants
import usb_descriptors
class EchoCompositeFeature(composite_gadget.CompositeFeature):
  """Composite device feature that echos data back to the host.

  Depending on how many endpoint tuples are supplied, this feature exposes
  an interrupt, a bulk and an isochronous echo interface (in that order).
  Each tuple is (interface_number, interface_string_index,
  in_endpoint_address, out_endpoint_address).
  """

  def __init__(self, endpoints):
    """Create an echo gadget.

    Args:
      endpoints: Up to three (iface_num, iface_string, in_ep, out_ep) tuples
          describing the interrupt, bulk and isochronous interfaces.
    """
    fs_interfaces = []
    hs_interfaces = []

    if len(endpoints) >= 1:
      # Interrupt echo: 64-byte packets, 1 ms service interval. bInterval is
      # in frames at full speed but 2**(n-1) microframes at high speed,
      # hence 1 vs 4 for the same 1 ms interval.
      iface_num, iface_string, in_endpoint, out_endpoint = endpoints[0]
      fs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.INTERRUPT, 64, 1))
      hs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.INTERRUPT, 64, 4))

    if len(endpoints) >= 2:
      # Bulk echo: no polling interval; max packet is 64 bytes at full speed
      # and 512 bytes at high speed.
      iface_num, iface_string, in_endpoint, out_endpoint = endpoints[1]
      fs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.BULK, 64, 0))
      hs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.BULK, 512, 0))

    if len(endpoints) >= 3:
      # Isochronous echo. Alternate setting 0 has no endpoints so that no
      # isochronous bandwidth is reserved until the host selects alternate
      # setting 1.
      iface_num, iface_string, in_endpoint, out_endpoint = endpoints[2]
      fs_interfaces.append(self._Interface(iface_num, iface_string))
      fs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.ISOCHRONOUS, 1023, 1,
          alternate_setting=1))
      hs_interfaces.append(self._Interface(iface_num, iface_string))
      hs_interfaces.append(self._EchoInterface(
          iface_num, iface_string, in_endpoint, out_endpoint,
          usb_constants.TransferType.ISOCHRONOUS, 512, 4,
          alternate_setting=1))

    super(EchoCompositeFeature, self).__init__(fs_interfaces, hs_interfaces)

  @staticmethod
  def _Interface(iface_num, iface_string, alternate_setting=None):
    """Builds a vendor-class InterfaceDescriptor with no endpoints.

    bAlternateSetting is only passed through when explicitly requested so
    that the default descriptor is identical to one built without it.
    """
    kwargs = {
        'bInterfaceNumber': iface_num,
        'bInterfaceClass': usb_constants.DeviceClass.VENDOR,
        'bInterfaceSubClass': 0,
        'bInterfaceProtocol': 0,
        'iInterface': iface_string,
    }
    if alternate_setting is not None:
      kwargs['bAlternateSetting'] = alternate_setting
    return usb_descriptors.InterfaceDescriptor(**kwargs)

  @classmethod
  def _EchoInterface(cls, iface_num, iface_string, in_endpoint, out_endpoint,
                     transfer_type, packet_size, interval,
                     alternate_setting=None):
    """Builds an interface descriptor with an OUT/IN echo endpoint pair.

    The OUT endpoint is added before the IN endpoint, matching the
    descriptor layout the host expects from this gadget.
    """
    interface_desc = cls._Interface(iface_num, iface_string,
                                    alternate_setting)
    for address in (out_endpoint, in_endpoint):
      interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor(
          bEndpointAddress=address,
          bmAttributes=transfer_type,
          wMaxPacketSize=packet_size,
          bInterval=interval
      ))
    return interface_desc

  def ReceivePacket(self, endpoint, data):
    """Echo a packet back to the host.

    Args:
      endpoint: Incoming endpoint (must be an OUT pipe).
      data: Packet data.
    """
    assert endpoint & usb_constants.Dir.IN == 0

    self.SendPacket(endpoint | usb_constants.Dir.IN, data)
class EchoGadget(composite_gadget.CompositeGadget):
  """Echo gadget.
  """

  def __init__(self):
    """Create an echo gadget.
    """
    device_desc = usb_descriptors.DeviceDescriptor(
        idVendor=usb_constants.VendorID.GOOGLE,
        idProduct=usb_constants.ProductID.GOOGLE_ECHO_GADGET,
        bcdUSB=0x0200,
        iManufacturer=1,
        iProduct=2,
        iSerialNumber=3,
        bcdDevice=0x0100)

    feature = EchoCompositeFeature(
        endpoints=[(0, 4, 0x81, 0x01), (1, 5, 0x82, 0x02), (2, 6, 0x83, 0x03)])
    super(EchoGadget, self).__init__(device_desc, [feature])

    # String descriptors referenced by the device/interface descriptors above.
    for index, text in (
        (1, 'Google Inc.'),
        (2, 'Echo Gadget'),
        (3, '{:06X}'.format(uuid.getnode())),
        (4, 'Interrupt Echo'),
        (5, 'Bulk Echo'),
        (6, 'Isochronous Echo'),
    ):
      self.AddStringDescriptor(index, text)

    # Enable Microsoft OS Descriptors for Windows 8 and above.
    self.EnableMicrosoftOSDescriptorsV1(vendor_code=0x01)
    # These are used to force Windows to load WINUSB.SYS for the echo functions.
    for interface_number in (0, 1, 2):
      self.SetMicrosoftCompatId(interface_number, 'WINUSB')
    self.AddDeviceCapabilityDescriptor(usb_descriptors.ContainerIdDescriptor(
        ContainerID=uuid.uuid4().bytes_le))
def RegisterHandlers():
  """Registers web request handlers with the application server."""
  import server
  from tornado import web

  class WebConfigureHandler(web.RequestHandler):
    """POST handler that switches the active gadget to an EchoGadget."""

    def post(self):
      server.SwitchGadget(EchoGadget())

  handlers = [
      (r'/echo/configure', WebConfigureHandler),
  ]
  server.app.add_handlers('.*$', handlers)
|
mrquim/repository.mrquim | refs/heads/master | repo/script.module.schism.common/lib/js2py/translators/translating_nodes.py | 31 | from __future__ import unicode_literals
from pyjsparserdata import *
from friendly_nodes import *
import random
# number of characters above which expression will be split to multiple lines in order to avoid python parser stack overflow
# still experimental so I suggest to set it to 400 in order to avoid common errors
# set it to smaller value only if you have problems with parser stack overflow
LINE_LEN_LIMIT = 400 # 200 # or any other value - the larger the smaller probability of errors :)
class ForController:
    """Stack tracking whether translation is currently inside a JS for-loop,
    plus the update expression of the current loop."""

    def __init__(self):
        # Stack of flags; the top element answers is_inside().
        self.inside = [False]
        self.update = ''

    def enter_for(self, update):
        """Push 'inside a for loop' state and remember its update code."""
        self.inside.append(True)
        self.update = update

    def leave_for(self):
        """Pop the state pushed by enter_for()."""
        del self.inside[-1]

    def enter_other(self):
        """Push 'inside some other construct' state (masks any outer for)."""
        self.inside.append(False)

    def leave_other(self):
        """Pop the state pushed by enter_other()."""
        del self.inside[-1]

    def is_inside(self):
        """Return True iff the innermost entered construct is a for loop."""
        return self.inside[-1]
class InlineStack:
    """Collects definitions (functions, objects) that must be emitted on
    their own line before the expression that references them."""
    NAME = 'PyJs_%s_%d_'

    def __init__(self):
        self.reps = {}   # lval name -> definition source code
        self.names = []  # lval names in order of creation

    def inject_inlines(self, source):
        """Insert every recorded definition into *source* before the line
        that uses its lval."""
        for lval in self.names: # first in first out! Its important by the way
            source = inject_before_lval(source, lval, self.reps[lval])
        return source

    def require(self, typ):
        """Reserve and return a fresh, unique lval name for *typ*."""
        name = self.NAME % (typ, len(self.names))
        self.names.append(name)
        return name

    def define(self, name, val):
        """Attach definition code *val* to a previously required *name*."""
        self.reps[name] = val

    def reset(self):
        """Forget all recorded names and definitions.

        BUG FIX: this previously assigned to ``self.rel`` (a typo), which
        created a dead attribute and left ``self.reps`` holding stale
        definitions across resets.
        """
        self.reps = {}
        self.names = []
class ContextStack:
    """Accumulates variable names to register and hoisted function
    definitions for the scope (program or function) being translated."""

    def __init__(self):
        self.to_register = set([])
        self.to_define = {}

    def reset(self):
        """Clear all recorded names and definitions."""
        self.to_register = set([])
        self.to_define = {}

    def register(self, var):
        """Mark *var* as a variable that must be registered in this scope."""
        self.to_register.add(var)

    def define(self, name, code):
        """Record hoisted function *code* under *name* and register it."""
        self.to_define[name] = code
        self.register(name)

    def get_code(self):
        """Return the scope prologue: registration call + hoisted defs."""
        code = 'var.registers([%s])\n' % ', '.join(repr(e) for e in self.to_register)
        # .items() works on both Python 2 and 3; iteritems() is Python-2-only.
        for name, func_code in self.to_define.items():
            code += func_code
        return code
def clean_stacks():
    """Reset the module-level translation state with fresh, empty
    Context and inline_stack objects."""
    global Context, inline_stack
    Context, inline_stack = ContextStack(), InlineStack()
def to_key(literal_or_identifier):
    ''' returns string representation of this object'''
    # Implicitly returns None for any other node type; callers (e.g.
    # Property) treat None as "not a valid key".
    if literal_or_identifier['type']=='Identifier':
        # identifier: the name itself is the key
        return literal_or_identifier['name']
    elif literal_or_identifier['type']=='Literal':
        k = literal_or_identifier['value']
        if isinstance(k, float):
            # float_repr produces the canonical JS number representation
            return unicode(float_repr(k))
        elif 'regex' in literal_or_identifier:
            return compose_regex(k)
        elif isinstance(k, bool):
            # must come before the generic case: unicode(True) would give
            # 'True' instead of the JS spelling 'true'
            return 'true' if k else 'false'
        elif k is None:
            return 'null'
        else:
            # strings and anything else; NOTE: unicode() is Python-2-only
            return unicode(k)
def trans(ele, standard=False):
    """Translates esprima syntax tree to python by delegating to
    appriopriate translating node.

    *ele* is a dict with a 'type' key naming a translator function in this
    module; the node dict is splatted into that function. With
    standard=True, the undecorated (non line-length-limited) version of the
    translator is used when one exists.
    """
    node = globals().get(ele['type'])
    if not node:
        raise NotImplementedError('%s is not supported!' % ele['type'])
    if standard:
        # @limited stores the raw function under the 'standard' attribute.
        node = node.__dict__.get('standard', node)
    return node(**ele)
def limited(func):
    '''Decorator limiting resulting line length in order to avoid python parser stack overflow -
    If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line
    USE ONLY ON EXPRESSIONS!!! '''
    def f(standard=False, **args):
        insert_pos = len(inline_stack.names) # in case line is longer than limit we will have to insert the lval at current position
        # this is because calling func will change inline_stack.
        # we cant use inline_stack.require here because we dont know whether line overflows yet
        res = func(**args)
        if len(res)>LINE_LEN_LIMIT:
            name = inline_stack.require('LONG')
            # require() appended the new name at the end; move it back to the
            # position recorded before translating this expression so FIFO
            # injection order stays correct.
            inline_stack.names.pop()
            inline_stack.names.insert(insert_pos, name)
            # wrap the long expression in a zero-arg function defined earlier
            res = 'def %s(var=var):\n return %s\n' % (name, res)
            inline_stack.define(name, res)
            return name+'()'
        else:
            return res
    # expose the raw translator for trans(..., standard=True)
    f.__dict__['standard'] = func
    return f
# ==== IDENTIFIERS AND LITERALS =======
inf = float('inf')


def Literal(type, value, raw, regex=None):
    """Translate a JS literal node into Python source."""
    if regex:  # regex literal
        return 'JsRegExp(%s)' % repr(compose_regex(value))
    if value is None:  # null literal
        return 'var.get(u"null")'
    # Todo template
    # String, Bool, Float
    if value == inf:
        return 'Js(float("inf"))'
    return 'Js(%s)' % repr(value)
def Identifier(type, name):
    """Translate an identifier reference into a scope lookup."""
    return 'var.get(' + repr(name) + ')'
@limited
def MemberExpression(type, computed, object, property):
    """Translate member access: ``obj.prop`` or ``obj[expr]``."""
    target = trans(object)
    if not computed:
        # obj.prop accessor: the property name is always a static key
        key = repr(to_key(property))
    elif property['type'] == 'Literal':
        # obj[literal]: constant key, resolved once at translation time
        key = repr(to_key(property))
    else:
        # obj[expr]: key is computed at runtime
        key = trans(property)
    return '%s.get(%s)' % (target, key)
def ThisExpression(type):
    """Translate ``this`` into a lookup in the current scope."""
    return 'var.get(u"this")'
@limited
def CallExpression(type, callee, arguments):
    """Translate a call expression.

    Method calls go through .callprop(name, *args) so that ``this`` is
    bound to the receiver; plain calls translate to a direct invocation.
    """
    args = [trans(e) for e in arguments]
    if callee['type'] != 'MemberExpression':
        # standard function call
        return '%s(%s)' % (trans(callee), ', '.join(args))
    receiver = trans(callee['object'])
    prop_node = callee['property']
    if not callee['computed'] or prop_node['type'] == 'Literal':
        # static or constant property name - resolve at translation time
        prop = repr(to_key(prop_node))
    else:
        # runtime-computed property name (not a string literal, so no repr)
        prop = trans(prop_node)
    return '%s.callprop(%s)' % (receiver, ', '.join([prop] + args))
# ========== ARRAYS ============
def ArrayExpression(type, elements):  # todo fix null inside problem
    """Translate an array literal; elided (falsy) elements become None."""
    parts = [trans(e) if e else 'None' for e in elements]
    return 'Js([%s])' % ', '.join(parts)
# ========== OBJECTS =============
def ObjectExpression(type, properties):
    """Translate an object literal.

    The object is built on its own (injected) line via inline_stack so that
    getter/setter properties can be attached with define_own_property after
    construction; the returned value is just the generated lval name.
    """
    name = inline_stack.require('Object')
    elems = []
    after = ''
    for p in properties:
        if p['kind']=='init':
            # plain key: value pair, goes directly into the Js({...}) call
            elems.append('%s:%s' % Property(**p))
        elif p['kind']=='set':
            k, setter = Property(**p) # setter is just a lval referring to that function, it will be defined in InlineStack automatically
            after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % (name, k, setter)
        elif p['kind']=='get':
            k, getter = Property(**p)
            after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % (name, k, getter)
        else:
            raise RuntimeError('Unexpected object propery kind')
    obj = '%s = Js({%s})\n' % (name, ','.join(elems))
    inline_stack.define(name, obj+after)
    return name
def Property(type, kind, key, computed, value, method, shorthand):
    """Translate one object-literal property to a (repr(key), value) pair."""
    if shorthand or computed:
        raise NotImplementedError('Shorthand and Computed properties not implemented!')
    k = to_key(key)
    if k is None:
        raise SyntaxError('Invalid key in dictionary! Or bug in Js2Py')
    return repr(k), trans(value)
# ========== EXPRESSIONS ============
@limited
def UnaryExpression(type, operator, argument, prefix):
    """Translate unary operators; delete and typeof get special handling."""
    # unary ops involve complex transformations, so no line shortening here
    operand = trans(argument, standard=True)
    if operator == 'delete':
        if argument['type'] in ('Identifier', 'MemberExpression'):
            # valid delete target (a reference)
            return js_delete(operand)
        # not a reference: evaluate for side effects, result is always true
        return 'PyJsComma(%s, Js(True))' % operand
    if operator == 'typeof':
        return js_typeof(operand)
    return UNARY[operator](operand)
@limited
def BinaryExpression(type, operator, left, right):
    """Translate a binary operation via the BINARY dispatch table."""
    lhs = trans(left)
    rhs = trans(right)
    # delegate to our friends
    return BINARY[operator](lhs, rhs)
@limited
def UpdateExpression(type, operator, argument, prefix):
    """Translate ++/-- in both prefix and postfix forms."""
    # complex operation involving parsing of the result, so no line shortening
    target = trans(argument, standard=True)
    return js_postfix(target, operator == '++', not prefix)
@limited
def AssignmentExpression(type, operator, left, right):
    """Translate assignment to var.put / obj.put calls.

    *operator* arrives with a trailing '=' (e.g. '+=', '='); after stripping
    it, a non-empty remainder is the compound operator passed on to put().
    """
    operator = operator[:-1]
    if left['type']=='Identifier':
        if operator:
            return 'var.put(%s, %s, %s)' % (repr(to_key(left)), trans(right), repr(operator))
        else:
            return 'var.put(%s, %s)' % (repr(to_key(left)), trans(right))
    elif left['type']=='MemberExpression':
        far_left = trans(left['object'])
        if left['computed']: # obj[prop] type accessor
            # may be literal which is the same in every case so we can save some time on conversion
            if left['property']['type'] == 'Literal':
                prop = repr(to_key(left['property']))
            else: # worst case
                prop = trans(left['property']) # its not a string literal! so no repr
        else: # always the same since not computed (obj.prop accessor)
            prop = repr(to_key(left['property']))
        if operator:
            return far_left + '.put(%s, %s, %s)' % (prop, trans(right), repr(operator))
        else:
            return far_left + '.put(%s, %s)' % (prop, trans(right))
    else:
        raise SyntaxError('Invalid left hand side in assignment!')
@limited
def SequenceExpression(type, expressions):
    """Translate the comma operator by folding PyJsComma over the parts."""
    parts = (trans(e) for e in expressions)
    return reduce(js_comma, parts)
@limited
def NewExpression(type, callee, arguments):
    """Translate ``new Callee(args)`` into a .create(...) call."""
    # translate the callee first to keep inline_stack side effects in the
    # same order as the source
    constructor = trans(callee)
    args = ', '.join(trans(e) for e in arguments)
    return constructor + '.create(%s)' % args
@limited
def ConditionalExpression(type, test, consequent, alternate): # caused plenty of problems in my home-made translator :)
    """Translate ``test ? consequent : alternate`` into a parenthesized
    Python conditional expression."""
    # translation order (consequent, test, alternate) matches the original
    # tuple evaluation order so inline_stack state evolves identically
    true_part = trans(consequent)
    condition = trans(test)
    false_part = trans(alternate)
    return '(%s if %s else %s)' % (true_part, condition, false_part)
# =========== STATEMENTS =============
def BlockStatement(type, body):
    """Translate a ``{ ... }`` block."""
    return StatementList(body) # never returns empty string! In the worst case returns pass\n
def ExpressionStatement(type, expression):
    """Translate an expression used as a statement."""
    return trans(expression) + '\n' # end expression space with new line
def BreakStatement(type, label):
    """Translate ``break``; labeled breaks raise a dedicated exception."""
    if not label:
        return 'break\n'
    return 'raise %s("Breaked")\n' % get_break_label(label['name'])
def ContinueStatement(type, label):
    """Translate ``continue``; labeled continues raise a dedicated exception."""
    if not label:
        return 'continue\n'
    return 'raise %s("Continued")\n' % get_continue_label(label['name'])
def ReturnStatement(type, argument):
    """Translate ``return``; a bare return yields JS undefined."""
    value = trans(argument) if argument else "var.get('undefined')"
    return 'return %s\n' % value
def EmptyStatement(type):
    """An empty JS statement compiles to ``pass``."""
    return 'pass\n'
def DebuggerStatement(type):
    """``debugger`` has no Python equivalent; compile to ``pass``."""
    return 'pass\n'
def DoWhileStatement(type, body, test):
    """Translate do-while: infinite loop running body, then breaking when
    the test fails (so the body always runs at least once)."""
    inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n')
    result = 'while 1:\n' + indent(inside)
    return result
def ForStatement(type, init, test, update, body):
    """Translate a C-style for loop into init + while.

    When an update expression is present the body is wrapped in try/finally
    so that the update also runs when the body executes ``continue``.
    """
    update = indent(trans(update)) if update else ''
    init = trans(init) if init else ''
    if not init.endswith('\n'):
        init += '\n'
    # a missing test means an infinite loop
    test = trans(test) if test else '1'
    if not update:
        result = '#for JS loop\n%swhile %s:\n%s%s\n' % (init, test, indent(trans(body)), update)
    else:
        result = '#for JS loop\n%swhile %s:\n' % (init, test)
        body = 'try:\n%sfinally:\n %s\n' % (indent(trans(body)), update)
        result += indent(body)
    return result
def ForInStatement(type, left, right, body, each):
    """Translate for-in: iterate the object's keys via a PyJsTemp loop
    variable that is assigned into the JS scope on every pass."""
    res = 'for PyJsTemp in %s:\n' % trans(right)
    if left['type']=="VariableDeclaration":
        addon = trans(left) # make sure variable is registered
        if addon != 'pass\n':
            res = addon + res # we have to execute this expression :(
        # now extract the name
        try:
            name = left['declarations'][0]['id']['name']
        except:
            raise RuntimeError('Unusual ForIn loop')
    elif left['type']=='Identifier':
        name = left['name']
    else:
        raise RuntimeError('Unusual ForIn loop')
    res += indent('var.put(%s, PyJsTemp)\n' % repr(name) + trans(body))
    return res
def IfStatement(type, test, consequent, alternate):
    """Translate if/else (the else branch is emitted only when present)."""
    # NOTE we cannot do elif because function definition inside elif statement would not be possible!
    IF = 'if %s:\n' % trans(test)
    IF += indent(trans(consequent))
    if not alternate:
        return IF
    ELSE = 'else:\n' + indent(trans(alternate))
    return IF + ELSE
def LabeledStatement(type, label, body):
    """Translate a labeled statement.

    Labeled break/continue are modelled with per-label exception classes:
    the statement is wrapped in try/except for the break label, and loop
    bodies additionally get a try/except for the continue label.
    """
    # todo consider using smarter approach!
    inside = trans(body)
    defs = ''
    if inside.startswith('while ') or inside.startswith('for ') or inside.startswith('#for'):
        # we have to add contine label as well...
        # 3 or 1 since #for loop type has more lines before real for.
        sep = 1 if not inside.startswith('#for') else 3
        cont_label = get_continue_label(label['name'])
        temp = inside.split('\n')
        injected = 'try:\n'+'\n'.join(temp[sep:])
        injected += 'except %s:\n pass\n'%cont_label
        inside = '\n'.join(temp[:sep])+'\n'+indent(injected)
        defs += 'class %s(Exception): pass\n'%cont_label
    break_label = get_break_label(label['name'])
    inside = 'try:\n%sexcept %s:\n pass\n'% (indent(inside), break_label)
    defs += 'class %s(Exception): pass\n'%break_label
    return defs + inside
def StatementList(lis):
    """Join translated statements; never returns an empty string (which
    would break the generated indentation) - worst case is ``pass``."""
    if not lis:
        return 'pass\n'
    code = ''.join(trans(e) for e in lis)
    return code or 'pass\n'
def PyimportStatement(type, imp):
    """Translate a ``pyimport lib`` statement: import the Python module
    under an aliased name and register it in the JS scope."""
    lib = imp['name']
    alias = 'PyImport_%s' % lib
    code = 'import %s as %s\n' % (lib, alias)
    # check whether valid lib name...
    try:
        compile(code, '', 'exec')
    except:
        raise SyntaxError('Invalid Python module name (%s) in pyimport statement'%lib)
    # var.pyimport will handle module conversion to PyJs object
    return code + 'var.pyimport(%s, %s)\n' % (repr(lib), alias)
def SwitchStatement(type, discriminant, cases):
    """Translate switch as a single-pass while loop.

    A SWITCHED flag implements fall-through: once a case matches (via
    strict equality against CONDITION), every following case body also
    runs until a break; a trailing break terminates the loop.
    """
    #TODO there will be a problem with continue in a switch statement.... FIX IT
    code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n')
    code = code % trans(discriminant)
    for case in cases:
        case_code = None
        if case['test']: # case (x):
            case_code = 'if SWITCHED or PyJsStrictEq(CONDITION, %s):\n' % (trans(case['test']))
        else: # default:
            case_code = 'if True:\n'
        case_code += indent('SWITCHED = True\n')
        case_code += indent(StatementList(case['consequent']))
        # one more indent for whole
        code += indent(case_code)
    # prevent infinite loop and sort out nested switch...
    code += indent('SWITCHED = True\nbreak\n')
    return code
def ThrowStatement(type, argument):
    """Translate throw: convert the JS value to a Python exception, raise it."""
    return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans(argument)
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
    """Translate try/catch/finally.

    The catch variable's previous value is backed up into a uniquely named
    holder (via the TRY_CATCH template) so it can be restored after the
    except block.
    """
    result = 'try:\n%s' % indent(trans(block))
    # complicated catch statement...
    if handler:
        identifier = handler['param']['name']
        # NOTE: str.encode('hex') is Python-2-only (binascii.hexlify on 3)
        holder = 'PyJsHolder_%s_%d'%(identifier.encode('hex'), random.randrange(1e8))
        identifier = repr(identifier)
        result += 'except PyJsException as PyJsTempException:\n'
        # fill in except ( catch ) block and remember to recover holder variable to its previous state
        result += indent(TRY_CATCH.replace('HOLDER', holder).replace('NAME', identifier).replace('BLOCK', indent(trans(handler['body']))))
    # translate finally statement if present
    if finalizer:
        result += 'finally:\n%s' % indent(trans(finalizer))
    return result
def LexicalDeclaration(type, declarations, kind):
    """ES6 let/const declarations are not supported by this translator."""
    raise NotImplementedError('let and const not implemented yet but they will be soon! Check github for updates.')
def VariableDeclarator(type, id, init):
    """Translate one declarator: register the name in the current Context
    and emit an assignment if it has an initializer ('' otherwise)."""
    name = id['name']
    # register the name if not already registered
    Context.register(name)
    if init:
        return 'var.put(%s, %s)\n' % (repr(name), trans(init))
    return ''
def VariableDeclaration(type, declarations, kind):
    """Translate a ``var`` declaration list; empty output becomes ``pass``."""
    code = ''.join(trans(d) for d in declarations)
    return code or 'pass\n'
def WhileStatement(type, test, body):
    """Translate a while loop."""
    result = 'while %s:\n'%trans(test) + indent(trans(body))
    return result
def WithStatement(type, object, body):
    """JS ``with`` blocks are not supported by this translator."""
    raise NotImplementedError('With statement not implemented!')
def Program(type, body):
    """Translate the top-level program: translated body plus hoisted
    declarations, with inline definitions injected before their use."""
    inline_stack.reset()
    code = ''.join(trans(e) for e in body)
    # here add hoisted elements (register variables and define functions)
    code = Context.get_code() + code
    # replace all inline variables
    code = inline_stack.inject_inlines(code)
    return code
# ======== FUNCTIONS ============
def FunctionDeclaration(type, id, params, defaults, body, generator, expression):
    """Translate a hoisted function declaration.

    The body is translated in a fresh ContextStack; the compiled function is
    registered in the enclosing Context (hoisting), so only ``pass`` is
    emitted at the declaration site.
    NOTE: encode('hex'), iteritems and func_name are Python-2-only idioms.
    """
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
        raise NotImplementedError('Defaults not supported')
    if not id:
        # anonymous declaration: fall back to expression translation
        return FunctionExpression(type, id, params, defaults, body, generator, expression)
    JsName = id['name']
    PyName = 'PyJsHoisted_%s_' % JsName
    PyName = PyName if is_valid_py_name(PyName) else 'PyJsHoistedNonPyName'
    # this is quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v['name'] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        if is_valid_py_name(v):
            used_vars.append(v)
        else: # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append('PyJsArg_%s_' % v.encode('hex'))
    header = '@Js\n'
    header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else ''))
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({'this':'this', 'arguments':'arguments'})
    arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in arg_map.iteritems())
    # and finally set the name of the function to its real name:
    footer = '%s.func_name = %s\n' % (PyName, repr(JsName))
    footer+= 'var.put(%s, %s)\n' % (repr(JsName), PyName)
    whole_code = header + indent(arg_conv+code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    Context.define(JsName, whole_code)
    return 'pass\n'
def FunctionExpression(type, id, params, defaults, body, generator, expression):
    """Translate a function expression.

    The body is translated in a fresh ContextStack and the compiled
    definition is recorded on inline_stack; the returned value is just the
    generated lval name used at the expression site.
    NOTE: encode('hex') and iteritems are Python-2-only idioms.
    """
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
        raise NotImplementedError('Defaults not supported')
    JsName = id['name'] if id else 'anonymous'
    if not is_valid_py_name(JsName):
        ScriptName = 'InlineNonPyName'
    else:
        ScriptName = JsName
    PyName = inline_stack.require(ScriptName) # this is unique
    # again quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v['name'] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        try:
            compile(v, 'a','exec') # valid
            used_vars.append(v)
        except: # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append('PyJsArg_%s_' % v.encode('hex'))
    header = '@Js\n'
    header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else ''))
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({'this':'this', 'arguments':'arguments'})
    if id: # make self available from inside...
        if id['name'] not in arg_map:
            arg_map[id['name']] = PyName
    arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in arg_map.iteritems())
    # and finally set the name of the function to its real name:
    footer = '%s._set_name(%s)\n' % (PyName, repr(JsName))
    whole_code = header + indent(arg_conv+code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    inline_stack.define(PyName, whole_code)
    return PyName
# Logical &&/|| share their translation with binary operators, and the
# postfix form of ++/-- shares it with UpdateExpression.
LogicalExpression = BinaryExpression
PostfixExpression = UpdateExpression

# initialise the module-level translation state (Context, inline_stack)
clean_stacks()
if __name__=='__main__':
    # Ad-hoc benchmark (Python 2 syntax): parse and translate esp.js,
    # report throughput, and dump the generated Python to res.py.
    import codecs
    import time
    import pyjsparser
    c = None#'''`ijfdij`'''
    if not c:
        with codecs.open("esp.js", "r", "utf-8") as f:
            c = f.read()
    print 'Started'
    t = time.time()
    res = trans(pyjsparser.PyJsParser().parse(c))
    # tiny epsilon avoids division by zero on very fast runs
    dt = time.time() - t+ 0.000000001
    print 'Translated everyting in', round(dt,5), 'seconds.'
    print 'Thats %d characters per second' % int(len(c)/dt)
    with open('res.py', 'w') as f:
        f.write(res)
|
firstval/micropython | refs/heads/master | tests/cmdline/cmd_showbc.py | 54 | # cmdline: -v -v
# test printing of all bytecodes
def f():  # never executed: compiled only so -v -v prints its bytecode
    # constants
    a = None + False + True
    a = 0
    a = 1000
    a = -1000
    # constructing data
    a = 1
    b = (1, 2)
    c = [1, 2]
    d = {1, 2}
    e = {}
    f = {1:2}
    g = 'a'
    h = b'a'
    # unary/binary ops
    i = 1
    j = 2
    k = a + b
    l = -a
    m = not a
    m = a == b == c
    m = not (a == b and b == c)
    # attributes
    n = b.c
    b.c = n
    # subscript
    p = b[0]
    b[0] = p
    # slice
    a = b[::]
    # sequence unpacking
    a, b = c
    # tuple swapping
    a, b = b, a
    a, b, c = c, b, a
    # del fast
    del a
    # globals
    global gl
    gl = a
    del gl
    # comprehensions
    a = (b for c in d if e)
    a = [b for c in d if e]
    a = {b:b for c in d if e}
    # function calls
    a()
    a(1)
    a(b=1)
    a(*b)
    # method calls
    a.b()
    a.b(1)
    a.b(c=1)
    a.b(*c)
    # jumps
    if a:
        x
    else:
        y
    while a:
        b
    while not a:
        b
    # for loop
    for a in b:
        c
    # exceptions
    try:
        while a:
            break
    except:
        b
    finally:
        c
    # with
    with a:
        b
    # closed over variables
    x = 1
    def closure():
        a = x + 1
    x = 1
    del x
    # import
    import a
    from a import b
    from a import *
    # raise
    raise
    raise 1
    # return
    return
    return 1
    # functions with default args
    def f(a=1):
        pass
    def f(b=2):
        return b + a
    # function which yields
    def f():
        yield
        yield 1
        yield from 1
    # class
    class Class:
        pass
xiang12835/python_web | refs/heads/master | py2_web2py/web2py/gluon/contrib/markmin/markmin2html.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# created by Massimo Di Pierro
# recreated by Vladyslav Kozlovskyy
# license MIT/BSD/GPL
from __future__ import print_function
import re
import sys
import urllib
import ast
PY2 = sys.version_info[0] == 2
if PY2:
from urllib import quote as urllib_quote
from string import maketrans
else:
from urllib.parse import quote as urllib_quote
maketrans = str.maketrans
"""
TODO: next version should use MathJax
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js">
MathJax.Hub.Config({
extensions: ["tex2jax.js","TeX/AMSmath.js","TeX/AMSsymbols.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
"""
__all__ = ['render', 'markmin2html', 'markmin_escape']
__doc__ = """
# Markmin markup language
## About
This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py``.
Example of usage:
``
m = "Hello **world** [[link http://web2py.com]]"
from markmin2html import markmin2html
print(markmin2html(m))
from markmin2latex import markmin2latex
print(markmin2latex(m))
from markmin2pdf import markmin2pdf # requires pdflatex
print(markmin2pdf(m))
``
====================
# This is a test block
with new features:
This is a blockquote with
a list with tables in it:
-----------
This is a paragraph before list.
You can continue paragraph on the
next lines.
This is an ordered list with tables:
+ Item 1
+ Item 2
+ --------
aa|bb|cc
11|22|33
--------:tableclass1[tableid1]
+ Item 4
-----------
T1| T2| t3
===========
aaa|bbb|ccc
ddd|fff|ggg
123|0 |5.0
-----------:tableclass1
-----------:blockquoteclass[blockquoteid]
This is a new paragraph
with a followed table.
Table has header, footer, sections,
odd and even rows:
-------------------------------
**Title 1**|**Title 2**|**Title 3**
==============================
data 1 | data 2 | 2.00
data 3 |data4(long)| 23.00
|data 5 | 33.50
==============================
New section|New data | 5.00
data 1 |data2(long)|100.45
|data 3 | 12.50
data 4 | data 5 | .33
data 6 |data7(long)| 8.01
|data 8 | 514
==============================
Total: | 9 items |698,79
------------------------------:tableclass1[tableid2]
## Multilevel
lists
Now lists can be multilevel:
+ Ordered item 1 on level 1.
You can continue item text on
next strings
. paragraph in an item
++. Ordered item 1 of sublevel 2 with
a paragraph (paragraph can start
with point after plus or minus
characters, e.g. **++.** or **--.**)
++. This is another item. But with 3 paragraphs,
blockquote and sublists:
.. This is the second paragraph in the item. You
can add paragraphs to an item, using point
notation, where first characters in the string
are sequence of points with space between
them and another string. For example, this
paragraph (in sublevel 2) starts with two points:
``.. This is the second paragraph...``
.. ----------
### this is a blockquote in a list
You can use blockquote with headers, paragraphs,
tables and lists in it:
Tables can have or have not header and footer.
This table is defined without any header
and footer in it:
---------------------
red |fox | 0
blue |dolphin | 1000
green|leaf | 10000
---------------------
----------
.. This is yet another paragraph in the item.
--- This is an item of unordered list **(sublevel 3)**
--- This is the second item of the unordered list ''(sublevel 3)''
++++++ This is a single item of ordered list in sublevel 6
.... and this is a paragraph in sublevel 4
---. This is a new item with paragraph in sublevel 3.
++++ Start ordered list in sublevel 4 with code block: ``
line 1
line 2
line 3
``
++++. Yet another item with code block (we need to indent \`\` to add code block as part of item):
``
line 1
line 2
line 3
``
This item finishes with this paragraph.
... Item in sublevel 3 can be continued with paragraphs.
... ``
this is another
code block
in the
sublevel 3 item
``
+++ The last item in sublevel 3
.. This is a continuous paragraph for item 2 in sublevel 2.
You can use such structure to create difficult structured
documents.
++ item 3 in sublevel 2
-- item 1 in sublevel 2 (new unordered list)
-- item 2 in sublevel 2
-- item 3 in sublevel 2
++ item 1 in sublevel 2 (new ordered list)
++ item 2 in sublevel 2
++ item 3 in sublevel 2
+ item 2 in level 1
+ item 3 in level 1
- new unordered list (item 1 in level 1)
- level 2 in level 1
- level 3 in level 1
- level 4 in level 1
## This is the last section of the test
Single paragraph with '----' in it will be turned into separator:
-----------
And this is the last paragraph in
the test. Be happy!
====================
## Why?
We wanted a markup language with the following requirements:
- less than 300 lines of functional code
- easy to read
- secure
- support table, ul, ol, code
- support html5 video and audio elements (html serialization only)
- can align images and resize them
- can specify class for tables, blockquotes and code elements
- can add anchors
- does not use _ for markup (since it creates odd behavior)
- automatically links urls
- fast
- easy to extend
- supports latex and pdf including references
- allows to describe the markup in the markup (this document is generated from markmin syntax)
(results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster)
The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]]
## Download
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py
markmin2html.py and markmin2latex.py are single files and have no web2py dependence. Their license is BSD.
## Examples
### Bold, italic, code and links
------------------------------------------------------------------------------
**SOURCE** | **OUTPUT**
==============================================================================
``# title`` | **title**
``## section`` | **section**
``### subsection`` | **subsection**
``**bold**`` | **bold**
``''italic''`` | ''italic''
``~~strikeout~~`` | ~~strikeout~~
``!`!`verbatim`!`!`` | ``verbatim``
``\`\`color with **bold**\`\`:red`` | ``color with **bold**``:red
``\`\`many colors\`\`:color[blue:#ffff00]`` | ``many colors``:color[blue:#ffff00]
``http://google.com`` | http://google.com
``[[**click** me #myanchor]]`` | [[**click** me #myanchor]]
``[[click me [extra info] #myanchor popup]]`` | [[click me [extra info] #myanchor popup]]
-------------------------------------------------------------------------------
### More on links
The format is always ``[[title link]]`` or ``[[title [extra] link]]``. Notice you can nest bold, italic, strikeout and code inside the link ``title``.
### Anchors [[myanchor]]
You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor.
You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]`` or [[link with an extra info [extra info] #myanchor]], i.e.
``[[link with an extra info [extra info] #myanchor]]``.
### Images
[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]
This paragraph has an image aligned to the right with a width of 200px. Its is placed using the code
``[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``.
### Unordered Lists
``
- Dog
- Cat
- Mouse
``
is rendered as
- Dog
- Cat
- Mouse
Two new lines between items break the list in two lists.
### Ordered Lists
``
+ Dog
+ Cat
+ Mouse
``
is rendered as
+ Dog
+ Cat
+ Mouse
### Multilevel Lists
``
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
``
is rendered as
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
### Tables (with optional header and/or footer)
Something like this
``
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
``
is a table and is rendered as
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
Four or more dashes delimit the table and | separates the columns.
The ``:abc``, ``:id[abc_1]`` or ``:abc[abc_1]`` at the end sets the class and/or id for the table and it is optional.
### Blockquote
A table with a single cell is rendered as a blockquote:
-----
Hello world
-----
Blockquote can contain headers, paragraphs, lists and tables:
``
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
``
is rendered as:
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
### Code, ``<code>``, escaping and extra stuff
``
def test():
return "this is Python code"
``:python
Optionally a ` inside a ``!`!`...`!`!`` block can be inserted escaped with !`!.
**NOTE:** You can escape markmin constructions (\\'\\',\`\`,\*\*,\~\~,\[,\{,\]\},\$,\@) with '\\\\' character:
so \\\\`\\\\` can replace !`!`! escape string
The ``:python`` after the markup is also optional. If present, by default, it is used to set the class of the <code> block.
The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example:
``
markmin2html("!`!!`!aaa!`!!`!:custom",
extra=dict(custom=lambda text: 'x'+text+'x'))
``:python
generates
``'xaaax'``:python
(the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``).
### Line breaks
``[[NEWLINE]]`` tag is used to break lines:
``
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
``
generates:
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
### Html5 support
Markmin also supports the <video> and <audio> html5 tags using the notation:
``
[[message link video]]
[[message link audio]]
[[message [title] link video]]
[[message [title] link audio]]
``
where ``message`` will be shown in browsers without HTML5 video/audio tags support.
### Latex and other extensions
Formulas can be embedded into HTML with ''\$\$``formula``\$\$''.
You can use Google charts to render the formula:
``
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
markmin2html(text,{'latex':lambda code: LATEX % urllib.quote(code)})
``
### Code with syntax highlighting
This requires a syntax highlighting tool, such as the web2py CODE helper.
``
extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(),
'code_java':lambda text: CODE(text,language='java').xml(),
'code_python':lambda text: CODE(text,language='python').xml(),
'code_html':lambda text: CODE(text,language='html').xml()}
``
or simple:
``
extra={'code':lambda text,lang='python': CODE(text,language=lang).xml()}
``
``
markmin2html(text,extra=extra)
``
Code can now be marked up as in this example:
``
!`!`
<html><body>example</body></html>
!`!`:code_html
``
OR
``
!`!`
<html><body>example</body></html>
!`!`:code[html]
``
### Citations and References
Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". Items like
``
- [[key]] value
``
in the References will be translated into Latex
``
\\bibitem{key} value
``
Here is an example of usage:
``
As shown in Ref.!`!`mdipierro`!`!:cite
## References
- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com
``
### Caveats
``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them.
"""
# Named HTML color keywords accepted by the ``:red`` style of color markup
# (see the code-block handling in render()).
html_colors = ['aqua', 'black', 'blue', 'fuchsia', 'gray', 'green',
               'lime', 'maroon', 'navy', 'olive', 'purple', 'red',
               'silver', 'teal', 'white', 'yellow']
# Private control characters used as in-text placeholders while parsing:
# processed ``...`` segments become META, [[...]] links become LINK.
META = '\x06'
LINK = '\x07'
DISABLED_META = '\x08'
# Template used to render $$...$$ formulas through the Google chart API.
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
# @/app/controller/function/args URL shorthand (expanded by replace_at_urls).
regex_URL = re.compile(r'@/(?P<a>\w*)/(?P<c>\w*)/(?P<f>\w*(\.\w+)?)(/(?P<args>[\w\.\-/]+))?')
# @{name} / @{name:args} component references (expanded by replace_components).
regex_env2 = re.compile(r'@\{(?P<a>[\w\-\.]+?)(\:(?P<b>.*?))?\}')
regex_expand_meta = re.compile('(' + META + '|' + DISABLED_META + '|````)')
# $$...$$ inline latex formulas.
regex_dd = re.compile(r'\$\$(?P<latex>.*?)\$\$')
# ``text``:class[param] code spans (also matches bare META placeholders).
regex_code = re.compile(
    '(' + META + '|' + DISABLED_META + r'|````)|(``(?P<t>.+?)``(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[^\]]*)\])?)?)',
    re.S)
# Inline emphasis markup: **strong**, ~~strikeout~~, ''em''.
regex_strong = re.compile(r'\*\*(?P<t>[^\s*]+( +[^\s*]+)*)\*\*')
regex_del = re.compile(r'~~(?P<t>[^\s~]+( +[^\s~]+)*)~~')
regex_em = re.compile(r"''(?P<t>([^\s']| |'(?!'))+)''")
# Plain (optionally signed, optionally exponent) numeric literal.
regex_num = re.compile(r"^\s*[+-]?((\d+(\.\d*)?)|\.\d+)([eE][+-]?[0-9]+)?\s*$")
# Line classification for headers (#), list items (+/-) and paragraphs (.).
regex_list = re.compile('^(?:(?:(#{1,6})|(?:(\.+|\++|\-+)(\.)?))\s*)?(.*)$')
# Start of a table/blockquote block (a run of 3+ dashes), optionally inside a list.
regex_bq_headline = re.compile('^(?:(\.+|\++|\-+)(\.)?\s+)?(-{3}-*)$')
# End of a table/blockquote block with optional :class[id] suffix.
regex_tq = re.compile('^(-{3}-*)(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[a-zA-Z][_a-zA-Z\-\d]*)\])?)?$')
# proto:url references (e.g. "qr:http://...") handled by protolinks.
regex_proto = re.compile(r'(?<!["\w>/=])(?P<p>\w+):(?P<k>\w+://[\w\d\-+=?%&/:.]+)', re.M)
# Bare URLs / e-mail addresses auto-converted by the autolinks callback.
regex_auto = re.compile(r'(?<!["\w>/=])(?P<k>\w+://[\w\d\-+_=?%&/:.,;#]+\w|[\w\-.]+@[\w\-.]+)', re.M)
# [[...]] link/anchor/media markup; second-level regexes split the inner text.
regex_link = re.compile(r'(' + LINK + r')|\[\[(?P<s>.+?)\]\]', re.S)
regex_link_level2 = re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?(?:\s+(?P<p>popup))?\s*$', re.S)
regex_media_level2 = re.compile(
    r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?\s+(?P<p>img|IMG|left|right|center|video|audio|blockleft|blockright)(?:\s+(?P<w>\d+px))?\s*$',
    re.S)
# Backslash-escaping of markmin control characters; ttab_in/ttab_out map the
# escaped characters to/from private control codes so the parser skips them
# (see the regex_backslash.sub(...translate(ttab_in)...) call in render()).
regex_markmin_escape = re.compile(r"(\\*)(['`:*~\\[\]{}@\$+\-.#\n])")
regex_backslash = re.compile(r"\\(['`:*~\\[\]{}@\$+\-.#\n])")
ttab_in = maketrans("'`:*~\\[]{}@$+-.#\n", '\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05')
ttab_out = maketrans('\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05', "'`:*~\\[]{}@$+-.#\n")
# key= prefixes inside @{name:key=value,...} argument strings (see make_dict).
regex_quote = re.compile('(?P<name>\w+?)\s*\=\s*')
def local_html_escape(data, quote=False):
    """
    Escape HTML special characters in a str or bytes value.

    Replaces "&", "<" and ">" with HTML-safe entity sequences.  If *quote*
    is true (it defaults to False — the original docstring wrongly claimed
    true was the default), the double quote (") and single quote (')
    characters are translated as well.

    Args:
        data: text to escape; may be ``str`` or ``bytes``.
        quote: also escape quote characters when true.

    Returns:
        The escaped text, of the same type as *data*.
    """
    # Same test used for the module-level PY2 flag; inlined so the function
    # does not depend on a module global.
    if sys.version_info[0] == 2:
        import cgi
        data = cgi.escape(data, quote)  # handles &, <, > and (optionally) "
        # cgi.escape never touches single quotes, so translate them by hand
        return data.replace("'", "&#x27;") if quote else data
    else:
        import html
        if isinstance(data, str):
            return html.escape(data, quote=quote)
        # bytes input: html.escape only accepts str, so escape manually
        data = data.replace(b"&", b"&amp;")  # Must be done first!
        data = data.replace(b"<", b"&lt;")
        data = data.replace(b">", b"&gt;")
        if quote:
            data = data.replace(b'"', b"&quot;")
            data = data.replace(b'\'', b"&#x27;")
        return data
def make_dict(b):
    """Wrap ``key=value, ...`` text in *b* as a Python dict literal string."""
    quoted_keys = regex_quote.sub(r"'\g<name>':", b)
    return '{' + quoted_keys + '}'
def safe_eval(node_or_string, env):
    """
    Safely evaluate an expression node or a string containing a Python
    expression.  The string or node provided may only consist of the
    following Python literal structures: strings, numbers, tuples, lists,
    dicts, booleans, None, complex literals (``a+bj`` / ``a-bj``) and
    names listed in *env*.

    Args:
        node_or_string: source string or ``ast`` node to evaluate.
        env: mapping of extra names allowed to appear in the expression.

    Returns:
        The evaluated literal value.

    Raises:
        ValueError: if the expression contains anything else.
    """
    _safe_names = {'None': None, 'True': True, 'False': False}
    _safe_names.update(env)
    # Py2/Py3 compatibility: the original referenced ``basestring`` and
    # ``long`` unconditionally, raising NameError on Python 3.
    try:
        _string_types = basestring  # noqa: F821 — Python 2 only
        _number_types = (int, long, float)  # noqa: F821
    except NameError:
        _string_types = str
        _number_types = (int, float)
    # ast.Str/ast.Num were removed in Python 3.12; prefer ast.Constant and
    # fall back to the legacy node classes where they still exist.
    # isinstance(x, ()) is always False, so () is a safe "missing" stand-in.
    _Constant = getattr(ast, 'Constant', ())
    _Str = getattr(ast, 'Str', ())
    _Num = getattr(ast, 'Num', ())
    if isinstance(node_or_string, _string_types):
        node_or_string = ast.parse(node_or_string, mode='eval')
    if isinstance(node_or_string, ast.Expression):
        node_or_string = node_or_string.body

    def _convert(node):
        if isinstance(node, _Constant):
            return node.value
        elif isinstance(node, _Str):
            return node.s
        elif isinstance(node, _Num):
            return node.n
        elif isinstance(node, ast.Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, ast.List):
            return list(map(_convert, node.elts))
        elif isinstance(node, ast.Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, ast.Name):
            if node.id in _safe_names:
                return _safe_names[node.id]
        elif isinstance(node, ast.BinOp) and \
                isinstance(node.op, (ast.Add, ast.Sub)):
            # Complex literals are parsed as BinOp(real, +/-, imag); accept
            # only a real left operand and a complex right operand, as before.
            left = _convert(node.left)
            right = _convert(node.right)
            if isinstance(right, complex) and \
                    isinstance(left, _number_types) and \
                    not isinstance(left, bool):
                if isinstance(node.op, ast.Add):
                    return left + right
                else:
                    return left - right
        raise ValueError('malformed string')
    return _convert(node_or_string)
def markmin_escape(text):
    """Insert a backslash before markmin control characters: '`:*~[]{}@$"""
    def _escape(match):
        matched = match.group(0)
        return '\\' + matched.replace('\\', '\\\\')
    return regex_markmin_escape.sub(_escape, text)
def replace_autolinks(text, autolinks):
    """Run the *autolinks* renderer over every bare URL/e-mail in *text*."""
    def _render(match):
        return autolinks(match.group('k'))
    return regex_auto.sub(_render, text)
def replace_at_urls(text, url):
    """Expand experimental ``@/app/controller/function/args`` references
    in *text* through the *url* builder callable."""
    def _build(match, url=url):
        app, ctr, fun, args = match.group('a', 'c', 'f', 'args')
        arg_list = (args or '').split('/')
        return url(a=app or None, c=ctr or None, f=fun or None,
                   args=arg_list, scheme=True, host=True)
    return regex_URL.sub(_build, text)
def replace_components(text, env):
    """
    Expand ``@{name}`` / ``@{name:args}`` component references in *text*.

    For each match ``name`` is looked up in *env*.  Callable values are
    invoked — with keyword arguments when ``args`` parses as ``key=value``
    pairs via make_dict/safe_eval, otherwise with the raw ``args`` string —
    and the result replaces the reference; call errors are rendered inline
    as ``ERROR: ...``.  Non-callable values (and unknown names, which fall
    back to the matched text itself) are substituted as their string form.
    """
    # not perfect but acceptable
    def u2(match, env=env):
        f = env.get(match.group('a'), match.group(0))
        if callable(f):
            b = match.group('b')
            try:
                # try to parse args as a keyword dict; on failure keep the
                # raw string (narrowed from a bare ``except:``)
                b = safe_eval(make_dict(b), env)
            except Exception:
                pass
            try:
                f = f(**b) if isinstance(b, dict) else f(b)
            except Exception as e:
                f = 'ERROR: %s' % e
        # Always return a string: the original returned None for
        # non-callable / missing names, which makes re.sub raise TypeError.
        return str(f)
    return regex_env2.sub(u2, text)
def autolinks_simple(url):
    """
    Automatically convert *url* into an HTML link, image, video or audio
    tag, chosen by its suffix (case-insensitive); bare addresses become
    mailto links.
    """
    lowered = url.lower()
    if '@' in url and '://' not in url:
        return '<a href="mailto:%s">%s</a>' % (url, url)
    media_templates = (
        (('.jpg', '.jpeg', '.gif', '.png'), '<img src="%s" controls />'),
        (('.mp4', '.mpeg', '.mov', '.ogv'), '<video src="%s" controls></video>'),
        (('.mp3', '.wav', '.ogg'), '<audio src="%s" controls></audio>'),
    )
    for suffixes, template in media_templates:
        if lowered.endswith(suffixes):
            return template % url
    return '<a href="%s">%s</a>' % (url, url)
def protolinks_simple(proto, url):
    """
    Convert a ``proto:url`` reference into an HTML string.

    ``iframe:`` and ``embed:`` (a synonym) produce an <iframe>; ``qr:``
    produces a QR-code <img> via the Google chart API; any other prefix is
    returned unchanged as ``proto:url``.
    """
    if proto == 'iframe' or proto == 'embed':
        return '<iframe src="%s" frameborder="0" allowfullscreen></iframe>' % url
    if proto == 'qr':
        qr_service = 'http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=%s&choe=UTF-8&chld=H'
        return ('<img style="width:100px" src="' + qr_service % url +
                '" alt="QR Code" title="QR Code" />')
    return '%s:%s' % (proto, url)
def email_simple(email):
    """Wrap an e-mail address in a ``mailto:`` anchor tag."""
    return '<a href="mailto:{0}">{0}</a>'.format(email)
def render(text,
extra={},
allowed={},
sep='p',
URL=None,
environment=None,
latex='google',
autolinks='default',
protolinks='default',
class_prefix='',
id_prefix='markmin_',
pretty_print=False):
"""
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL -
- environment is a dictionary of environment variables (can be accessed with @{variable}
- latex -
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
>>> render('this is\\n# a section\\n\\nparagraph')
'<p>this is</p><h1>a section</h1><p>paragraph</p>'
>>> render('this is\\n## a subsection\\n\\nparagraph')
'<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
>>> render('this is\\n### a subsubsection\\n\\nparagraph')
'<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
>>> render('**hello world**')
'<p><strong>hello world</strong></p>'
>>> render('``hello world``')
'<code>hello world</code>'
>>> render('``hello world``:python')
'<code class="python">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python')
'<pre><code class="python">hello\\nworld</code></pre>'
>>> render('``hello world``:python[test_id]')
'<code class="python" id="markmin_test_id">hello world</code>'
>>> render('``hello world``:id[test_id]')
'<code id="markmin_test_id">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python[test_id]')
'<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
>>> render('``\\nhello\\nworld\\n``:id[test_id]')
'<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
>>> render("''hello world''")
'<p><em>hello world</em></p>'
>>> render('** hello** **world**')
'<p>** hello** <strong>world</strong></p>'
>>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
'<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
>>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
'<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
>>> render("----\\na | b\\nc | d\\n----\\n")
'<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
>>> render("----\\nhello world\\n----\\n")
'<blockquote><p>hello world</p></blockquote>'
>>> render('[[myanchor]]')
'<p><span class="anchor" id="markmin_myanchor"></span></p>'
>>> render('[[ http://example.com]]')
'<p><a href="http://example.com">http://example.com</a></p>'
>>> render('[[bookmark [http://example.com] ]]')
'<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
>>> render('[[this is a link http://example.com]]')
'<p><a href="http://example.com">this is a link</a></p>'
>>> render('[[this is an image http://example.com left]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
>>> render('[[this is an image http://example.com left 200px]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
>>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
'<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <video> HTML5 tag</video></p>'
>>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render('[[this is a **link** http://example.com]]')
'<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
>>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
'xaaax'
>>> print(render(r"$$\int_a^b sin(x)dx$$"))
<img src="http://chart.apis.google.com/chart?cht=tx&chl=%5Cint_a%5Eb%20sin%28x%29dx" />
>>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
'<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
>>> markmin2html("backslash instead of exclamation sign: \``probe``")
'<p>backslash instead of exclamation sign: ``probe``</p>'
>>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
'<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
>>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
'<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
>>> render("auto-url: http://example.com")
'<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
>>> render("auto-image: (http://example.com/image.jpeg)")
'<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
>>> render("qr: (qr:http://example.com/image.jpeg)")
'<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
>>> render("embed: (embed:http://example.com/page)")
'<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("iframe: (iframe:http://example.com/page)")
'<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
'<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
>>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
'<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
>>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
'<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
>>> render("title4: [[ [simple title] http://www.example.com popup]]")
'<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
>>> render("title5: [[test message [simple title] http://example.com IMG]]")
'<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
>>> render("title6: [[[test message w/o title] http://example.com IMG]]")
'<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
>>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
'<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
>>> render("title8: [[test message [title] http://example.com center]]")
'<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
>>> render("title9: [[test message [title] http://example.com left]]")
'<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
>>> render("title10: [[test message [title] http://example.com right 100px]]")
'<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
>>> render("title11: [[test message [title] http://example.com center 200px]]")
'<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
>>> render(r"\\[[probe]]")
'<p>[[probe]]</p>'
>>> render(r"\\\\[[probe]]")
'<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\[[probe]]")
'<p>\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\[[probe]]")
'<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\\\\[[probe]]")
'<p>\\\\\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\\\\[[probe]]")
'<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
'<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
>>> render("the \\**text**")
'<p>the **text**</p>'
>>> render("the \\``text``")
'<p>the ``text``</p>'
>>> render("the \\\\''text''")
"<p>the ''text''</p>"
>>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
'<p>the <a href="http://www.example.com" title="**with** ``<b>title</b>``:red">link</a></p>'
>>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
'<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red"><b>title</b></span>]</a></p>'
>>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
'<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
'<p><a href="<a>text3</a>" title="<a>test2</a>"><a>test</a></a></p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
'<p><img src="<a>text3</a>" alt="<a>test</a>" title="<a>test2</a>" /></p>'
>>> render("**bold** ''italic'' ~~strikeout~~")
'<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
>>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
'<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
>>> render("this is ``a text with yellow background``:c[:yellow]")
'<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
>>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
'<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
>>> render("this is ``a green text``:color[green:]")
'<p>this is <span style="color: green;">a green text</span></p>'
>>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
'<p><strong>test 1</strong></p>'
>>> render("**@{probe:t=a}**", environment=dict(probe=lambda t:"test %s" % t, a=1))
'<p><strong>test 1</strong></p>'
>>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
'<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
>>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
'<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
"""
if autolinks == "default":
autolinks = autolinks_simple
if protolinks == "default":
protolinks = protolinks_simple
pp = '\n' if pretty_print else ''
text = text if text is None or isinstance(text, str) else text.decode('utf8', 'strict')
if not (isinstance(text, str)):
text = str(text or '')
text = regex_backslash.sub(lambda m: m.group(1).translate(ttab_in), text)
text = text.replace('\x05', '').replace('\r\n', '\n') # concatenate strings separeted by \\n
if URL is not None:
text = replace_at_urls(text, URL)
if latex == 'google':
text = regex_dd.sub('``\g<latex>``:latex ', text)
#############################################################
# replace all blocks marked with ``...``:class[id] with META
# store them into segments they will be treated as code
#############################################################
segments = []
def mark_code(m):
g = m.group(0)
if g in (META, DISABLED_META):
segments.append((None, None, None, g))
return m.group()
elif g == '````':
segments.append((None, None, None, ''))
return m.group()
else:
c = m.group('c') or ''
p = m.group('p') or ''
if 'code' in allowed and c not in allowed['code']:
c = ''
code = m.group('t').replace('!`!', '`')
segments.append((code, c, p, m.group(0)))
return META
text = regex_code.sub(mark_code, text)
#############################################################
# replace all blocks marked with [[...]] with LINK
# store them into links they will be treated as link
#############################################################
links = []
def mark_link(m):
links.append(None if m.group() == LINK
else m.group('s'))
return LINK
text = regex_link.sub(mark_link, text)
text = local_html_escape(text)
if protolinks:
text = regex_proto.sub(lambda m: protolinks(*m.group('p', 'k')), text)
if autolinks:
text = replace_autolinks(text, autolinks)
#############################################################
# normalize spaces
#############################################################
strings = text.split('\n')
def parse_title(t, s): # out, lev, etags, tag, s):
hlevel = str(len(t))
out.extend(etags[::-1])
out.append("<h%s>%s" % (hlevel, s))
etags[:] = ["</h%s>%s" % (hlevel, pp)]
lev = 0
ltags[:] = []
tlev[:] = []
return (lev, 'h')
    def parse_list(t, p, s, tag, lev, mtag, lineno):
        """Open/close <ul>/<ol>/<li> tags for one list-item line.

        ``t``: the '+'/'-' run whose length is the item's nesting depth;
        ``p``: '.' when the item starts a paragraph ("++." / "--.");
        ``s``: item text; ``tag``: 'ol' or 'ul'.
        Mutates the shared out/etags/ltags/tlev stacks and returns the
        updated (lev, mtag, lineno).
        """
        lent = len(t)
        if lent < lev:  # current item level < previous item level
            # Pop list levels deeper than this item.
            while ltags[-1] > lent:
                ltags.pop()
                out.append(etags.pop())
            lev = lent
            tlev[lev:] = []
        if lent > lev:  # current item level > previous item level
            if lev == 0:  # previous line is not a list (paragraph or title)
                out.extend(etags[::-1])
                ltags[:] = []
                tlev[:] = []
                etags[:] = []
            if pend and mtag == '.':  # paragraph in a list:
                out.append(etags.pop())
                ltags.pop()
            # Open one <ul>/<ol> per missing nesting level.
            for i in range(lent - lev):
                out.append('<' + tag + '>' + pp)
                etags.append('</' + tag + '>' + pp)
                lev += 1
                ltags.append(lev)
                tlev.append(tag)
        elif lent == lev:
            if tlev[-1] != tag:
                # type of list is changed (ul<=>ol):
                for i in range(ltags.count(lent)):
                    ltags.pop()
                    out.append(etags.pop())
                tlev[-1] = tag
                out.append('<' + tag + '>' + pp)
                etags.append('</' + tag + '>' + pp)
                ltags.append(lev)
            else:
                if ltags.count(lev) > 1:
                    # Close the previous sibling <li> at the same level.
                    out.append(etags.pop())
                    ltags.pop()
        mtag = 'l'
        out.append('<li>')
        etags.append('</li>' + pp)
        ltags.append(lev)
        if s[:1] == '-':
            # Item text may itself start a table/blockquote/hr.
            (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
        if p and mtag == 'l':
            # "++." / "--." opens a paragraph inside the item.
            (lev, mtag, lineno) = parse_point(t, s, lev, '', lineno)
        else:
            out.append(s)
        return (lev, mtag, lineno)
    def parse_point(t, s, lev, mtag, lineno):
        """Handle a paragraph line ('...'-run) inside a list.

        ``t`` is the '.'-run whose length is the nesting depth.  Delegates
        to parse_list() when the depth increases; otherwise closes/opens
        paragraph tags at the current level.  Returns (lev, mtag, lineno).
        """
        lent = len(t)
        if lent > lev:
            # Deeper than the current list: treat as a new <ul> item.
            return parse_list(t, '.', s, 'ul', lev, mtag, lineno)
        elif lent < lev:
            # Shallower: unwind list levels down to this depth.
            while ltags[-1] > lent:
                ltags.pop()
                out.append(etags.pop())
            lev = lent
            tlev[lev:] = []
            mtag = ''
        elif lent == lev:
            if pend and mtag == '.':
                # Close the previous paragraph at this level.
                out.append(etags.pop())
                ltags.pop()
            if br and mtag in ('l', '.'):
                out.append(br)
        if s == META:
            # Pure code placeholder: no paragraph wrapper.
            mtag = ''
        else:
            mtag = '.'
            if s[:1] == '-':
                (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
            if mtag == '.':
                out.append(pbeg)
                if pend:
                    etags.append(pend)
                    ltags.append(lev)
        out.append(s)
        return (lev, mtag, lineno)
    def parse_table_or_blockquote(s, mtag, lineno):
        """Scan forward from a '----' rule line and build a table,
        blockquote or <hr />.

        Dispatch on the line after the rule:
        - empty -> this is an <hr /> tag
        - contains '|' -> table (with optional '====' head/foot rules)
        - other characters -> blockquote (rendered recursively)

        Consumes lines by advancing ``lineno``; returns the replacement
        HTML string, the marker tag 'q', and the new line number.
        """
        # check next line. If next line :
        # - is empty -> this is an <hr /> tag
        # - consists '|' -> table
        # - consists other characters -> blockquote
        if (lineno + 1 >= strings_len or
                not (s.count('-') == len(s) and len(s) > 3)):
            # Not a rule line (or nothing follows): leave unchanged.
            return (s, mtag, lineno)
        lineno += 1
        s = strings[lineno].strip()
        if s:
            if '|' in s:
                # table
                tout = []
                thead = []
                tbody = []
                rownum = 0
                t_id = ''
                t_cls = ''
                # parse table:
                while lineno < strings_len:
                    s = strings[lineno].strip()
                    if s[:1] == '=':
                        # header or footer separator ('====' rule)
                        if s.count('=') == len(s) and len(s) > 3:
                            if not thead:  # if thead list is empty:
                                thead = tout
                            else:
                                tbody.extend(tout)
                            tout = []
                            rownum = 0
                            lineno += 1
                            continue
                    m = regex_tq.match(s)
                    if m:
                        # Closing '----:class[id]' line terminates the table.
                        t_cls = m.group('c') or ''
                        t_id = m.group('p') or ''
                        break
                    if rownum % 2:
                        tr = '<tr class="even">'
                    else:
                        tr = '<tr class="first">' if rownum == 0 else '<tr>'
                    # Numeric-looking cells get class="num".
                    tout.append(tr + ''.join(['<td%s>%s</td>' % (
                        ' class="num"'
                        if regex_num.match(f) else '',
                        f.strip()
                    ) for f in s.split('|')]) + '</tr>' + pp)
                    rownum += 1
                    lineno += 1
                t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
                    if t_cls and t_cls != 'id' else ''
                t_id = ' id="%s%s"' % (id_prefix, t_id) if t_id else ''
                s = ''
                if thead:
                    s += '<thead>' + pp + ''.join([l for l in thead]) + '</thead>' + pp
                if not tbody:  # tbody strings are in tout list
                    tbody = tout
                    tout = []
                if tbody:  # if tbody list is not empty:
                    s += '<tbody>' + pp + ''.join([l for l in tbody]) + '</tbody>' + pp
                if tout:  # tfoot is not empty:
                    s += '<tfoot>' + pp + ''.join([l for l in tout]) + '</tfoot>' + pp
                s = '<table%s%s>%s%s</table>%s' % (t_cls, t_id, pp, s, pp)
                mtag = 't'
            else:
                # parse blockquote:
                bq_begin = lineno
                t_mode = False  # embedded table
                t_cls = ''
                t_id = ''
                # search blockquote closing line:
                while lineno < strings_len:
                    s = strings[lineno].strip()
                    if not t_mode:
                        m = regex_tq.match(s)
                        if m:
                            # Closing rule, unless it opens an embedded table.
                            if (lineno + 1 == strings_len or
                                    '|' not in strings[lineno + 1]):
                                t_cls = m.group('c') or ''
                                t_id = m.group('p') or ''
                                break
                        if regex_bq_headline.match(s):
                            if (lineno + 1 < strings_len and
                                    strings[lineno + 1].strip()):
                                # A rule followed by text starts an embedded table.
                                t_mode = True
                            lineno += 1
                            continue
                    elif regex_tq.match(s):
                        # Embedded table terminator.
                        t_mode = False
                        lineno += 1
                        continue
                    lineno += 1
                t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
                    if t_cls and t_cls != 'id' else ''
                t_id = ' id="%s%s"' % (id_prefix, t_id) \
                    if t_id else ''
                # Blockquote content is re-rendered recursively.
                s = '<blockquote%s%s>%s</blockquote>%s' \
                    % (t_cls,
                       t_id,
                       render('\n'.join(strings[bq_begin:lineno])), pp)
                mtag = 'q'
        else:
            # Rule followed by an empty line: horizontal rule.
            s = '<hr />'
            lineno -= 1
            mtag = 'q'
        return (s, 'q', lineno)
if sep == 'p':
pbeg = "<p>"
pend = "</p>" + pp
br = ''
else:
pbeg = pend = ''
br = "<br />" + pp if sep == 'br' else ''
lev = 0 # nesting level of lists
c0 = '' # first character of current line
out = [] # list of processed lines
etags = [] # trailing tags
ltags = [] # level# correspondent to trailing tag
tlev = [] # list of tags for each level ('ul' or 'ol')
mtag = '' # marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>
# and to avoid <p></p> around tables and blockquotes
lineno = 0
strings_len = len(strings)
while lineno < strings_len:
s0 = strings[lineno][:1]
s = strings[lineno].strip()
""" # + - . ---------------------
## ++ -- .. ------- field | field | field <-title
### +++ --- ... quote =====================
#### ++++ ---- .... ------- field | field | field <-body
##### +++++ ----- ..... ---------------------:class[id]
"""
pc0 = c0 # first character of previous line
c0 = s[:1]
if c0: # for non empty strings
if c0 in "#+-.": # first character is one of: # + - .
(t1, t2, p, ss) = regex_list.findall(s)[0]
# t1 - tag ("###")
# t2 - tag ("+++", "---", "...")
# p - paragraph point ('.')->for "++." or "--."
# ss - other part of string
if t1 or t2:
# headers and lists:
if c0 == '#': # headers
(lev, mtag) = parse_title(t1, ss)
lineno += 1
continue
elif c0 == '+': # ordered list
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ol', lev, mtag, lineno)
lineno += 1
continue
elif c0 == '-': # unordered list, table or blockquote
if p or ss:
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ul', lev, mtag, lineno)
lineno += 1
continue
else:
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
elif lev > 0: # and c0 == '.' # paragraph in lists
(lev, mtag, lineno) = parse_point(t2, ss, lev, mtag, lineno)
lineno += 1
continue
if lev == 0 and (mtag == 'q' or s == META):
# new paragraph
pc0 = ''
if pc0 == '' or (mtag != 'p' and s0 not in (' ', '\t')):
# paragraph
out.extend(etags[::-1])
etags = []
ltags = []
tlev = []
lev = 0
if br and mtag == 'p':
out.append(br)
if mtag != 'q' and s != META:
if pend:
etags = [pend]
out.append(pbeg)
mtag = 'p'
else:
mtag = ''
out.append(s)
else:
if lev > 0 and mtag == '.' and s == META:
out.append(etags.pop())
ltags.pop()
out.append(s)
mtag = ''
else:
out.append(' ' + s)
lineno += 1
out.extend(etags[::-1])
text = ''.join(out)
#############################################################
# do strong,em,del
#############################################################
text = regex_strong.sub('<strong>\g<t></strong>', text)
text = regex_del.sub('<del>\g<t></del>', text)
text = regex_em.sub('<em>\g<t></em>', text)
#############################################################
# deal with images, videos, audios and links
#############################################################
    def sub_media(m):
        """Expand a stored [[...]] payload into <img>/<video>/<audio> HTML.

        Regex groups: t=title/alt text, a=tooltip, k=URL, p=placement
        keyword (left/right/center/block*/video/audio), w=width.
        Returns the original match unchanged when no URL is present.
        """
        t, a, k, p, w = m.group('t', 'a', 'k', 'p', 'w')
        if not k:
            return m.group(0)
        k = local_html_escape(k)
        t = t or ''
        style = 'width:%s' % w if w else ''
        # META inside attribute text must not be re-expanded later.
        title = ' title="%s"' % local_html_escape(a).replace(META, DISABLED_META) if a else ''
        p_begin = p_end = ''
        if p == 'center':
            p_begin = '<p style="text-align:center">'
            p_end = '</p>' + pp
        elif p == 'blockleft':
            p_begin = '<p style="text-align:left">'
            p_end = '</p>' + pp
        elif p == 'blockright':
            p_begin = '<p style="text-align:right">'
            p_end = '</p>' + pp
        elif p in ('left', 'right'):
            style = ('float:%s' % p) + (';%s' % style if style else '')
        if t and regex_auto.match(t):
            # Title that looks like a URL becomes a wrapping link.
            p_begin = p_begin + '<a href="%s">' % t
            p_end = '</a>' + p_end
            t = ''
        if style:
            style = ' style="%s"' % style
        if p in ('video', 'audio'):
            # Fallback text inside <video>/<audio> is itself rendered.
            t = render(t, {}, {}, 'br', URL, environment, latex,
                       autolinks, protolinks, class_prefix, id_prefix, pretty_print)
            return '<%(p)s controls="controls"%(title)s%(style)s><source src="%(k)s" />%(t)s</%(p)s>' \
                % dict(p=p, title=title, style=style, k=k, t=t)
        alt = ' alt="%s"' % local_html_escape(t).replace(META, DISABLED_META) if t else ''
        return '%(begin)s<img src="%(k)s"%(alt)s%(title)s%(style)s />%(end)s' \
            % dict(begin=p_begin, k=k, alt=alt, title=title, style=style, end=p_end)
    def sub_link(m):
        """Expand a stored [[...]] payload into an <a>, <br /> or anchor span.

        Regex groups: t=link text, a=tooltip/anchor body, k=URL,
        p='popup' for target=_blank.  Returns the original match when
        neither text nor URL is present.
        """
        t, a, k, p = m.group('t', 'a', 'k', 'p')
        if not k and not t:
            return m.group(0)
        t = t or ''
        a = local_html_escape(a) if a else ''
        if k:
            if '#' in k and ':' not in k.split('#')[0]:
                # wikipage, not external url
                k = k.replace('#', '#' + id_prefix)
            k = local_html_escape(k)
            title = ' title="%s"' % a.replace(META, DISABLED_META) if a else ''
            target = ' target="_blank"' if p == 'popup' else ''
            # Link text is rendered inline; defaults to the URL itself.
            t = render(t, {}, {}, 'br', URL, environment, latex, None,
                       None, class_prefix, id_prefix, pretty_print) if t else k
            return '<a href="%(k)s"%(title)s%(target)s>%(t)s</a>' \
                % dict(k=k, title=title, target=target, t=t)
        if t == 'NEWLINE' and not a:
            # [[NEWLINE]] is the explicit line-break marker.
            return '<br />' + pp
        # No URL: this is a named anchor whose body is rendered inline.
        return '<span class="anchor" id="%s">%s</span>' % (
            local_html_escape(id_prefix + t),
            render(a, {}, {}, 'br', URL,
                   environment, latex, autolinks,
                   protolinks, class_prefix,
                   id_prefix, pretty_print))
parts = text.split(LINK)
text = parts[0]
for i, s in enumerate(links):
if s is None:
html = LINK
else:
html = regex_media_level2.sub(sub_media, s)
if html == s:
html = regex_link_level2.sub(sub_link, html)
if html == s:
# return unprocessed string as a signal of an error
html = '[[%s]]' % s
text += html + parts[i + 1]
#############################################################
# process all code text
#############################################################
    def expand_meta(m):
        """Replace each META placeholder with the code segment stored by
        mark_code(), dispatching on the segment's class name.

        Pops ``segments`` in FIFO order, so this must run exactly once per
        placeholder.  Known classes: keys of ``extra`` (user handlers),
        'cite', 'latex', HTML color names, 'c'/'color'; anything else
        becomes <code>/<pre>.
        """
        code, b, p, s = segments.pop(0)
        if code is None or m.group() == DISABLED_META:
            # Sentinel or disabled placeholder: emit the original text.
            return local_html_escape(s)
        if b in extra:
            # User-supplied handler; strip one leading/trailing newline.
            if code[:1] == '\n':
                code = code[1:]
            if code[-1:] == '\n':
                code = code[:-1]
            if p:
                return str(extra[b](code, p))
            else:
                return str(extra[b](code))
        elif b == 'cite':
            return '[' + ','.join('<a href="#%s" class="%s">%s</a>' %
                                  (id_prefix + d, b, d) for d in local_html_escape(code).split(',')) + ']'
        elif b == 'latex':
            return LATEX % urllib_quote(code)
        elif b in html_colors:
            return '<span style="color: %s">%s</span>' \
                % (b, render(code, {}, {}, 'br', URL, environment, latex,
                             autolinks, protolinks, class_prefix, id_prefix, pretty_print))
        elif b in ('c', 'color') and p:
            # Parameter is 'fg:bg'; either side may be empty.
            c = p.split(':')
            fg = 'color: %s;' % c[0] if c[0] else ''
            bg = 'background-color: %s;' % c[1] if len(c) > 1 and c[1] else ''
            return '<span style="%s%s">%s</span>' \
                % (fg, bg, render(code, {}, {}, 'br', URL, environment, latex,
                                  autolinks, protolinks, class_prefix, id_prefix, pretty_print))
        cls = ' class="%s%s"' % (class_prefix, b) if b and b != 'id' else ''
        id = ' id="%s%s"' % (id_prefix, local_html_escape(p)) if p else ''
        # beg/end mark surrounding newlines: True==1 slices off the first
        # char; [None, -1][...] selects -1 (drop last char) when trailing.
        beg = (code[:1] == '\n')
        end = [None, -1][code[-1:] == '\n']
        if beg and end:
            # Newlines on both sides -> block-level <pre><code>.
            return '<pre><code%s%s>%s</code></pre>%s' % (cls, id, local_html_escape(code[1:-1]), pp)
        return '<code%s%s>%s</code>' % (cls, id, local_html_escape(code[beg:end]))
text = regex_expand_meta.sub(expand_meta, text)
if environment:
text = replace_components(text, environment)
return text.translate(ttab_out)
def markmin2html(text, extra=None, allowed=None, sep='p',
                 autolinks='default', protolinks='default',
                 class_prefix='', id_prefix='markmin_', pretty_print=False):
    """Convert markmin markup in *text* to HTML.

    Thin public wrapper around render().  ``extra`` maps custom code-block
    class names to handler callables; ``allowed`` whitelists code-block
    classes; ``sep`` selects paragraph separation ('p', 'br' or other).

    Fix: the previous mutable default arguments (``extra={}``,
    ``allowed={}``) created module-level dicts shared across every call;
    ``None`` sentinels preserve the old call behaviour without the
    shared-state hazard.
    """
    return render(text,
                  {} if extra is None else extra,
                  {} if allowed is None else allowed,
                  sep,
                  autolinks=autolinks, protolinks=protolinks,
                  class_prefix=class_prefix, id_prefix=id_prefix,
                  pretty_print=pretty_print)
def run_doctests():
    """Execute every doctest defined in the current __main__ module."""
    import doctest as doctest_mod
    doctest_mod.testmod()
if __name__ == '__main__':
    import sys
    import doctest
    from textwrap import dedent

    # Page skeleton shared by the -h and file-conversion modes; the
    # leading newline produced by the triple-quoted literal is dropped
    # with [1:] after dedent().
    html = dedent("""
    <!doctype html>
    <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
    <head>
    <meta http-equiv="content-type" content="text/html; charset=utf-8" />
    %(style)s
    <title>%(title)s</title>
    </head>
    <body>
    %(body)s
    </body>
    </html>""")[1:]

    if sys.argv[1:2] == ['-h']:
        # Render this module's own docstring as a styled demo page.
        style = dedent("""
        <style>
        blockquote { background-color: #FFFAAE; padding: 7px; }
        table { border-collapse: collapse; }
        thead td { border-bottom: 1px solid; }
        tfoot td { border-top: 1px solid; }
        .tableclass1 { background-color: lime; }
        .tableclass1 thead { color: yellow; background-color: green; }
        .tableclass1 tfoot { color: yellow; background-color: green; }
        .tableclass1 .even td { background-color: #80FF7F; }
        .tableclass1 .first td {border-top: 1px solid; }
        td.num { text-align: right; }
        pre { background-color: #E0E0E0; padding: 5px; }
        </style>""")[1:]
        print(html % dict(title="Markmin markup language",
                          style=style,
                          body=markmin2html(__doc__, pretty_print=True)))
    elif sys.argv[1:2] == ['-t']:
        from timeit import Timer
        loops = 1000
        ts = Timer("markmin2html(__doc__)", "from markmin2html import markmin2html")
        print('timeit "markmin2html(__doc__)":')
        t = min([ts.timeit(loops) for i in range(3)])
        # Timer.timeit(loops) returns the TOTAL seconds for `loops` runs,
        # so per-loop milliseconds is t / loops * 1000.  The previous
        # expression `t / 1000 * loops` only gave the right number because
        # loops happened to equal 1000.
        print("%s loops, best of 3: %.3f ms per loop" % (loops, t / loops * 1000))
    elif len(sys.argv) > 1:
        # Convert a markmin file to HTML, optionally embedding or linking
        # a stylesheet given as the second argument.
        with open(sys.argv[1], 'r') as fargv:
            markmin_text = fargv.read()
        if len(sys.argv) > 2:
            if sys.argv[2].startswith('@'):
                # '@path' links the stylesheet instead of embedding it.
                markmin_style = '<link rel="stylesheet" href="' + sys.argv[2][1:] + '"/>'
            else:
                with open(sys.argv[2], 'r') as fargv2:
                    markmin_style = "<style>\n" + fargv2.read() + "</style>"
        else:
            markmin_style = ""
        print(html % dict(title=sys.argv[1], style=markmin_style,
                          body=markmin2html(markmin_text, pretty_print=True)))
    else:
        print("Usage: " + sys.argv[0] + " -h | -t | file.markmin [file.css|@path_to/css]")
        print("where: -h - print __doc__")
        print("       -t - timeit __doc__ (for testing purpose only)")
        print("       file.markmin [file.css] - process file.markmin + built in file.css (optional)")
        print("       file.markmin [@path_to/css] - process file.markmin + link path_to/css (optional)")

    run_doctests()
|
pawitp/android_kernel_samsung_i9082 | refs/heads/cm-12.0 | tools/perf/python/twatch.py | 3213 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
	"""Open one dummy software event per cpu/thread pair and dump task,
	comm and wakeup events as they arrive.

	Requires the `perf` python binding built from tools/perf.  Loops
	forever; interrupt with Ctrl-C.  (Python 2 syntax throughout.)
	"""
	cpus = perf.cpu_map()
	threads = perf.thread_map()
	# NOTE(review): SAMPLE_TID is OR-ed into sample_type twice below --
	# harmless since these are bit flags, but presumably one occurrence
	# was meant to be a different SAMPLE_* flag; confirm upstream intent.
	evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
			   wakeup_events = 1, sample_period = 1,
			   sample_id_all = 1,
			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
	evsel.open(cpus = cpus, threads = threads);
	evlist = perf.evlist(cpus, threads)
	evlist.add(evsel)
	evlist.mmap()
	# Block on poll(), then drain every per-cpu ring buffer and print
	# each event's cpu/pid/tid followed by the event itself.
	while True:
		evlist.poll(timeout = -1)
		for cpu in cpus:
			event = evlist.read_on_cpu(cpu)
			if not event:
				continue
			print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
								event.sample_pid,
								event.sample_tid),
			print event

if __name__ == '__main__':
	main()
|
joakim-hove/django | refs/heads/master | tests/migrations/migrations_test_apps/alter_fk/author_app/migrations/0002_alter_id.py | 379 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch Author's primary key to a 10-char CharField.

    Depending on book_app's initial migration forces the book table to
    alter its foreign key alongside the changed key type.
    """

    dependencies = [
        ('author_app', '0001_initial'),
        ('book_app', '0001_initial'),  # Forces the book table to alter the FK
    ]

    operations = [
        migrations.AlterField(
            model_name='author',
            name='id',
            field=models.CharField(primary_key=True, max_length=10),
        ),
    ]
|
pyneng/pyneng.github.io | refs/heads/master | code_examples/pytest/check_ip_functions.py | 1 | import ipaddress
def check_ip(ip):
    """Return True if *ip* is a valid IPv4 or IPv6 address literal.

    Delegates validation to ipaddress.ip_address(), which raises
    ValueError for anything that is not a well-formed address.
    """
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        # Not a parseable address (wrong format or out-of-range octets).
        return False
if __name__ == "__main__":
    # Quick manual smoke test when run as a script.
    outcome = check_ip('10.1.1.1')
    print('Function result:', outcome)
|
nevil-brownlee/python-libtrace | refs/heads/master | test/v3-test-cases/test-plt.py | 2 | #!/usr/bin/env python
# Thu, 13 Mar 14 (PDT)
# plt-test.py: Test packet-level attributes
# Copyright (C) 2017, Nevil Brownlee, U Auckland | WAND
import socket
import sys
from plt_testing import *
#dt = datetime.datetime.utcnow() # class
#print "***** dt = {0}".format(dt)
#t = plt.ipp_obj(4, "1234")
#print "t = {0}\n".format(t)
# Exercise packet-level attributes of the python-libtrace binding against
# the bundled anon-v4.pcap example trace, logging via test_println().
v = plt.version()
test_println("plt version = {0}".format(v), get_tag())
#d = plt.Data(3)
#print "d={0}\n".format(d)
t = get_example_trace('anon-v4.pcap')
p = plt.packet()
np = 0
# Read the first 12 packets one at a time with read_packet().
while np != 12:
    t.read_packet(p)
    # print "np=%d, %s" % (np, p)
    test_println("np=%d" % (np), get_tag())
    np += 1
    ip = p.ip
    if ip:
        # print "%3d, ip = <%s>" % (np, ip)
        test_println("%3d" % (np), get_tag())
        test_println("  wlen=%d, caplen=%d, src=%s, dst=%s" % (
            p.wire_len, p.capture_len, ip.src_prefix, ip.dst_prefix), get_tag("np:"+str(np)))
n = 0
# Iterate the trace object directly; probe link/ether types, timestamps
# and the writable IP header attributes on the first few packets.
for pkt in t:
    n += 1  # Wireshark uses 1-org packet numbers
    test_println("n=%d" % (n), get_tag())
    linktype = pkt.linktype
    ethertype = pkt.ethertype
    test_println("n=%d, linktype=%d, ethertype=%04x " % (n, linktype, ethertype), get_tag("n:"+str(n)))
    if n == 5:
        break
    ip = pkt.ip
    if not ip:
        continue
    pt = pkt.time
    test_println("time = >{0}<".format(pt), get_tag("n:"+str(n)))
    # pkt.time = 20.5
    # print "seconds=%f, ts_sec=%u, erf_time=%llu" % (pkt.seconds, pkt.ts_sec, pkt.erf_time)
    test_println("seconds={0}, ts_sec={1}, erf_time={2}".format(pkt.seconds, pkt.ts_sec, pkt.erf_time), get_tag("n:"+str(n)))
    wlen = pkt.wire_len;  clen = pkt.capture_len
    test_println("n=%d, wlen=%d, clen=%d" % (n, wlen, clen), get_tag("n:"+str(n)))
    # print "***** ip={0}\n" . format(ip)
    test_println("   ver=%d, %s -> %s, proto=%d, tclass=%d, ttl=%d, hlen=%d, plen=%d" % (
        ip.version, ip.src_prefix, ip.dst_prefix,
        ip.proto, ip.traffic_class, ip.hop_limit, ip.hdr_len, ip.pkt_len), get_tag("n:"+str(n)))
    # io = 55
    # print "io = {0}".format(io)
    # Mutate writable header fields and confirm the change is reflected.
    ip.traffic_class = 55;
    ip.hop_limit = (123);
    test_println("== ver=%d, %s -> %s, proto=%d, tclass=%d, ttl=%d, hlen=%d, plen=%d" % (
        ip.version, ip.src_prefix, ip.dst_prefix,
        ip.proto, ip.traffic_class, ip.hop_limit, ip.hdr_len, ip.pkt_len), get_tag("n:"+str(n)))
    ethertype = ip.ethertype
    linktype = ip.linktype
    test_println("   from ip: linktype=%d, ethertype=%04x" % (linktype, ethertype), get_tag("n:"+str(n)))
    # print "ip.info() = [%s]" % (ip.info())
    test_println("%s -> %s " % (ip.src_prefix, ip.dst_prefix), get_tag("n:"+str(n)))
    # Addresses can be rewritten via ipp prefixes.
    ip.src_prefix = ipp.from_s("1.2.3.4")
    ip.dst_prefix = ipp.from_s("5.6.7.8")
    test_println("now %s => %s " % (ip.src_prefix, ip.dst_prefix), get_tag("n:"+str(n)))
    # ip.version = 5  # Read-only!
t.close()  # Don't do this inside the loop!
test_println("%d packets in trace\n" % (n), get_tag())
|
aaron-spear/style-guide | refs/heads/gh-pages | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 778 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
  def setUp(self):
    # Fresh in-memory stream so each test captures only its own warnings.
    self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
  def testValidateMSVSSettings_tool_names(self):
    """Tests that only MSVS tool names are allowed."""
    # 'foo' is an unknown tool and 'ClCompile' is an MSBuild (not MSVS)
    # tool name; both must produce warnings, the real tools none.
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {},
         'VCLinkerTool': {},
         'VCMIDLTool': {},
         'foo': {},
         'VCResourceCompilerTool': {},
         'VCLibrarianTool': {},
         'VCManifestTool': {},
         'ClCompile': {}},
        self.stderr)
    self._ExpectedWarnings([
        'Warning: unrecognized tool foo',
        'Warning: unrecognized tool ClCompile'])
  def testValidateMSVSSettings_settings(self):
    """Validates a large MSVS settings fixture and checks that exactly the
    deliberately-invalid entries (bad ranges, bogus names, wrong types)
    are reported as warnings."""
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '5',       # out of range -> warning
            'BrowseInformation': 'fdkslj',   # not an int -> warning
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '-1',       # out of range -> warning
            'CompileAs': '1',
            'DebugInformationFormat': '2',   # unmapped value -> warning
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',        # wrong case -> unrecognized
            'ErrorReporting': '1',
            'ExceptionHandling': '1',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '1',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '1',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'string1;string2',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '1',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},               # unknown setting -> warning
         'VCLinkerTool': {
             'AdditionalDependencies': 'file1;file2',
             'AdditionalLibraryDirectories': 'folder1;folder2',
             'AdditionalManifestDependencies': 'file1;file2',
             'AdditionalOptions': 'a string1',
             'AddModuleNamesToAssembly': 'file1;file2',
             'AllowIsolation': 'true',
             'AssemblyDebug': '2',
             'AssemblyLinkResource': 'file1;file2',
             'BaseAddress': 'a string1',
             'CLRImageType': '2',
             'CLRThreadAttribute': '2',
             'CLRUnmanagedCodeCheck': 'true',
             'DataExecutionPrevention': '2',
             'DelayLoadDLLs': 'file1;file2',
             'DelaySign': 'true',
             'Driver': '2',
             'EmbedManagedResourceFile': 'file1;file2',
             'EnableCOMDATFolding': '2',
             'EnableUAC': 'true',
             'EntryPointSymbol': 'a string1',
             'ErrorReporting': '2',
             'FixedBaseAddress': '2',
             'ForceSymbolReferences': 'file1;file2',
             'FunctionOrder': 'a_file_name',
             'GenerateDebugInformation': 'true',
             'GenerateManifest': 'true',
             'GenerateMapFile': 'true',
             'HeapCommitSize': 'a string1',
             'HeapReserveSize': 'a string1',
             'IgnoreAllDefaultLibraries': 'true',
             'IgnoreDefaultLibraryNames': 'file1;file2',
             'IgnoreEmbeddedIDL': 'true',
             'IgnoreImportLibrary': 'true',
             'ImportLibrary': 'a_file_name',
             'KeyContainer': 'a_file_name',
             'KeyFile': 'a_file_name',
             'LargeAddressAware': '2',
             'LinkIncremental': '2',
             'LinkLibraryDependencies': 'true',
             'LinkTimeCodeGeneration': '2',
             'ManifestFile': 'a_file_name',
             'MapExports': 'true',
             'MapFileName': 'a_file_name',
             'MergedIDLBaseFileName': 'a_file_name',
             'MergeSections': 'a string1',
             'MidlCommandFile': 'a_file_name',
             'ModuleDefinitionFile': 'a_file_name',
             'OptimizeForWindows98': '1',
             'OptimizeReferences': '2',
             'OutputFile': 'a_file_name',
             'PerUserRedirection': 'true',
             'Profile': 'true',
             'ProfileGuidedDatabase': 'a_file_name',
             'ProgramDatabaseFile': 'a_file_name',
             'RandomizedBaseAddress': '2',
             'RegisterOutput': 'true',
             'ResourceOnlyDLL': 'true',
             'SetChecksum': 'true',
             'ShowProgress': '2',
             'StackCommitSize': 'a string1',
             'StackReserveSize': 'a string1',
             'StripPrivateSymbols': 'a_file_name',
             'SubSystem': '2',
             'SupportUnloadOfDelayLoadedDLL': 'true',
             'SuppressStartupBanner': 'true',
             'SwapRunFromCD': 'true',
             'SwapRunFromNet': 'true',
             'TargetMachine': '2',           # unmapped value -> warning
             'TerminalServerAware': '2',
             'TurnOffAssemblyGeneration': 'true',
             'TypeLibraryFile': 'a_file_name',
             'TypeLibraryResourceID': '33',
             'UACExecutionLevel': '2',
             'UACUIAccess': 'true',
             'UseLibraryDependencyInputs': 'true',
             'UseUnicodeResponseFiles': 'true',
             'Version': 'a string1'},
         'VCMIDLTool': {
             'AdditionalIncludeDirectories': 'folder1;folder2',
             'AdditionalOptions': 'a string1',
             'CPreprocessOptions': 'a string1',
             'DefaultCharType': '1',
             'DLLDataFileName': 'a_file_name',
             'EnableErrorChecks': '1',
             'ErrorCheckAllocations': 'true',
             'ErrorCheckBounds': 'true',
             'ErrorCheckEnumRange': 'true',
             'ErrorCheckRefPointers': 'true',
             'ErrorCheckStubData': 'true',
             'GenerateStublessProxies': 'true',
             'GenerateTypeLibrary': 'true',
             'HeaderFileName': 'a_file_name',
             'IgnoreStandardIncludePath': 'true',
             'InterfaceIdentifierFileName': 'a_file_name',
             'MkTypLibCompatible': 'true',
             'notgood': 'bogus',             # unknown setting -> warning
             'OutputDirectory': 'a string1',
             'PreprocessorDefinitions': 'string1;string2',
             'ProxyFileName': 'a_file_name',
             'RedirectOutputAndErrors': 'a_file_name',
             'StructMemberAlignment': '1',
             'SuppressStartupBanner': 'true',
             'TargetEnvironment': '1',
             'TypeLibraryName': 'a_file_name',
             'UndefinePreprocessorDefinitions': 'string1;string2',
             'ValidateParameters': 'true',
             'WarnAsError': 'true',
             'WarningLevel': '1'},
         'VCResourceCompilerTool': {
             'AdditionalOptions': 'a string1',
             'AdditionalIncludeDirectories': 'folder1;folder2',
             'Culture': '1003',
             'IgnoreStandardIncludePath': 'true',
             'notgood2': 'bogus',            # unknown setting -> warning
             'PreprocessorDefinitions': 'string1;string2',
             'ResourceOutputFileName': 'a string1',
             'ShowProgress': 'true',
             'SuppressStartupBanner': 'true',
             'UndefinePreprocessorDefinitions': 'string1;string2'},
         'VCLibrarianTool': {
             'AdditionalDependencies': 'file1;file2',
             'AdditionalLibraryDirectories': 'folder1;folder2',
             'AdditionalOptions': 'a string1',
             'ExportNamedFunctions': 'string1;string2',
             'ForceSymbolReferences': 'a string1',
             'IgnoreAllDefaultLibraries': 'true',
             'IgnoreSpecificDefaultLibraries': 'file1;file2',
             'LinkLibraryDependencies': 'true',
             'ModuleDefinitionFile': 'a_file_name',
             'OutputFile': 'a_file_name',
             'SuppressStartupBanner': 'true',
             'UseUnicodeResponseFiles': 'true'},
         'VCManifestTool': {
             'AdditionalManifestFiles': 'file1;file2',
             'AdditionalOptions': 'a string1',
             'AssemblyIdentity': 'a string1',
             'ComponentFileName': 'a_file_name',
             'DependencyInformationFile': 'a_file_name',
             'GenerateCatalogFiles': 'true',
             'InputResourceManifests': 'a string1',
             'ManifestResourceFile': 'a_file_name',
             'OutputManifestFile': 'a_file_name',
             'RegistrarScriptFile': 'a_file_name',
             'ReplacementsFile': 'a_file_name',
             'SuppressStartupBanner': 'true',
             'TypeLibraryFile': 'a_file_name',
             'UpdateFileHashes': 'truel',    # not a bool -> warning
             'UpdateFileHashesSearchPath': 'a_file_name',
             'UseFAT32Workaround': 'true',
             'UseUnicodeResponseFiles': 'true',
             'VerboseOutput': 'true'}},
        self.stderr)
    self._ExpectedWarnings([
        'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
        'index value (5) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/BrowseInformation, '
        "invalid literal for int() with base 10: 'fdkslj'",
        'Warning: for VCCLCompilerTool/CallingConvention, '
        'index value (-1) not in expected range [0, 3)',
        'Warning: for VCCLCompilerTool/DebugInformationFormat, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
        'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
        'Warning: for VCLinkerTool/TargetMachine, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCMIDLTool/notgood',
        'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
        'Warning: for VCManifestTool/UpdateFileHashes, '
        "expected bool; got 'truel'"
        ''])
  def testValidateMSBuildSettings_settings(self):
    """Tests validation of a large, mostly-valid set of MSBuild settings.

    Every entry below is a legal MSBuild tool setting except for a handful
    of deliberately bad ones (unknown names, malformed bools, bad enum
    values); only those must be reported, matching the warnings asserted
    at the end.
    """
    MSVSSettings.ValidateMSBuildSettings(
        {'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'false',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'BuildingInIDE': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'CompileAsManaged': 'Pure',
            'CreateHotpatchableImage': 'true',
            'DebugInformationFormat': 'ProgramDatabase',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',  # deliberately invalid (unknown name)
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'SyncCThrow',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Precise',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'FunctionLevelLinking': 'false',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'false',
            'MinimalRebuild': 'true',
            'MultiProcessorCompilation': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Disabled',
            'PrecompiledHeader': 'NotUsing',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'PreprocessOutputPath': 'a string1',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'false',
            'ProcessorNumber': '33',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TreatSpecificWarningsAsErrors': 'string1;string2',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UseUnicodeForAssemblerListing': 'true',
            'WarningLevel': 'TurnOffAllWarnings',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},  # deliberately invalid (unknown name)
         'Link': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'BuildingInIDE': 'true',
            'CLRImageType': 'ForceIJWImage',
            'CLRSupportLastError': 'Enabled',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'CreateHotPatchableImage': 'X86Image',
            'DataExecutionPrevention': 'false',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': 'NotSet',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'FixedBaseAddress': 'false',
            'ForceFileOutput': 'Enabled',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'a_file_list',
            'ImageHasSafeExceptionHandlers': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'false',
            'LinkDLL': 'true',
            'LinkErrorReporting': 'SendErrorReport',
            'LinkStatus': 'true',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'MSDOSStubFileName': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'false',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'PreventDllBinding': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SectionAlignment': '33',
            'SetChecksum': 'true',
            'ShowProgress': 'LinkVerboseREF',
            'SpecifySectionAttributes': 'a string1',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Console',
            'SupportNobindOfDelayLoadedDLL': 'true',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TrackerLogDirectory': 'a_folder',
            'TreatLinkerWarningAsErrors': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'AsInvoker',
            'UACUIAccess': 'true',
            'Version': 'a string1'},
         'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'Culture': '0x236',
            'IgnoreStandardIncludePath': 'true',
            'NullTerminateStrings': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ApplicationConfigurationMode': 'true',
            'ClientStubFile': 'a_file_name',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': 'Signed',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'EnableCustom',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateClientFiles': 'Stub',
            'GenerateServerFiles': 'None',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'LocaleID': '33',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'ServerStubFile': 'a_file_name',
            'StructMemberAlignment': 'NotSet',
            'SuppressCompilerWarnings': 'true',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Itanium',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibFormat': 'NewFormat',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'Lib': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'DisplayLibrary': 'a string1',
            'ErrorReporting': 'PromptImmediately',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkTimeCodeGeneration': 'true',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'Name': 'a_file_name',
            'OutputFile': 'a_file_name',
            'RemoveObjects': 'file1;file2',
            'SubSystem': 'Console',
            'SuppressStartupBanner': 'true',
            'TargetMachine': 'MachineX86i',  # deliberately invalid enum value
            'TrackerLogDirectory': 'a_folder',
            'TreatLibWarningAsErrors': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Verbose': 'true'},
         'Manifest': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'EnableDPIAwareness': 'fal',  # deliberately invalid (not a bool)
            'GenerateCatalogFiles': 'truel',  # deliberately invalid (not a bool)
            'GenerateCategoryTags': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestFromManagedAssembly': 'a_file_name',
            'notgood3': 'bogus',  # deliberately invalid (unknown name)
            'OutputManifestFile': 'a_file_name',
            'OutputResourceManifests': 'a string1',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressDependencyElement': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
         'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'true'},
         'ManifestResourceCompile': {
            'ResourceOutputFileName': 'a_file_name'},
         '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}},
        self.stderr)
    # Only the deliberately bad entries above may be flagged.
    self._ExpectedWarnings([
        'Warning: unrecognized setting ClCompile/Enableprefast',
        'Warning: unrecognized setting ClCompile/ZZXYZ',
        'Warning: unrecognized setting Manifest/notgood3',
        'Warning: for Manifest/GenerateCatalogFiles, '
        "expected bool; got 'truel'",
        'Warning: for Lib/TargetMachine, unrecognized enumerated value '
        'MachineX86i',
        "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
  def testConvertToMSBuildSettings_full_synthetic(self):
    """Tests conversion of all the MSBuild settings.

    A synthetic MSVS settings dictionary covering (nearly) every tool
    setting is converted, and the result is compared against the expected
    MSBuild form.  No warnings are expected: every input value here is
    legal and convertible.
    """
    # Input: VS2008-style tool settings.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '1',
            'BrowseInformation': '2',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '0',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            # NOTE: below, 'DefaultCharIsUnsigned' has no MSBuild equivalent
            # setting; it shows up as the '/J' flag appended to
            # AdditionalOptions in the expected output.
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': '0',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '1',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '0',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '2',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '0',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '0',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': '1',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': '1',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '0',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'ErrorReporting': '0',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2;file3',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '1',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '0',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '0',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '3',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '1',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'false',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a_string'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': '0',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '2',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'EmbedManifest': 'true',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'ManifestResourceFile': 'my_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}}
    # Expected output: the equivalent VS2010/MSBuild settings.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string /J',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'true',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': 'NotSet',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'AnySuitable',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'Create',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'Link': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': 'ForceIJWImage',
            'CLRThreadAttribute': 'STAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': 'Driver',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'NoErrorReport',
            'LinkTimeCodeGeneration': 'PGInstrument',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': '',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'true',
            'ShowProgress': 'NotSet',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Windows',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineARM',
            'TerminalServerAware': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'HighestAvailable',
            'UACUIAccess': 'true',
            'Version': 'a_string'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '0x03eb',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': 'Unsigned',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'All',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '4',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Win32',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'Lib': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
        'ManifestResourceCompile': {
            'ResourceOutputFileName': 'my_name'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'false'},
        '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_actual(self):
    """Tests the conversion of an actual project.
    A VS2008 project with most of the options defined was created through the
    VS2008 IDE. It was then converted to VS2010. The tool settings found in
    the .vcproj and .vcxproj files were converted to the two dictionaries
    msvs_settings and expected_msbuild_settings.
    Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure than inherited values are
    included. Since the Gyp projects we generate do not use inheritance,
    we removed these macros. They were:
        ClCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
            AdditionalOptions: ' %(AdditionalOptions)'
            AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
            DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
            ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
            ForcedUsingFiles: ';%(ForcedUsingFiles)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
            UndefinePreprocessorDefinitions:
                ';%(UndefinePreprocessorDefinitions)',
        Link:
            AdditionalDependencies: ';%(AdditionalDependencies)',
            AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
            AdditionalManifestDependencies:
                ';%(AdditionalManifestDependencies)',
            AdditionalOptions: ' %(AdditionalOptions)',
            AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
            AssemblyLinkResource: ';%(AssemblyLinkResource)',
            DelayLoadDLLs: ';%(DelayLoadDLLs)',
            EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
            ForceSymbolReferences: ';%(ForceSymbolReferences)',
            IgnoreSpecificDefaultLibraries:
                ';%(IgnoreSpecificDefaultLibraries)',
        ResourceCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
            AdditionalOptions: ' %(AdditionalOptions)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
        Manifest:
            AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
            AdditionalOptions: ' %(AdditionalOptions)',
            InputResourceManifests: ';%(InputResourceManifests)',
    """
    # Input fixture: per-tool settings exactly as they appear in the
    # VS2008 .vcproj produced by the IDE.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)\\a',
            'AssemblerOutput': '1',
            'BasicRuntimeChecks': '3',
            'BrowseInformation': '1',
            'BrowseInformationFile': '$(IntDir)\\e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': '1',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '2',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '2',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'GeneratePreprocessedFile': '2',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': '$(IntDir)\\b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
            'PrecompiledHeaderThrough': 'StdAfx.hd',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
            'RuntimeLibrary': '3',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'false',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '0',
            'UseUnicodeResponseFiles': 'false',
            'WarnAsError': 'true',
            'WarningLevel': '3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)\\c'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': '1',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': '3',
            'CLRThreadAttribute': '1',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': '1',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'ErrorReporting': '2',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'false',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'flob;flok',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': '2',
            'LinkIncremental': '0',
            'LinkLibraryDependencies': 'false',
            'LinkTimeCodeGeneration': '1',
            'ManifestFile':
                '$(IntDir)\\$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'OptimizeForWindows98': '2',
            'OptimizeReferences': '2',
            'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'false',
            'ShowProgress': '1',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': '1',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '1',
            'TerminalServerAware': '1',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'false',
            'Version': '333'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '3084',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
            'ShowProgress': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
            'EmbedManifest': 'false',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'ManifestResourceFile':
                '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'false',
            'VerboseOutput': 'true'}}
    # Expected output: the same settings as the VS2010 upgrade wizard wrote
    # them into the .vcxproj (with the inheritance macros stripped; see the
    # docstring above).
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more /J',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)a',
            'AssemblerOutput': 'AssemblyCode',
            'BasicRuntimeChecks': 'EnableFastChecks',
            'BrowseInformation': 'true',
            'BrowseInformationFile': '$(IntDir)e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': 'FastCall',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Queue',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Size',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': '$(IntDir)b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'NotUsing',  # Actual conversion gives ''
            'PrecompiledHeaderFile': 'StdAfx.hd',
            'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'PreprocessSuppressLineNumbers': 'true',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
            'RuntimeLibrary': 'MultiThreadedDebugDLL',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '4Bytes',
            'SuppressStartupBanner': 'false',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)c'},
        'Link': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': 'true',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': 'ForceSafeILImage',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': 'UpOnly',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'flob;flok',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'QueueForNextLogin',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'true',
            'OutputFile': '$(OutDir)$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'false',
            'ShowProgress': 'LinkVerbose',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': 'Console',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': 'RequireAdministrator',
            'UACUIAccess': 'true',
            'Version': '333'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '0x0c0c',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
            'ShowProgress': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'VerboseOutput': 'true'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'false',
            'UseLibraryDependencyInputs': 'true'},
        '': {
            'EmbedManifest': 'false',
            'GenerateManifest': 'false',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': ''
            },
        'ManifestResourceCompile': {
            'ResourceOutputFileName':
                '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
        }
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
# Allow running this test file directly as a script.
if __name__ == '__main__':
  unittest.main()
|
tempbottle/restcommander | refs/heads/master | play-1.2.4/python/Lib/curses/ascii.py | 396 | """Constants and membership tests for ASCII characters"""
# Ordinals of the 7-bit ASCII control characters, named after their
# standard abbreviations.  The trailing comment on each line shows the
# equivalent caret (control-key) notation.
NUL = 0x00 # ^@
SOH = 0x01 # ^A
STX = 0x02 # ^B
ETX = 0x03 # ^C
EOT = 0x04 # ^D
ENQ = 0x05 # ^E
ACK = 0x06 # ^F
BEL = 0x07 # ^G
BS = 0x08 # ^H
TAB = 0x09 # ^I
HT = 0x09 # ^I
LF = 0x0a # ^J
NL = 0x0a # ^J
VT = 0x0b # ^K
FF = 0x0c # ^L
CR = 0x0d # ^M
SO = 0x0e # ^N
SI = 0x0f # ^O
DLE = 0x10 # ^P
DC1 = 0x11 # ^Q
DC2 = 0x12 # ^R
DC3 = 0x13 # ^S
DC4 = 0x14 # ^T
NAK = 0x15 # ^U
SYN = 0x16 # ^V
ETB = 0x17 # ^W
CAN = 0x18 # ^X
EM = 0x19 # ^Y
SUB = 0x1a # ^Z
ESC = 0x1b # ^[
FS = 0x1c # ^\
GS = 0x1d # ^]
RS = 0x1e # ^^
US = 0x1f # ^_
SP = 0x20 # space
DEL = 0x7f # delete

# controlnames[i] is the standard mnemonic for control character i
# (0x00..0x1f), plus "SP" for the space character (0x20).
controlnames = [
"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
"SP"
]
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
def isascii(c): return _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (8,32)
def iscntrl(c): return _ctoi(c) <= 31
def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
def isxdigit(c):
    """True for hexadecimal digits: '0'-'9', 'A'-'F', 'a'-'f' (C isxdigit)."""
    n = _ctoi(c)
    return isdigit(c) or 65 <= n <= 70 or 97 <= n <= 102
def isctrl(c): return _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
    """Mask *c* down to 7 bits, preserving the argument type: strings
    come back as one-character strings, integers as integers."""
    masked = _ctoi(c) & 0x7f
    if type(c) == type(""):
        return chr(masked)
    return masked
def ctrl(c):
    """Return the control-character counterpart of *c* (ordinal masked
    with 0x1f), preserving the argument type."""
    stripped = _ctoi(c) & 0x1f
    if type(c) == type(""):
        return chr(stripped)
    return stripped
def alt(c):
    """Return *c* with the meta (eighth) bit set (ordinal OR 0x80),
    preserving the argument type."""
    metified = _ctoi(c) | 0x80
    if type(c) == type(""):
        return chr(metified)
    return metified
def unctrl(c):
    """Return a printable caret-notation representation of *c*.

    DEL (0x7f) becomes "^?", control characters become "^X", printable
    characters map to themselves, and a set meta bit prefixes "!".
    """
    n = _ctoi(c)
    low = n & 0x7f
    if n == 0x7f:
        rep = "^?"
    elif isprint(low):
        rep = chr(low)
    else:
        # Shift the control ordinal up into the printable letter range.
        rep = "^" + chr((low | 0x20) + 0x20)
    if n & 0x80:
        return "!" + rep
    return rep
|
pde/torbrowser-launcher | refs/heads/master | lib/Parsley-1.1/ometa/_generated/pymeta_v1.py | 2 | def createParserClass(GrammarBase, ruleGlobals):
if ruleGlobals is None:
ruleGlobals = {}
class pymeta_v1(GrammarBase):
        def rule_comment(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: comment ::= '#' (~'\n' anything)*
            # -- consumes '#' and everything up to (not including) the newline.
            _locals = {'self': self}
            self.locals['comment'] = _locals
            _G_exactly_1, lastError = self.exactly('#')
            self.considerError(lastError, 'comment')
            def _G_many_2():
                # One repetition: any character that is not a newline.
                def _G_not_3():
                    _G_exactly_4, lastError = self.exactly('\n')
                    self.considerError(lastError, None)
                    return (_G_exactly_4, self.currentError)
                _G_not_5, lastError = self._not(_G_not_3)
                self.considerError(lastError, None)
                _G_apply_6, lastError = self._apply(self.rule_anything, "anything", [])
                self.considerError(lastError, None)
                return (_G_apply_6, self.currentError)
            _G_many_7, lastError = self.many(_G_many_2)
            self.considerError(lastError, 'comment')
            return (_G_many_7, self.currentError)
        def rule_hspace(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: hspace ::= ' ' | '\t' | comment
            _locals = {'self': self}
            self.locals['hspace'] = _locals
            def _G_or_8():
                _G_exactly_9, lastError = self.exactly(' ')
                self.considerError(lastError, None)
                return (_G_exactly_9, self.currentError)
            def _G_or_10():
                _G_exactly_11, lastError = self.exactly('\t')
                self.considerError(lastError, None)
                return (_G_exactly_11, self.currentError)
            def _G_or_12():
                _G_apply_13, lastError = self._apply(self.rule_comment, "comment", [])
                self.considerError(lastError, None)
                return (_G_apply_13, self.currentError)
            _G_or_14, lastError = self._or([_G_or_8, _G_or_10, _G_or_12])
            self.considerError(lastError, 'hspace')
            return (_G_or_14, self.currentError)
        def rule_vspace(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: vspace ::= '\r\n' | '\r' | '\n'
            # -- the two-character CRLF alternative must come first.
            _locals = {'self': self}
            self.locals['vspace'] = _locals
            def _G_or_15():
                _G_exactly_16, lastError = self.exactly('\r\n')
                self.considerError(lastError, None)
                return (_G_exactly_16, self.currentError)
            def _G_or_17():
                _G_exactly_18, lastError = self.exactly('\r')
                self.considerError(lastError, None)
                return (_G_exactly_18, self.currentError)
            def _G_or_19():
                _G_exactly_20, lastError = self.exactly('\n')
                self.considerError(lastError, None)
                return (_G_exactly_20, self.currentError)
            _G_or_21, lastError = self._or([_G_or_15, _G_or_17, _G_or_19])
            self.considerError(lastError, 'vspace')
            return (_G_or_21, self.currentError)
        def rule_ws(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: ws ::= (hspace | vspace | comment)*
            _locals = {'self': self}
            self.locals['ws'] = _locals
            def _G_many_22():
                def _G_or_23():
                    _G_apply_24, lastError = self._apply(self.rule_hspace, "hspace", [])
                    self.considerError(lastError, None)
                    return (_G_apply_24, self.currentError)
                def _G_or_25():
                    _G_apply_26, lastError = self._apply(self.rule_vspace, "vspace", [])
                    self.considerError(lastError, None)
                    return (_G_apply_26, self.currentError)
                def _G_or_27():
                    _G_apply_28, lastError = self._apply(self.rule_comment, "comment", [])
                    self.considerError(lastError, None)
                    return (_G_apply_28, self.currentError)
                _G_or_29, lastError = self._or([_G_or_23, _G_or_25, _G_or_27])
                self.considerError(lastError, None)
                return (_G_or_29, self.currentError)
            _G_many_30, lastError = self.many(_G_many_22)
            self.considerError(lastError, 'ws')
            return (_G_many_30, self.currentError)
        def rule_number(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: number ::= ws ('-' barenumber:x -> t.Exactly(-x)
            #                              |     barenumber:x -> t.Exactly(x))
            _locals = {'self': self}
            self.locals['number'] = _locals
            _G_apply_31, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'number')
            def _G_or_32():
                _G_exactly_33, lastError = self.exactly('-')
                self.considerError(lastError, None)
                _G_apply_34, lastError = self._apply(self.rule_barenumber, "barenumber", [])
                self.considerError(lastError, None)
                _locals['x'] = _G_apply_34
                _G_python_35, lastError = eval('t.Exactly(-x)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_35, self.currentError)
            def _G_or_36():
                _G_apply_37, lastError = self._apply(self.rule_barenumber, "barenumber", [])
                self.considerError(lastError, None)
                _locals['x'] = _G_apply_37
                _G_python_38, lastError = eval('t.Exactly(x)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_38, self.currentError)
            _G_or_39, lastError = self._or([_G_or_32, _G_or_36])
            self.considerError(lastError, 'number')
            return (_G_or_39, self.currentError)
        def rule_barenumber(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: barenumber ::=
            #       '0' (('x'|'X') <hexdigit+>:hs -> int(hs, 16)
            #            | <octaldigit+>:ds       -> int(ds, 8))
            #     | <digit+>:ds                   -> int(ds)
            # -- a leading '0' selects hex (0x..) or octal; otherwise decimal.
            _locals = {'self': self}
            self.locals['barenumber'] = _locals
            def _G_or_40():
                _G_exactly_41, lastError = self.exactly('0')
                self.considerError(lastError, None)
                def _G_or_42():
                    def _G_or_43():
                        _G_exactly_44, lastError = self.exactly('x')
                        self.considerError(lastError, None)
                        return (_G_exactly_44, self.currentError)
                    def _G_or_45():
                        _G_exactly_46, lastError = self.exactly('X')
                        self.considerError(lastError, None)
                        return (_G_exactly_46, self.currentError)
                    _G_or_47, lastError = self._or([_G_or_43, _G_or_45])
                    self.considerError(lastError, None)
                    def _G_consumedby_48():
                        def _G_many1_49():
                            _G_apply_50, lastError = self._apply(self.rule_hexdigit, "hexdigit", [])
                            self.considerError(lastError, None)
                            return (_G_apply_50, self.currentError)
                        _G_many1_51, lastError = self.many(_G_many1_49, _G_many1_49())
                        self.considerError(lastError, None)
                        return (_G_many1_51, self.currentError)
                    _G_consumedby_52, lastError = self.consumedby(_G_consumedby_48)
                    self.considerError(lastError, None)
                    _locals['hs'] = _G_consumedby_52
                    _G_python_53, lastError = eval('int(hs, 16)', self.globals, _locals), None
                    self.considerError(lastError, None)
                    return (_G_python_53, self.currentError)
                def _G_or_54():
                    def _G_consumedby_55():
                        def _G_many1_56():
                            _G_apply_57, lastError = self._apply(self.rule_octaldigit, "octaldigit", [])
                            self.considerError(lastError, None)
                            return (_G_apply_57, self.currentError)
                        _G_many1_58, lastError = self.many(_G_many1_56, _G_many1_56())
                        self.considerError(lastError, None)
                        return (_G_many1_58, self.currentError)
                    _G_consumedby_59, lastError = self.consumedby(_G_consumedby_55)
                    self.considerError(lastError, None)
                    _locals['ds'] = _G_consumedby_59
                    _G_python_60, lastError = eval('int(ds, 8)', self.globals, _locals), None
                    self.considerError(lastError, None)
                    return (_G_python_60, self.currentError)
                _G_or_61, lastError = self._or([_G_or_42, _G_or_54])
                self.considerError(lastError, None)
                return (_G_or_61, self.currentError)
            def _G_or_62():
                def _G_consumedby_63():
                    def _G_many1_64():
                        _G_apply_65, lastError = self._apply(self.rule_digit, "digit", [])
                        self.considerError(lastError, None)
                        return (_G_apply_65, self.currentError)
                    _G_many1_66, lastError = self.many(_G_many1_64, _G_many1_64())
                    self.considerError(lastError, None)
                    return (_G_many1_66, self.currentError)
                _G_consumedby_67, lastError = self.consumedby(_G_consumedby_63)
                self.considerError(lastError, None)
                _locals['ds'] = _G_consumedby_67
                _G_python_68, lastError = eval('int(ds)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_68, self.currentError)
            _G_or_69, lastError = self._or([_G_or_40, _G_or_62])
            self.considerError(lastError, 'barenumber')
            return (_G_or_69, self.currentError)
        def rule_octaldigit(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: octaldigit ::= anything:x ?(x in '01234567') -> x
            _locals = {'self': self}
            self.locals['octaldigit'] = _locals
            _G_apply_70, lastError = self._apply(self.rule_anything, "anything", [])
            self.considerError(lastError, 'octaldigit')
            _locals['x'] = _G_apply_70
            def _G_pred_71():
                _G_python_72, lastError = eval("x in '01234567'", self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_72, self.currentError)
            _G_pred_73, lastError = self.pred(_G_pred_71)
            self.considerError(lastError, 'octaldigit')
            _G_python_74, lastError = eval('x', self.globals, _locals), None
            self.considerError(lastError, 'octaldigit')
            return (_G_python_74, self.currentError)
        def rule_hexdigit(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: hexdigit ::= anything:x
            #                            ?(x in '0123456789ABCDEFabcdef') -> x
            _locals = {'self': self}
            self.locals['hexdigit'] = _locals
            _G_apply_75, lastError = self._apply(self.rule_anything, "anything", [])
            self.considerError(lastError, 'hexdigit')
            _locals['x'] = _G_apply_75
            def _G_pred_76():
                _G_python_77, lastError = eval("x in '0123456789ABCDEFabcdef'", self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_77, self.currentError)
            _G_pred_78, lastError = self.pred(_G_pred_76)
            self.considerError(lastError, 'hexdigit')
            _G_python_79, lastError = eval('x', self.globals, _locals), None
            self.considerError(lastError, 'hexdigit')
            return (_G_python_79, self.currentError)
        def rule_escapedChar(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: escapedChar ::= '\\' ('n'|'r'|'t'|'b'|'f'|'"'|'\''|'\\')
            # -- a backslash escape sequence; yields the character it denotes.
            _locals = {'self': self}
            self.locals['escapedChar'] = _locals
            _G_exactly_80, lastError = self.exactly('\\')
            self.considerError(lastError, 'escapedChar')
            def _G_or_81():
                _G_exactly_82, lastError = self.exactly('n')
                self.considerError(lastError, None)
                _G_python_83, lastError = "\n", None
                self.considerError(lastError, None)
                return (_G_python_83, self.currentError)
            def _G_or_84():
                _G_exactly_85, lastError = self.exactly('r')
                self.considerError(lastError, None)
                _G_python_86, lastError = "\r", None
                self.considerError(lastError, None)
                return (_G_python_86, self.currentError)
            def _G_or_87():
                _G_exactly_88, lastError = self.exactly('t')
                self.considerError(lastError, None)
                _G_python_89, lastError = "\t", None
                self.considerError(lastError, None)
                return (_G_python_89, self.currentError)
            def _G_or_90():
                _G_exactly_91, lastError = self.exactly('b')
                self.considerError(lastError, None)
                _G_python_92, lastError = "\b", None
                self.considerError(lastError, None)
                return (_G_python_92, self.currentError)
            def _G_or_93():
                _G_exactly_94, lastError = self.exactly('f')
                self.considerError(lastError, None)
                _G_python_95, lastError = "\f", None
                self.considerError(lastError, None)
                return (_G_python_95, self.currentError)
            def _G_or_96():
                _G_exactly_97, lastError = self.exactly('"')
                self.considerError(lastError, None)
                _G_python_98, lastError = '"', None
                self.considerError(lastError, None)
                return (_G_python_98, self.currentError)
            def _G_or_99():
                _G_exactly_100, lastError = self.exactly("'")
                self.considerError(lastError, None)
                _G_python_101, lastError = "'", None
                self.considerError(lastError, None)
                return (_G_python_101, self.currentError)
            def _G_or_102():
                _G_exactly_103, lastError = self.exactly('\\')
                self.considerError(lastError, None)
                _G_python_104, lastError = "\\", None
                self.considerError(lastError, None)
                return (_G_python_104, self.currentError)
            _G_or_105, lastError = self._or([_G_or_81, _G_or_84, _G_or_87, _G_or_90, _G_or_93, _G_or_96, _G_or_99, _G_or_102])
            self.considerError(lastError, 'escapedChar')
            return (_G_or_105, self.currentError)
        def rule_character(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: character ::= ws '\'' (escapedChar | anything):c
            #                             ws '\'' -> t.Exactly(c)
            # -- a single-quoted character literal, wrapped in an Exactly term.
            _locals = {'self': self}
            self.locals['character'] = _locals
            _G_apply_106, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'character')
            _G_exactly_107, lastError = self.exactly("'")
            self.considerError(lastError, 'character')
            def _G_or_108():
                _G_apply_109, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
                self.considerError(lastError, None)
                return (_G_apply_109, self.currentError)
            def _G_or_110():
                _G_apply_111, lastError = self._apply(self.rule_anything, "anything", [])
                self.considerError(lastError, None)
                return (_G_apply_111, self.currentError)
            _G_or_112, lastError = self._or([_G_or_108, _G_or_110])
            self.considerError(lastError, 'character')
            _locals['c'] = _G_or_112
            _G_apply_113, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'character')
            _G_exactly_114, lastError = self.exactly("'")
            self.considerError(lastError, 'character')
            _G_python_115, lastError = eval('t.Exactly(c)', self.globals, _locals), None
            self.considerError(lastError, 'character')
            return (_G_python_115, self.currentError)
        def rule_string(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: string ::= ws '"' (escapedChar | ~'"' anything)*:c '"'
            #                          -> t.Exactly(''.join(c))
            # -- a double-quoted string literal, wrapped in an Exactly term.
            _locals = {'self': self}
            self.locals['string'] = _locals
            _G_apply_116, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'string')
            _G_exactly_117, lastError = self.exactly('"')
            self.considerError(lastError, 'string')
            def _G_many_118():
                def _G_or_119():
                    _G_apply_120, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
                    self.considerError(lastError, None)
                    return (_G_apply_120, self.currentError)
                def _G_or_121():
                    def _G_not_122():
                        _G_exactly_123, lastError = self.exactly('"')
                        self.considerError(lastError, None)
                        return (_G_exactly_123, self.currentError)
                    _G_not_124, lastError = self._not(_G_not_122)
                    self.considerError(lastError, None)
                    _G_apply_125, lastError = self._apply(self.rule_anything, "anything", [])
                    self.considerError(lastError, None)
                    return (_G_apply_125, self.currentError)
                _G_or_126, lastError = self._or([_G_or_119, _G_or_121])
                self.considerError(lastError, None)
                return (_G_or_126, self.currentError)
            _G_many_127, lastError = self.many(_G_many_118)
            self.considerError(lastError, 'string')
            _locals['c'] = _G_many_127
            _G_exactly_128, lastError = self.exactly('"')
            self.considerError(lastError, 'string')
            _G_python_129, lastError = eval("t.Exactly(''.join(c))", self.globals, _locals), None
            self.considerError(lastError, 'string')
            return (_G_python_129, self.currentError)
        def rule_name(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: name ::= <letter (letterOrDigit | '_')*>
            # -- an identifier; consumedby() returns the matched input slice.
            _locals = {'self': self}
            self.locals['name'] = _locals
            def _G_consumedby_130():
                _G_apply_131, lastError = self._apply(self.rule_letter, "letter", [])
                self.considerError(lastError, None)
                def _G_many_132():
                    def _G_or_133():
                        _G_apply_134, lastError = self._apply(self.rule_letterOrDigit, "letterOrDigit", [])
                        self.considerError(lastError, None)
                        return (_G_apply_134, self.currentError)
                    def _G_or_135():
                        _G_exactly_136, lastError = self.exactly('_')
                        self.considerError(lastError, None)
                        return (_G_exactly_136, self.currentError)
                    _G_or_137, lastError = self._or([_G_or_133, _G_or_135])
                    self.considerError(lastError, None)
                    return (_G_or_137, self.currentError)
                _G_many_138, lastError = self.many(_G_many_132)
                self.considerError(lastError, None)
                return (_G_many_138, self.currentError)
            _G_consumedby_139, lastError = self.consumedby(_G_consumedby_130)
            self.considerError(lastError, 'name')
            return (_G_consumedby_139, self.currentError)
        def rule_application(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: application ::= ws '<' ws name:name
            #       (' ' ...args '>' -> t.Apply(name, rulename, args)
            #        | ws '>'        -> t.Apply(name, rulename, []))
            # -- a rule invocation <name arg1 arg2 ...>; the argument list is
            #    parsed by the host hook self.applicationArgs().
            _locals = {'self': self}
            self.locals['application'] = _locals
            _G_apply_140, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'application')
            _G_exactly_141, lastError = self.exactly('<')
            self.considerError(lastError, 'application')
            _G_apply_142, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, 'application')
            _G_apply_143, lastError = self._apply(self.rule_name, "name", [])
            self.considerError(lastError, 'application')
            _locals['name'] = _G_apply_143
            def _G_or_144():
                _G_exactly_145, lastError = self.exactly(' ')
                self.considerError(lastError, None)
                _G_python_146, lastError = eval("self.applicationArgs(finalChar='>')", self.globals, _locals), None
                self.considerError(lastError, None)
                _locals['args'] = _G_python_146
                _G_exactly_147, lastError = self.exactly('>')
                self.considerError(lastError, None)
                _G_python_148, lastError = eval('t.Apply(name, self.rulename, args)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_148, self.currentError)
            def _G_or_149():
                _G_apply_150, lastError = self._apply(self.rule_ws, "ws", [])
                self.considerError(lastError, None)
                _G_exactly_151, lastError = self.exactly('>')
                self.considerError(lastError, None)
                _G_python_152, lastError = eval('t.Apply(name, self.rulename, [])', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_152, self.currentError)
            _G_or_153, lastError = self._or([_G_or_144, _G_or_149])
            self.considerError(lastError, 'application')
            return (_G_or_153, self.currentError)
        def rule_expr1(self):
            # Machine-generated by the OMeta grammar compiler.
            # Grammar rule: expr1 ::= application | ruleValue
            #       | semanticPredicate | semanticAction | number
            #       | character | string
            #       | ws '(' expr ws ')'            -> e
            #       | ws '[' expr ws ']'            -> t.List(e)
            # -- a primary grammar expression; numbers and [..] lists are only
            #    legal in tree-matching mode (self.isTree() enforces that).
            _locals = {'self': self}
            self.locals['expr1'] = _locals
            def _G_or_154():
                _G_apply_155, lastError = self._apply(self.rule_application, "application", [])
                self.considerError(lastError, None)
                return (_G_apply_155, self.currentError)
            def _G_or_156():
                _G_apply_157, lastError = self._apply(self.rule_ruleValue, "ruleValue", [])
                self.considerError(lastError, None)
                return (_G_apply_157, self.currentError)
            def _G_or_158():
                _G_apply_159, lastError = self._apply(self.rule_semanticPredicate, "semanticPredicate", [])
                self.considerError(lastError, None)
                return (_G_apply_159, self.currentError)
            def _G_or_160():
                _G_apply_161, lastError = self._apply(self.rule_semanticAction, "semanticAction", [])
                self.considerError(lastError, None)
                return (_G_apply_161, self.currentError)
            def _G_or_162():
                _G_apply_163, lastError = self._apply(self.rule_number, "number", [])
                self.considerError(lastError, None)
                _locals['n'] = _G_apply_163
                _G_python_164, lastError = eval('self.isTree()', self.globals, _locals), None
                self.considerError(lastError, None)
                _G_python_165, lastError = eval('n', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_165, self.currentError)
            def _G_or_166():
                _G_apply_167, lastError = self._apply(self.rule_character, "character", [])
                self.considerError(lastError, None)
                return (_G_apply_167, self.currentError)
            def _G_or_168():
                _G_apply_169, lastError = self._apply(self.rule_string, "string", [])
                self.considerError(lastError, None)
                return (_G_apply_169, self.currentError)
            def _G_or_170():
                _G_apply_171, lastError = self._apply(self.rule_ws, "ws", [])
                self.considerError(lastError, None)
                _G_exactly_172, lastError = self.exactly('(')
                self.considerError(lastError, None)
                _G_apply_173, lastError = self._apply(self.rule_expr, "expr", [])
                self.considerError(lastError, None)
                _locals['e'] = _G_apply_173
                _G_apply_174, lastError = self._apply(self.rule_ws, "ws", [])
                self.considerError(lastError, None)
                _G_exactly_175, lastError = self.exactly(')')
                self.considerError(lastError, None)
                _G_python_176, lastError = eval('e', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_176, self.currentError)
            def _G_or_177():
                _G_apply_178, lastError = self._apply(self.rule_ws, "ws", [])
                self.considerError(lastError, None)
                _G_exactly_179, lastError = self.exactly('[')
                self.considerError(lastError, None)
                _G_apply_180, lastError = self._apply(self.rule_expr, "expr", [])
                self.considerError(lastError, None)
                _locals['e'] = _G_apply_180
                _G_apply_181, lastError = self._apply(self.rule_ws, "ws", [])
                self.considerError(lastError, None)
                _G_exactly_182, lastError = self.exactly(']')
                self.considerError(lastError, None)
                _G_python_183, lastError = eval('self.isTree()', self.globals, _locals), None
                self.considerError(lastError, None)
                _G_python_184, lastError = eval('t.List(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_184, self.currentError)
            _G_or_185, lastError = self._or([_G_or_154, _G_or_156, _G_or_158, _G_or_160, _G_or_162, _G_or_166, _G_or_168, _G_or_170, _G_or_177])
            self.considerError(lastError, 'expr1')
            return (_G_or_185, self.currentError)
    def rule_expr2(self):
        """Generated PyMeta rule: parse a negation/lookahead prefix.

        '~~e' builds t.Lookahead(e), '~e' builds t.Not(e); with no '~'
        prefix this falls through to expr1.
        """
        _locals = {'self': self}
        self.locals['expr2'] = _locals
        def _G_or_186():
            _G_apply_187, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, None)
            _G_exactly_188, lastError = self.exactly('~')
            self.considerError(lastError, None)
            def _G_or_189():
                # Second '~' already consumed one above: '~~' => lookahead.
                _G_exactly_190, lastError = self.exactly('~')
                self.considerError(lastError, None)
                _G_apply_191, lastError = self._apply(self.rule_expr2, "expr2", [])
                self.considerError(lastError, None)
                _locals['e'] = _G_apply_191
                _G_python_192, lastError = eval('t.Lookahead(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_192, self.currentError)
            def _G_or_193():
                # Single '~' => negation.
                _G_apply_194, lastError = self._apply(self.rule_expr2, "expr2", [])
                self.considerError(lastError, None)
                _locals['e'] = _G_apply_194
                _G_python_195, lastError = eval('t.Not(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_195, self.currentError)
            _G_or_196, lastError = self._or([_G_or_189, _G_or_193])
            self.considerError(lastError, None)
            return (_G_or_196, self.currentError)
        def _G_or_197():
            _G_apply_198, lastError = self._apply(self.rule_expr1, "expr1", [])
            self.considerError(lastError, None)
            return (_G_apply_198, self.currentError)
        _G_or_199, lastError = self._or([_G_or_186, _G_or_197])
        self.considerError(lastError, 'expr2')
        return (_G_or_199, self.currentError)
    def rule_expr3(self):
        """Generated PyMeta rule: repetition suffixes and name bindings.

        Parses expr2 followed by an optional '*' (t.Many), '+' (t.Many1)
        or '?' (t.Optional), then an optional ':name' binding (t.Bind).
        A bare ':name' with no preceding term binds an implicit
        'anything' application.
        """
        _locals = {'self': self}
        self.locals['expr3'] = _locals
        def _G_or_200():
            _G_apply_201, lastError = self._apply(self.rule_expr2, "expr2", [])
            self.considerError(lastError, None)
            _locals['e'] = _G_apply_201
            def _G_or_202():
                _G_exactly_203, lastError = self.exactly('*')
                self.considerError(lastError, None)
                _G_python_204, lastError = eval('t.Many(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_204, self.currentError)
            def _G_or_205():
                _G_exactly_206, lastError = self.exactly('+')
                self.considerError(lastError, None)
                _G_python_207, lastError = eval('t.Many1(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_207, self.currentError)
            def _G_or_208():
                _G_exactly_209, lastError = self.exactly('?')
                self.considerError(lastError, None)
                _G_python_210, lastError = eval('t.Optional(e)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_210, self.currentError)
            def _G_or_211():
                # No suffix: keep the term as-is.
                _G_python_212, lastError = eval('e', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_212, self.currentError)
            _G_or_213, lastError = self._or([_G_or_202, _G_or_205, _G_or_208, _G_or_211])
            self.considerError(lastError, None)
            _locals['r'] = _G_or_213
            def _G_or_214():
                # ':name' after the term binds its result to that name.
                _G_exactly_215, lastError = self.exactly(':')
                self.considerError(lastError, None)
                _G_apply_216, lastError = self._apply(self.rule_name, "name", [])
                self.considerError(lastError, None)
                _locals['n'] = _G_apply_216
                _G_python_217, lastError = eval('t.Bind(n, r)', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_217, self.currentError)
            def _G_or_218():
                _G_python_219, lastError = eval('r', self.globals, _locals), None
                self.considerError(lastError, None)
                return (_G_python_219, self.currentError)
            _G_or_220, lastError = self._or([_G_or_214, _G_or_218])
            self.considerError(lastError, None)
            return (_G_or_220, self.currentError)
        def _G_or_221():
            # Bare ':name' — shorthand for binding an 'anything' match.
            _G_apply_222, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, None)
            _G_exactly_223, lastError = self.exactly(':')
            self.considerError(lastError, None)
            _G_apply_224, lastError = self._apply(self.rule_name, "name", [])
            self.considerError(lastError, None)
            _locals['n'] = _G_apply_224
            _G_python_225, lastError = eval('t.Bind(n, t.Apply("anything", self.rulename, []))', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_225, self.currentError)
        _G_or_226, lastError = self._or([_G_or_200, _G_or_221])
        self.considerError(lastError, 'expr3')
        return (_G_or_226, self.currentError)
    def rule_expr4(self):
        """Generated PyMeta rule: a sequence of expr3 terms -> t.And(es)."""
        _locals = {'self': self}
        self.locals['expr4'] = _locals
        def _G_many_227():
            _G_apply_228, lastError = self._apply(self.rule_expr3, "expr3", [])
            self.considerError(lastError, None)
            return (_G_apply_228, self.currentError)
        # Zero or more terms; an empty sequence yields t.And([]).
        _G_many_229, lastError = self.many(_G_many_227)
        self.considerError(lastError, 'expr4')
        _locals['es'] = _G_many_229
        _G_python_230, lastError = eval('t.And(es)', self.globals, _locals), None
        self.considerError(lastError, 'expr4')
        return (_G_python_230, self.currentError)
    def rule_expr(self):
        """Generated PyMeta rule: '|'-separated alternatives -> t.Or([...]).

        The first expr4 is mandatory; each following '|' expr4 pair is
        appended, so a single alternative still produces t.Or([e]).
        """
        _locals = {'self': self}
        self.locals['expr'] = _locals
        _G_apply_231, lastError = self._apply(self.rule_expr4, "expr4", [])
        self.considerError(lastError, 'expr')
        _locals['e'] = _G_apply_231
        def _G_many_232():
            _G_apply_233, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, None)
            _G_exactly_234, lastError = self.exactly('|')
            self.considerError(lastError, None)
            _G_apply_235, lastError = self._apply(self.rule_expr4, "expr4", [])
            self.considerError(lastError, None)
            return (_G_apply_235, self.currentError)
        _G_many_236, lastError = self.many(_G_many_232)
        self.considerError(lastError, 'expr')
        _locals['es'] = _G_many_236
        _G_python_237, lastError = eval('t.Or([e] + es)', self.globals, _locals), None
        self.considerError(lastError, 'expr')
        return (_G_python_237, self.currentError)
    def rule_ruleValue(self):
        """Generated PyMeta rule: '=>' introduces a host-language value
        expression, delegated to self.ruleValueExpr(False)."""
        _locals = {'self': self}
        self.locals['ruleValue'] = _locals
        _G_apply_238, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'ruleValue')
        _G_exactly_239, lastError = self.exactly('=>')
        self.considerError(lastError, 'ruleValue')
        _G_python_240, lastError = eval('self.ruleValueExpr(False)', self.globals, _locals), None
        self.considerError(lastError, 'ruleValue')
        return (_G_python_240, self.currentError)
    def rule_semanticPredicate(self):
        """Generated PyMeta rule: '?(' introduces a semantic predicate,
        delegated to self.semanticPredicateExpr()."""
        _locals = {'self': self}
        self.locals['semanticPredicate'] = _locals
        _G_apply_241, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'semanticPredicate')
        _G_exactly_242, lastError = self.exactly('?(')
        self.considerError(lastError, 'semanticPredicate')
        _G_python_243, lastError = eval('self.semanticPredicateExpr()', self.globals, _locals), None
        self.considerError(lastError, 'semanticPredicate')
        return (_G_python_243, self.currentError)
    def rule_semanticAction(self):
        """Generated PyMeta rule: '!(' introduces a semantic action,
        delegated to self.semanticActionExpr()."""
        _locals = {'self': self}
        self.locals['semanticAction'] = _locals
        _G_apply_244, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'semanticAction')
        _G_exactly_245, lastError = self.exactly('!(')
        self.considerError(lastError, 'semanticAction')
        _G_python_246, lastError = eval('self.semanticActionExpr()', self.globals, _locals), None
        self.considerError(lastError, 'semanticAction')
        return (_G_python_246, self.currentError)
    def rule_ruleEnd(self):
        """Generated PyMeta rule: terminator of a rule definition.

        Either optional horizontal space followed by at least one
        vertical-space character, or end of input.
        """
        _locals = {'self': self}
        self.locals['ruleEnd'] = _locals
        def _G_or_247():
            def _G_many_248():
                _G_apply_249, lastError = self._apply(self.rule_hspace, "hspace", [])
                self.considerError(lastError, None)
                return (_G_apply_249, self.currentError)
            _G_many_250, lastError = self.many(_G_many_248)
            self.considerError(lastError, None)
            def _G_many1_251():
                _G_apply_252, lastError = self._apply(self.rule_vspace, "vspace", [])
                self.considerError(lastError, None)
                return (_G_apply_252, self.currentError)
            # many1: the extra positional call forces at least one match.
            _G_many1_253, lastError = self.many(_G_many1_251, _G_many1_251())
            self.considerError(lastError, None)
            return (_G_many1_253, self.currentError)
        def _G_or_254():
            _G_apply_255, lastError = self._apply(self.rule_end, "end", [])
            self.considerError(lastError, None)
            return (_G_apply_255, self.currentError)
        _G_or_256, lastError = self._or([_G_or_247, _G_or_254])
        self.considerError(lastError, 'ruleEnd')
        return (_G_or_256, self.currentError)
    def rule_rulePart(self):
        """Generated PyMeta rule: one alternative of a rule definition.

        Consumes the rule argument as 'requiredName', matches the rule's
        name (which must equal requiredName, enforced by the predicate),
        records it as self.rulename, parses the argument pattern, and
        then either '::= expr' (body) or just a rule terminator.
        """
        _locals = {'self': self}
        self.locals['rulePart'] = _locals
        # The first 'anything' consumes the argument passed via _apply.
        _G_apply_257, lastError = self._apply(self.rule_anything, "anything", [])
        self.considerError(lastError, 'rulePart')
        _locals['requiredName'] = _G_apply_257
        _G_apply_258, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'rulePart')
        _G_apply_259, lastError = self._apply(self.rule_name, "name", [])
        self.considerError(lastError, 'rulePart')
        _locals['n'] = _G_apply_259
        def _G_pred_260():
            _G_python_261, lastError = eval('n == requiredName', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_261, self.currentError)
        _G_pred_262, lastError = self.pred(_G_pred_260)
        self.considerError(lastError, 'rulePart')
        _G_python_263, lastError = eval('setattr(self, "rulename", n)', self.globals, _locals), None
        self.considerError(lastError, 'rulePart')
        _G_apply_264, lastError = self._apply(self.rule_expr4, "expr4", [])
        self.considerError(lastError, 'rulePart')
        _locals['args'] = _G_apply_264
        def _G_or_265():
            # '::=' followed by the rule body: sequence of args and body.
            _G_apply_266, lastError = self._apply(self.rule_ws, "ws", [])
            self.considerError(lastError, None)
            _G_exactly_267, lastError = self.exactly('::=')
            self.considerError(lastError, None)
            _G_apply_268, lastError = self._apply(self.rule_expr, "expr", [])
            self.considerError(lastError, None)
            _locals['e'] = _G_apply_268
            _G_apply_269, lastError = self._apply(self.rule_ruleEnd, "ruleEnd", [])
            self.considerError(lastError, None)
            _G_python_270, lastError = eval('t.And([args, e])', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_270, self.currentError)
        def _G_or_271():
            # No body: the argument pattern alone forms the alternative.
            _G_apply_272, lastError = self._apply(self.rule_ruleEnd, "ruleEnd", [])
            self.considerError(lastError, None)
            _G_python_273, lastError = eval('args', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_273, self.currentError)
        _G_or_274, lastError = self._or([_G_or_265, _G_or_271])
        self.considerError(lastError, 'rulePart')
        return (_G_or_274, self.currentError)
    def rule_rule(self):
        """Generated PyMeta rule: a complete rule definition.

        A lookahead captures the rule name without consuming it; then one
        rulePart is parsed with that name, plus any further rulePart
        alternatives with the same name, producing t.Rule(n, ...) with a
        t.Or wrapper when there is more than one alternative.
        """
        _locals = {'self': self}
        self.locals['rule'] = _locals
        _G_apply_275, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'rule')
        def _G_lookahead_276():
            _G_apply_277, lastError = self._apply(self.rule_name, "name", [])
            self.considerError(lastError, None)
            _locals['n'] = _G_apply_277
            return (_locals['n'], self.currentError)
        _G_lookahead_278, lastError = self.lookahead(_G_lookahead_276)
        self.considerError(lastError, 'rule')
        _G_python_279, lastError = eval('n', self.globals, _locals), None
        self.considerError(lastError, 'rule')
        _G_apply_280, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_279])
        self.considerError(lastError, 'rule')
        _locals['r'] = _G_apply_280
        def _G_or_281():
            def _G_many1_282():
                _G_python_283, lastError = eval('n', self.globals, _locals), None
                self.considerError(lastError, None)
                _G_apply_284, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_283])
                self.considerError(lastError, None)
                return (_G_apply_284, self.currentError)
            # many1: at least one additional alternative with the same name.
            _G_many1_285, lastError = self.many(_G_many1_282, _G_many1_282())
            self.considerError(lastError, None)
            _locals['rs'] = _G_many1_285
            _G_python_286, lastError = eval('t.Rule(n, t.Or([r] + rs))', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_286, self.currentError)
        def _G_or_287():
            _G_python_288, lastError = eval('t.Rule(n, r)', self.globals, _locals), None
            self.considerError(lastError, None)
            return (_G_python_288, self.currentError)
        _G_or_289, lastError = self._or([_G_or_281, _G_or_287])
        self.considerError(lastError, 'rule')
        return (_G_or_289, self.currentError)
    def rule_grammar(self):
        """Generated PyMeta rule: the whole grammar — zero or more rules
        plus trailing whitespace -> t.Grammar(name, tree_target, rules)."""
        _locals = {'self': self}
        self.locals['grammar'] = _locals
        def _G_many_290():
            _G_apply_291, lastError = self._apply(self.rule_rule, "rule", [])
            self.considerError(lastError, None)
            return (_G_apply_291, self.currentError)
        _G_many_292, lastError = self.many(_G_many_290)
        self.considerError(lastError, 'grammar')
        _locals['rs'] = _G_many_292
        _G_apply_293, lastError = self._apply(self.rule_ws, "ws", [])
        self.considerError(lastError, 'grammar')
        _G_python_294, lastError = eval('t.Grammar(self.name, self.tree_target, rs)', self.globals, _locals), None
        self.considerError(lastError, 'grammar')
        return (_G_python_294, self.currentError)
    # Merge caller-supplied rule globals into the generated grammar class.
    # A copy is taken first so the class's existing globals dict is not
    # mutated in place (it may be shared with other parser instances).
    if pymeta_v1.globals is not None:
        pymeta_v1.globals = pymeta_v1.globals.copy()
        pymeta_v1.globals.update(ruleGlobals)
    else:
        pymeta_v1.globals = ruleGlobals
    return pymeta_v1
class TaskController(object):
    """WSGI app providing web-like RPC.

    Replicates the normal web request environment as closely as possible
    when executing celery tasks: the task is placed in the WSGI environ
    by the caller and executed as if it were a request handler.
    """

    def __call__(self, environ, start_response):
        # The caller stashes the task callable under the 'task' key;
        # context restoration is the caller's responsibility.
        celery_task = environ['task']
        body = celery_task(restore_context=False)
        start_response('200 OK', [])
        # WSGI bodies are iterables of chunks; wrap the single result.
        return [body]
|
dplorimer/osf | refs/heads/master | scripts/tests/test_migrate_dates.py | 54 | # -*- coding: utf-8 -*-
import datetime
from nose.tools import * # noqa
from scripts.osfstorage.utils import ensure_osf_files
from website import settings
ensure_osf_files(settings)
# Hack: Must configure add-ons before importing `OsfTestCase`
from website.addons.osfstorage.tests.factories import FileVersionFactory
from website.addons.osfstorage.model import OsfStorageFileRecord
from website.addons.osffiles.model import NodeFile
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.osfstorage.migrate_dates import main
class TestMigrateDates(OsfTestCase):
    """Checks that the migrate_dates script copies a legacy NodeFile's
    modification date onto the matching osfstorage file version."""
    def setUp(self):
        super(TestMigrateDates, self).setUp()
        # Legacy osffiles record whose date_modified is the value the
        # migration is expected to propagate.
        self.path = 'old-pizza'
        self.project = ProjectFactory()
        self.node_settings = self.project.get_addon('osfstorage')
        self.node_file = NodeFile(path=self.path)
        self.node_file.save()
        self.node_file.reload()
        self.date = self.node_file.date_modified
        self.project.files_versions['old_pizza'] = [self.node_file._id]
        self.project.save()
        # New-style version deliberately stamped "now" so the migration
        # has something distinct to overwrite.
        self.version = FileVersionFactory(date_modified=datetime.datetime.now())
        self.record, _ = OsfStorageFileRecord.get_or_create(self.node_file.path, self.node_settings)
        self.record.versions = [self.version]
        self.record.save()
    def test_migrate_dates(self):
        # Sanity check: dates differ before the migration runs.
        assert_not_equal(self.version.date_modified, self.date)
        main(dry_run=False)
        # After migration, the version's creation date must equal the
        # legacy NodeFile's modification date.
        assert_equal(self.version.date_created, self.date)
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.5/django/contrib/messages/tests/base.py | 63 | from django import http
from django.conf import settings, global_settings
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
def skipUnlessAuthIsInstalled(func):
    """Decorator: skip the wrapped test when django.contrib.auth is not
    present in INSTALLED_APPS."""
    auth_installed = 'django.contrib.auth' in settings.INSTALLED_APPS
    skip_decorator = skipIf(not auth_installed,
                            "django.contrib.auth isn't installed")
    return skip_decorator(func)
def add_level_messages(storage):
    """Populate *storage* with six messages spanning the standard levels
    plus one custom numeric level (29)."""
    fixtures = (
        (constants.INFO, 'A generic info message', ''),
        (29, 'Some custom level', ''),
        (constants.DEBUG, 'A debugging message', 'extra-tag'),
        (constants.WARNING, 'A warning', ''),
        (constants.ERROR, 'An error', ''),
        (constants.SUCCESS, 'This was a triumph.', ''),
    )
    # storage.add()'s extra_tags parameter defaults to '', so passing ''
    # explicitly for the untagged messages is equivalent to omitting it.
    for level, text, tags in fixtures:
        storage.add(level, text, extra_tags=tags)
class override_settings_tags(override_settings):
    """override_settings variant that also refreshes the cached LEVEL_TAGS.

    MESSAGE_TAGS is read once into the module-level LEVEL_TAGS constant in
    django.contrib.messages.storage.base, so overriding the setting alone
    would have no effect; enable()/disable() keep the constant in sync and
    restore the previous mapping afterwards.
    """
    def enable(self):
        super(override_settings_tags, self).enable()
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, we need to update that constant too.
        self.old_level_tags = base.LEVEL_TAGS
        base.LEVEL_TAGS = utils.get_level_tags()
    def disable(self):
        super(override_settings_tags, self).disable()
        # Restore the mapping captured in enable().
        base.LEVEL_TAGS = self.old_level_tags
class BaseTest(TestCase):
    """Abstract test suite shared by every message-storage backend.

    Subclasses set ``storage_class`` and implement
    ``stored_messages_count`` and ``test_get``; the remaining tests
    exercise the storage API common to all backends.
    """
    storage_class = default_storage
    urls = 'django.contrib.messages.tests.urls'
    # Maps the level names used in the test URLconf to numeric levels.
    levels = {
        'debug': constants.DEBUG,
        'info': constants.INFO,
        'success': constants.SUCCESS,
        'warning': constants.WARNING,
        'error': constants.ERROR,
    }
    def setUp(self):
        # Point MESSAGE_STORAGE at the backend under test and neutralise
        # settings that would vary tag rendering between backends.
        self.settings_override = override_settings_tags(
            TEMPLATE_DIRS = (),
            TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS,
            MESSAGE_TAGS = '',
            MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
                                         self.storage_class.__name__),
            SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer',
        )
        self.settings_override.enable()
    def tearDown(self):
        self.settings_override.disable()
    def get_request(self):
        # A bare HttpRequest; no middleware has run on it.
        return http.HttpRequest()
    def get_response(self):
        return http.HttpResponse()
    def get_storage(self, data=None):
        """
        Returns the storage backend, setting its loaded data to the ``data``
        argument.
        This method avoids the storage ``_get`` method from getting called so
        that other parts of the storage backend can be tested independent of
        the message retrieval logic.
        """
        storage = self.storage_class(self.get_request())
        storage._loaded_data = data or []
        return storage
    def test_add(self):
        storage = self.get_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 1')
        self.assertTrue(storage.added_new)
        storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
        self.assertEqual(len(storage), 2)
    def test_add_lazy_translation(self):
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, ugettext_lazy('lazy message'))
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    def test_no_update(self):
        storage = self.get_storage()
        response = self.get_response()
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_add_update(self):
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 1')
        storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 2)
    def test_existing_add_read_update(self):
        storage = self.get_existing_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 3')
        list(storage) # Simulates a read
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_existing_read_add_update(self):
        storage = self.get_existing_storage()
        response = self.get_response()
        list(storage) # Simulates a read
        storage.add(constants.INFO, 'Test message 3')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_full_request_response_cycle(self):
        """
        With the message middleware enabled, tests that messages are properly
        stored and then retrieved across the full request/redirect/response
        cycle.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('django.contrib.messages.tests.urls.show')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('django.contrib.messages.tests.urls.add',
                              args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertTrue('messages' in response.context)
            messages = [Message(self.levels[level], msg) for msg in
                        data['messages']]
            self.assertEqual(list(response.context['messages']), messages)
            for msg in data['messages']:
                self.assertContains(response, msg)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_with_template_response(self):
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
        for level in self.levels.keys():
            add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
                              args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertTrue('messages' in response.context)
            for msg in data['messages']:
                self.assertContains(response, msg)
        # there shouldn't be any messages on second GET request
        response = self.client.get(show_url)
        for msg in data['messages']:
            self.assertNotContains(response, msg)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_multiple_posts(self):
        """
        Tests that messages persist properly when multiple POSTs are made
        before a GET.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('django.contrib.messages.tests.urls.show')
        messages = []
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            messages.extend([Message(self.levels[level], msg) for msg in
                             data['messages']])
            add_url = reverse('django.contrib.messages.tests.urls.add',
                              args=(level,))
            self.client.post(add_url, data)
        response = self.client.get(show_url)
        self.assertTrue('messages' in response.context)
        self.assertEqual(list(response.context['messages']), messages)
        for msg in data['messages']:
            self.assertContains(response, msg)
    # NOTE(review): filter(...) here returns a list on the Python 2 series
    # this code targets; on Python 3 it would be a lazy iterator — confirm
    # before porting.
    @override_settings(
        INSTALLED_APPS=filter(
            lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
        MIDDLEWARE_CLASSES=filter(
            lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
        TEMPLATE_CONTEXT_PROCESSORS=filter(
            lambda p:'context_processors.messages' not in p,
            settings.TEMPLATE_CONTEXT_PROCESSORS),
        MESSAGE_LEVEL=constants.DEBUG
    )
    def test_middleware_disabled(self):
        """
        Tests that, when the middleware is disabled, an exception is raised
        when one attempts to store a message.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('django.contrib.messages.tests.urls.show')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('django.contrib.messages.tests.urls.add',
                              args=(level,))
            self.assertRaises(MessageFailure, self.client.post, add_url,
                              data, follow=True)
    @override_settings(
        INSTALLED_APPS=filter(
            lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
        MIDDLEWARE_CLASSES=filter(
            lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
        TEMPLATE_CONTEXT_PROCESSORS=filter(
            lambda p:'context_processors.messages' not in p,
            settings.TEMPLATE_CONTEXT_PROCESSORS),
        MESSAGE_LEVEL=constants.DEBUG
    )
    def test_middleware_disabled_fail_silently(self):
        """
        Tests that, when the middleware is disabled, an exception is not
        raised if 'fail_silently' = True
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
            'fail_silently': True,
        }
        show_url = reverse('django.contrib.messages.tests.urls.show')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('django.contrib.messages.tests.urls.add',
                              args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertFalse('messages' in response.context)
    def stored_messages_count(self, storage, response):
        """
        Returns the number of messages being stored after a
        ``storage.update()`` call.
        """
        raise NotImplementedError('This method must be set by a subclass.')
    def test_get(self):
        raise NotImplementedError('This method must be set by a subclass.')
    def get_existing_storage(self):
        # A storage pre-loaded with two messages, one carrying extra tags.
        return self.get_storage([Message(constants.INFO, 'Test message 1'),
                                 Message(constants.INFO, 'Test message 2',
                                         extra_tags='tag')])
    def test_existing_read(self):
        """
        Tests that reading the existing storage doesn't cause the data to be
        lost.
        """
        storage = self.get_existing_storage()
        self.assertFalse(storage.used)
        # After iterating the storage engine directly, the used flag is set.
        data = list(storage)
        self.assertTrue(storage.used)
        # The data does not disappear because it has been iterated.
        self.assertEqual(data, list(storage))
    def test_existing_add(self):
        storage = self.get_existing_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 3')
        self.assertTrue(storage.added_new)
    def test_default_level(self):
        # get_level works even with no storage on the request.
        request = self.get_request()
        self.assertEqual(get_level(request), constants.INFO)
        # get_level returns the default level if it hasn't been set.
        storage = self.get_storage()
        request._messages = storage
        self.assertEqual(get_level(request), constants.INFO)
        # Only messages of sufficient level get recorded.
        add_level_messages(storage)
        self.assertEqual(len(storage), 5)
    def test_low_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage
        self.assertTrue(set_level(request, 5))
        self.assertEqual(get_level(request), 5)
        add_level_messages(storage)
        self.assertEqual(len(storage), 6)
    def test_high_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage
        self.assertTrue(set_level(request, 30))
        self.assertEqual(get_level(request), 30)
        add_level_messages(storage)
        self.assertEqual(len(storage), 2)
    @override_settings(MESSAGE_LEVEL=29)
    def test_settings_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        self.assertEqual(get_level(request), 29)
        add_level_messages(storage)
        self.assertEqual(len(storage), 3)
    def test_tags(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'extra-tag debug', 'warning', 'error',
                          'success'])
    @override_settings_tags(MESSAGE_TAGS={
        constants.INFO: 'info',
        constants.DEBUG: '',
        constants.WARNING: '',
        constants.ERROR: 'bad',
        29: 'custom',
    }
    )
    def test_custom_tags(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.