| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null ⌀) |
|---|---|---|---|---|
henryre/shalo
|
refs/heads/master
|
utils/language_data_utils.py
|
1
|
import cPickle
import numpy as np
DATA_DIR = 'data/'
def senna(vector_f='embeddings.txt', words_f ='words.lst', out_f='senna.pkl'):
"""Process raw Senna word vectors"""
with open(DATA_DIR + words_f, 'rb') as f:
words = [line.strip() for line in f]
M = np.loadtxt(DATA_DIR + vector_f)
print "Found {0} words".format(len(words))
print "Found {0}x{1} embedding matrix".format(*M.shape)
with open(DATA_DIR + out_f, 'wb') as f:
cPickle.dump((words, M), f)
def dep_w2v(data_fname='deps.words', out_fname='depw2v.pkl'):
"""Process raw dependency word2vec data from Levy & Goldberg '14"""
M = np.loadtxt(DATA_DIR + data_fname, converters={0: lambda x: 0})
M = M[:, 1:]
print "Loaded {0}x{1} word vector matrix".format(*M.shape)
with open(DATA_DIR + data_fname, 'rb') as f:
words = [line.split()[0] for line in f]
with open(DATA_DIR + out_fname, 'wb') as f:
cPickle.dump((words, M), f)
def glove(data_fname='glove.840B.300d.txt', out_fname='glove.pkl'):
"""Process raw dependency GloVe data from Socher '13"""
words, U, dim = [], [], None
with open(DATA_DIR + data_fname, 'rb') as f:
for j, line in enumerate(f):
x = line.strip().split()
word, vector, d = x[0], np.ravel(x[1:]), len(x) - 1
if dim is None: dim = d
elif d != dim: raise Exception('{0}: {1}!={2}'.format(j, dim, d))
U.append(vector)
words.append(word)
U = np.array(U)
print "Found {0} words".format(len(words))
print "Found {0}x{1} embedding matrix".format(*U.shape)
with open(DATA_DIR + out_fname, 'wb') as f:
cPickle.dump((words, U), f)
def word_freq(data_fname='count_1w.txt', out_fname='word_freq.pkl'):
counts, s = {}, 0
with open(DATA_DIR + data_fname, 'rb') as f:
for line in f:
word, ct = line.strip().split()
ct = int(ct)
counts[word] = ct
s += ct
for k, v in counts.iteritems():
counts[k] = float(v) / s
with open(DATA_DIR + out_fname, 'wb') as f:
cPickle.dump(counts, f)
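# --- Hedged usage sketch (editor addition, not part of the upstream file) ---
# Reloads the pickled output of senna() above; assumes data/senna.pkl exists
# from a prior run and that this executes under the same Python 2 environment.
if __name__ == '__main__':
    with open(DATA_DIR + 'senna.pkl', 'rb') as f:
        words, M = cPickle.load(f)
    print "Loaded {0} words, embedding matrix {1}x{2}".format(len(words), *M.shape)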
|
6WIND/scapy
|
refs/heads/master
|
scapy/layers/tls/crypto/compression.py
|
7
|
# This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS compression.
"""
from __future__ import absolute_import
import zlib
from scapy.error import warning
import scapy.modules.six as six
_tls_compression_algs = {}
_tls_compression_algs_cls = {}
class _GenericCompMetaclass(type):
"""
Compression classes are automatically registered through this metaclass.
"""
def __new__(cls, name, bases, dct):
the_class = super(_GenericCompMetaclass, cls).__new__(cls, name,
bases, dct)
comp_name = dct.get("name")
val = dct.get("val")
if comp_name:
_tls_compression_algs[val] = comp_name
_tls_compression_algs_cls[val] = the_class
return the_class
class _GenericComp(six.with_metaclass(_GenericCompMetaclass, object)):
pass
class Comp_NULL(_GenericComp):
"""
The default and advised compression method for TLS: doing nothing.
"""
name = "null"
val = 0
def compress(self, s):
return s
def decompress(self, s):
return s
class Comp_Deflate(_GenericComp):
"""
DEFLATE algorithm, specified for TLS by RFC 3749.
"""
name = "deflate"
val = 1
def compress(self, s):
tmp = self.compress_state.compress(s)
tmp += self.compress_state.flush(zlib.Z_FULL_FLUSH)
return tmp
def decompress(self, s):
return self.decompress_state.decompress(s)
def __init__(self):
self.compress_state = zlib.compressobj()
self.decompress_state = zlib.decompressobj()
class Comp_LZS(_GenericComp):
"""
Lempel-Ziv-Stac (LZS) algorithm, specified for TLS by RFC 3943.
XXX No support for now.
"""
name = "LZS"
val = 64
def compress(self, s):
warning("LZS Compression algorithm is not implemented yet")
return s
def decompress(self, s):
warning("LZS Compression algorithm is not implemented yet")
return s
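# --- Hedged usage sketch (editor addition, not part of the upstream module) ---
# Round-trips a payload through Comp_Deflate defined above; nothing beyond zlib
# and the classes in this file is assumed.
if __name__ == '__main__':
    comp = Comp_Deflate()
    payload = b"hello TLS compression"
    assert comp.decompress(comp.compress(payload)) == payload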
|
zofuthan/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/features/homepage.py
|
140
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_equals, assert_greater # pylint: disable=no-name-in-module
@step(u'I should see the following links and ids')
def should_see_a_link_called(step):
for link_id_pair in step.hashes:
link_id = link_id_pair['id']
text = link_id_pair['Link']
link = world.browser.find_by_id(link_id)
assert_greater(
len(link),
0,
"Link length is less than 1. ID: {id} Text: {text}".format(id=link_id, text=text)
)
assert_equals(link.text, text)
|
stupschwartz/branching
|
refs/heads/master
|
src/branching.py
|
1
|
import os
class Branch(object):
def __init__(self, name):
self.name = name
def push(self):
os.system('git push origin %s' % self.name)
print 'branching'
|
jnerin/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
|
45
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
to monitor. Entities associate checks and alarms with a target system and
provide a convenient, centralized place to store IP addresses. Rackspace
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
label:
description:
- Defines a name for this entity. Must be a non-empty string between 1 and
255 characters long.
required: true
state:
description:
- Ensure that an entity with this C(name) exists or does not exist.
choices: ["present", "absent"]
agent_id:
description:
- Rackspace monitoring agent on the target device to which this entity is
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary with keys that are names
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
addresses.
metadata:
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters
long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Entity example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure an entity exists
rax_mon_entity:
credentials: ~/.rax_pub
state: present
label: my_entity
named_ip_addresses:
web_box: 192.0.2.4
db_box: 192.0.2.5
meta:
hurf: durf
register: the_entity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for entity in cm.list_entities():
if label == entity.label:
existing.append(entity)
entity = None
if existing:
entity = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing entities have the label %s.' %
(len(existing), label))
if entity:
if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
should_delete = should_create = True
# Change an existing Entity, unless there's nothing to do.
should_update = agent_id and agent_id != entity.agent_id or \
(metadata and metadata != entity.metadata)
if should_update and not should_delete:
entity.update(agent_id, metadata)
changed = True
if should_delete:
entity.delete()
else:
should_create = True
if should_create:
# Create a new Entity.
entity = cm.create_entity(label=label, agent=agent_id,
ip_addresses=named_ip_addresses,
metadata=metadata)
changed = True
else:
# Delete the existing Entities.
for e in existing:
e.delete()
changed = True
if entity:
entity_dict = {
"id": entity.id,
"name": entity.name,
"agent_id": entity.agent_id,
}
module.exit_json(changed=changed, entity=entity_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
agent_id=dict(),
named_ip_addresses=dict(type='dict', default={}),
metadata=dict(type='dict', default={})
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
agent_id = module.params.get('agent_id')
named_ip_addresses = module.params.get('named_ip_addresses')
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
if __name__ == '__main__':
main()
|
jonashaag/jedi
|
refs/heads/master
|
test/test_evaluate/not_in_sys_path/not_in_sys_path_package/module.py
|
34
|
value = 'package.module'
|
Hellowlol/PyTunes
|
refs/heads/master
|
libs/simplejson/tests/test_default.py
|
149
|
from unittest import TestCase
import simplejson as json
class TestDefault(TestCase):
def test_default(self):
self.assertEqual(
json.dumps(type, default=repr),
json.dumps(repr(type)))
|
chjw8016/GreenOdoo7-haibao
|
refs/heads/master
|
openerp/addons/warning/warning.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sales Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
res_partner()
class sale_order(osv.osv):
_inherit = 'sale.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'payment_term' : False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.sale_warn != 'no-message':
if partner.sale_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.sale_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
sale_order()
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def onchange_partner_id(self, cr, uid, ids, part):
if not part:
return {'value':{'partner_address_id': False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part)
if partner.purchase_warn != 'no-message':
if partner.purchase_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.purchase_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
purchase_order()
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
if not partner_id:
return {'value': {
'account_id': False,
'payment_term': False,
}
}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
if partner.invoice_warn != 'no-message':
if partner.invoice_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.invoice_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.invoice_warn_msg
warning = {
'title': title,
'message': message
}
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice=date_invoice, payment_term=payment_term,
partner_bank_id=partner_bank_id, company_id=company_id)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
account_invoice()
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
if partner.picking_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.picking_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
result = super(stock_picking, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
stock_picking()
# FIXME:(class stock_picking_in and stock_picking_out) this is a temporary workaround because of a framework bug (ref: lp:996816).
# It should be removed as soon as the bug is fixed
class stock_picking_in(osv.osv):
_inherit = 'stock.picking.in'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
if partner.picking_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.picking_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
result = super(stock_picking_in, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
class stock_picking_out(osv.osv):
_inherit = 'stock.picking.out'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
if partner.picking_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (partner.name), partner.picking_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
result = super(stock_picking_out, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
product_product()
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
warning = {}
if not product:
return {'value': {'th_weight' : 0, 'product_packaging': False,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
if product_info.sale_line_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (product_info.name), product_info.sale_line_warn_msg)
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
result = super(sale_order_line, self).product_id_change( cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
sale_order_line()
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, notes=False, context=None):
warning = {}
if not product:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'notes': notes or '', 'product_uom' : uom or False}, 'domain':{'product_uom':[]}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.purchase_line_warn != 'no-message':
if product_info.purchase_line_warn == 'block':
raise osv.except_osv(_('Alert for %s!') % (product_info.name), product_info.purchase_line_warn_msg)
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order, fiscal_position_id)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
purchase_order_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sarutobi/djamizdat
|
refs/heads/master
|
migration/insert_data.py
|
1
|
# -*- coding: utf-8 -*-
import sys
import sqlite3 as lite
from datetime import datetime
from lxml import etree
def parse_file(dname):
fname = "data/{0}.xml".format(dname)
return etree.parse(fname)
def store_data(conn, query, data):
conn.executemany(query, data)
conn.commit()
def get_int_value(parent, node_name):
n = parent.find(node_name)
if n is not None:
return int(n.text)
return None
def get_node_value(parent, node_name):
n = parent.find(node_name)
if n is not None:
return n.text
return ''
def get_datetime_value(parent, node_name):
try:
n = parent.find(node_name)
except ValueError:
n = None
if n is not None:
return datetime.strptime(n.text, "%Y-%m-%dT%H:%M:%S")
return None
def load_docname(conn):
t_query = "insert into documents_docname(id, docname) values (?, ?);"
tree = parse_file("DOCNAME")
data = []
for dnm in tree.findall("DOCNAME"):
data.append((
int(dnm.find("ID_DOCNAME").text),
dnm.find("DocName").text
))
store_data(conn, t_query, data)
def load_languages(conn):
t_query = "insert into documents_languages(id, language) values(?, ?);"
tree = parse_file("Languages")
data = []
for lng in tree.findall("Languages"):
data.append((
int(lng.find(u"Код").text),
lng.find(u"Язык").text
))
store_data(conn, t_query, data)
def load_typedoc(conn):
t_query = "insert into documents_typedoc(doctype) values(?);"
tree = parse_file("TypeDoc")
data = []
for lng in tree.findall("TypeDoc"):
data.append((
lng.find("Type").text
))
store_data(conn, t_query, data)
def load_reference(conn):
t_query = "insert into documents_reference(id, ACnumber, notes, page, name_id) values(?, ?, ?, ?, ?);"
tree = parse_file("Ссылки")
data = []
for lng in tree.findall(u"Ссылки"):
data.append((
int(lng.find(u"Код1").text),
lng.find(u"НомерАС").text,
get_node_value(lng, u"Примечания"),
get_node_value(lng, u"Стр"),
int(lng.find(u"Имена_Код").text)
))
store_data(conn, t_query, data)
def load_category(conn):
t_query = "insert into documents_category(id, parent_code_id, level, name, path) values(?, ?, ?, ?, ?);"
tree = parse_file("Категории")
data = []
for lng in tree.findall(u"Категории"):
data.append((
int(lng.find(u"Код_категории").text),
int(lng.find(u"Код_родительской_категории").text),
int(lng.find(u"Уровень").text),
get_node_value(lng, u"Категория"),
lng.find("Path").text
))
store_data(conn, t_query, data)
def load_txtcategory(conn):
t_query = "insert into documents_textcategory(catalog_id, category_id, timestamp) values(?, ?, ?);"
tree = parse_file("Категоризация текстов")
data = []
for lng in tree.findall(u"Категоризация_x0020_текстов"):
data.append((
int(lng.find(u"Код_текста").text),
int(lng.find(u"Код_категории").text),
get_datetime_value(lng, "TimeStamp"),
))
store_data(conn, t_query, data)
def load_name(conn):
t_query = "insert into personalies_name(id, name, info) values(?, ?, ?);"
tree = parse_file("Имена")
data = []
for lng in tree.findall(u"Имена"):
data.append((
int(lng.find(u"Код1").text),
lng.find(u"Имя").text,
get_node_value(lng, u"Инфо"),
))
store_data(conn, t_query, data)
def load_authors(conn):
t_query = "insert into personalies_author(id, catalog_id, names_code, status, note, operator, date) values(?, ?, ?, ?, ?, ?, ?);"
tree = parse_file("Авторы")
data = []
for lng in tree.findall(u"Авторы"):
data.append((
get_int_value(lng, u"ID_Авторы"),
get_int_value(lng, u"Код_каталог"),
get_int_value(lng, u"Код_именник"),
get_node_value(lng, u"Статус"),
get_node_value(lng, u"Примечание"),
get_node_value(lng, u"Оператор"),
get_datetime_value(lng, u"Дата")
))
store_data(conn, t_query, data)
print ("Authors complete\n")
def load_receiver(conn):
t_query = "insert into personalies_receiver(id, person, title, 'group', id_doc) values(?, ?, ?, ?, ?);"
tree = parse_file("Адресат")
data = []
for lng in tree.findall(u"Адресат"):
data.append((
int(lng.find(u"id_adr").text),
get_node_value(lng, u"Адресат-персона"),
get_node_value(lng, u"Адресат-титул"),
get_node_value(lng, u"Адресат-группа"),
int(lng.find("id_doc").text)
))
store_data(conn, t_query, data)
def load_wiki(conn):
t_query = "insert into samizdat_wikitexts values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
tree = parse_file("wiki_texts")
data = []
for lng in tree.findall("wiki_texts"):
data.append((
get_int_value(lng, "ID_TRUE"),
get_node_value(lng, "nazv"),
get_node_value(lng, "red_zag"),
get_node_value(lng, "avtor"),
get_node_value(lng, "perevodchik"),
get_node_value(lng, "redaktor"),
get_node_value(lng, "data_n"),
get_node_value(lng, "mesto_n"),
get_node_value(lng, "data_i"),
get_node_value(lng, "mesto_i"),
get_node_value(lng, "zhanr"),
get_node_value(lng, "picture"),
get_node_value(lng, "samizdat"),
get_node_value(lng, "kategorii"),
get_node_value(lng, "title"),
get_node_value(lng, "link"),
get_node_value(lng, "user"),
get_node_value(lng, "ruwiki"),
get_node_value(lng, "enwiki"),
get_datetime_value(lng, "timestamp"),
get_node_value(lng, "oborotka"),
))
store_data(conn, t_query, data)
def load_xtc(conn):
t_query = "insert into samizdat_xtc values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
tree = parse_file("ХТС")
data = []
for lng in tree.findall(u"ХТС"):
data.append((
get_int_value(lng, "ID"),
get_node_value(lng, "Nom"),
get_node_value(lng, "Pages"),
get_int_value(lng, "Pages_from"),
get_int_value(lng, "Pages_to"),
get_node_value(lng, "Profile"),
get_node_value(lng, "Notes"),
get_node_value(lng, "NomDoc"),
get_node_value(lng, u"Оператор"),
get_datetime_value(lng, u"Дата_x020_ввода"),
))
store_data(conn, t_query, data)
def load_catalog(conn):
t_query = "insert into samizdat_catalog values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);" #53 params
tree = parse_file("Каталог")
data = []
for lng in tree.findall(u"Каталог"):
data.append((
get_int_value(lng, u"Код"),
get_node_value(lng, u"НомерАС"),
get_int_value(lng, u"Язык"),
get_node_value(lng, u"Translated"),
get_node_value(lng, u"Автор"),
get_node_value(lng, u"AvtPrim"),
get_node_value(lng, u"ГруппаАвт"),
get_node_value(lng, u"GrAvtMembers"),
get_node_value(lng, u"Подписанты"),
get_node_value(lng, u"Podpisant"),
get_node_value(lng, u"РедакторыСоставители"),
get_node_value(lng, u"EditorsSostPrim"),
get_node_value(lng, u"Самоназвание"),
get_node_value(lng, u"Name1"),
get_node_value(lng, u"TypeDoc"),
get_node_value(lng, u"Название"),
get_node_value(lng, u"Name2"),
get_node_value(lng, u"Место"),
get_node_value(lng, u"M-ind"),
get_node_value(lng, u"PlacePrim"),
get_node_value(lng, u"Дата"),
get_node_value(lng, u"DatePrim"),
get_datetime_value(lng, u"Date1"),
get_datetime_value(lng, u"Date2"),
get_node_value(lng, u"Способ воспроизведения"),
get_node_value(lng, u"Подлинность"),
get_node_value(lng, u"Количество экземпляров"),
get_node_value(lng, u"Правка"),
get_node_value(lng, u"Носитель"),
get_node_value(lng, u"Страниц"),
get_node_value(lng, u"Архивные примечания"),
get_node_value(lng, u"Примечания"),
get_node_value(lng, u"Опубликован"),
get_node_value(lng, u"Том"),
get_node_value(lng, u"ВыпускМС"),
get_node_value(lng, u"Год"),
get_node_value(lng, u"Фонд"),
get_node_value(lng, u"Опись"),
get_node_value(lng, u"Дело"),
get_node_value(lng, u"Листы"),
get_node_value(lng, u"Аннотация"),
get_node_value(lng, u"Адрес документа"),
get_int_value(lng, u"NAS"),
get_node_value(lng, u"NAS-ind"),
get_node_value(lng, u"Troubles"),
get_node_value(lng, u"Hr"),
get_node_value(lng, u"HrPoisk"),
get_node_value(lng, u"Оператор"),
get_datetime_value(lng, u"Дата ввода"),
get_node_value(lng, u"Ready"),
get_node_value(lng, u"Художка"),
get_node_value(lng, u"Ссылка"),
get_node_value(lng, u"AKA_Name"),
))
store_data(conn, t_query, data)
print "Wiki complete"
def load_all():
con = None
try:
con = lite.connect("../djamizdat/db.sqlite3")
load_docname(con)
load_languages(con)
load_reference(con)
load_category(con)
load_txtcategory(con)
load_name(con)
load_authors(con)
load_receiver(con)
load_wiki(con)
load_xtc(con)
load_catalog(con)
except lite.Error, e:
print "error:{0}".format(e.args[0])
sys.exit(1)
finally:
if con:
con.close()
if __name__ == '__main__':
load_all()
|
rbrito/pkg-youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/rottentomatoes.py
|
64
|
from __future__ import unicode_literals
from .common import InfoExtractor
from .internetvideoarchive import InternetVideoArchiveIE
class RottenTomatoesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)'
_TEST = {
'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/',
'info_dict': {
'id': '11028566',
'ext': 'mp4',
'title': 'Toy Story 3',
'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.',
'thumbnail': r're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id')
return {
'_type': 'url_transparent',
'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id,
'ie_key': InternetVideoArchiveIE.ie_key(),
'id': video_id,
'title': self._og_search_title(webpage),
}
|
goodwinnk/intellij-community
|
refs/heads/master
|
python/testData/completion/funcParamsStar.after.py
|
83
|
def foo(xboo1, *boo2, **boo3):
pass
foo(xboo1=)
|
psdh/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/command_contexts/open_and_close_window_test.py
|
141
|
import os
import sys
import random
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions
class OpenAndCloseWindowTest(base_test.WebDriverBaseTest):
def setUp(self):
self.driver.get(self.webserver.where_is("command_contexts/res/first-page.html"))
def tearDown(self):
handles = self.driver.get_window_handles()
for i in range(len(handles) - 1):
self.driver.switch_to_window(handles[i])
self.driver.close()
self.driver.switch_to_window(self.driver.get_window_handles()[0])
def test_open_new_window(self):
handles = self.driver.get_window_handles()
self.driver.find_element_by_id("open_new_window").click()
self.assertEquals(len(handles) + 1, len(self.driver.get_window_handles()))
def test_get_window_handles_returns_the_windows_that_have_been_opened(self):
self.driver.find_element_by_id("open_new_window").click()
handles = self.driver.get_window_handles()
self.driver.switch_to_window(handles[0])
url1 = self.driver.get_current_url()
self.driver.switch_to_window(handles[1])
url2 = self.driver.get_current_url()
if url1 == self.webserver.where_is("controlling_windows/res/other-page.html"):
self.assertEquals(url2, self.webserver.where_is("controlling_windows/res/first-page.html"))
elif url1 == self.webserver.where_is("controlling_windows/res/first-page.html"):
self.assertEquals(url2, self.webserver.where_is("controlling_windows/res/other-page.html"))
else:
self.fail("The wrong set of URLs were returned")
def test_close_window(self):
open_windows = len(self.driver.get_window_handles())
self.driver.find_element_by_id("open_new_window").click()
self.assertEquals(1 + open_windows, len(self.driver.get_window_handles()))
self.driver.close()
self.assertEquals(open_windows, len(self.driver.get_window_handles()))
def test_command_sent_to_closed_window_returns_no_such_window_exception(self):
self.driver.find_element_by_id("open_new_window").click()
self.driver.close()
with self.assertRaises(exceptions.NoSuchWindowException):
self.driver.get_window_handle()
if __name__ == "__main__":
unittest.main()
|
skybird6672/micropython
|
refs/heads/master
|
tests/basics/try_reraise.py
|
118
|
# Reraising last exception with raise w/o args
def f():
try:
raise ValueError("val", 3)
except:
raise
try:
f()
except ValueError as e:
print(repr(e))
# Can reraise only in except block
try:
raise
except RuntimeError:
print("RuntimeError")
|
Bolt64/sdes
|
refs/heads/master
|
file_encryptor.py
|
2
|
#!/usr/bin/env python3
import s_des_encryption_tools as sdes
import bit_manipulator as bt
import s_des_key_creator as keygen
def encryptor(src, key, decrypt=False):
"""
This generates encrypted bytes from the plaintext source file
"""
assert 0<=key<1023, "key must be a 10 bit binary"
key1,key2=keygen.s_des_keygen(key)
for byte in bt.get_bytes(src):
bitstring=bin(bt.byte_to_bits(byte))[2:].zfill(8)
cipher_bitstring=sdes.s_des_encrypt(bitstring, key1, key2, decrypt)
cipher_bit=int(cipher_bitstring, 2)
yield bt.bits_to_byte(cipher_bit)
def encrypt_file(src, dest, key, decrypt):
"""
This writes the byte string to a destination file
"""
bt.write_bytes(dest, encryptor(src, key, decrypt))
if __name__=="__main__":
from sys import argv
try:
src=argv[2]
dest=argv[3]
key=int(argv[4])
decrypt=bool(argv[1]=="-d")
if decrypt:
encrypt_file(src, dest, key, decrypt=True)
else:
encrypt_file(src, dest, key, decrypt=False)
except IndexError:
print("Usage: ./file_encryptor <-e|-d> plaintext ciphertext key(0-1023)")
|
HiSPARC/station-software
|
refs/heads/master
|
user/python/Lib/site-packages/chardet/sbcharsetprober.py
|
269
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import CharacterCategory, ProbingState, SequenceLikelihood
class SingleByteCharSetProber(CharSetProber):
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
def __init__(self, model, reversed=False, name_prober=None):
super(SingleByteCharSetProber, self).__init__()
self._model = model
# TRUE if we need to reverse every pair in the model lookup
self._reversed = reversed
# Optional auxiliary prober for name decision
self._name_prober = name_prober
self._last_order = None
self._seq_counters = None
self._total_seqs = None
self._total_char = None
self._freq_char = None
self.reset()
def reset(self):
super(SingleByteCharSetProber, self).reset()
# char order of last character
self._last_order = 255
self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
self._total_seqs = 0
self._total_char = 0
# characters that fall in our sampling range
self._freq_char = 0
@property
def charset_name(self):
if self._name_prober:
return self._name_prober.charset_name
else:
return self._model['charset_name']
@property
def language(self):
if self._name_prober:
return self._name_prober.language
else:
return self._model.get('language')
def feed(self, byte_str):
if not self._model['keep_english_letter']:
byte_str = self.filter_international_words(byte_str)
if not byte_str:
return self.state
char_to_order_map = self._model['char_to_order_map']
for i, c in enumerate(byte_str):
# XXX: Order is in range 1-64, so one would think we want 0-63 here,
# but that leads to 27 more test failures than before.
order = char_to_order_map[c]
# XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
# CharacterCategory.SYMBOL is actually 253, so we use CONTROL
# to make it closer to the original intent. The only difference
# is whether or not we count digits and control characters for
# _total_char purposes.
if order < CharacterCategory.CONTROL:
self._total_char += 1
if order < self.SAMPLE_SIZE:
self._freq_char += 1
if self._last_order < self.SAMPLE_SIZE:
self._total_seqs += 1
if not self._reversed:
i = (self._last_order * self.SAMPLE_SIZE) + order
model = self._model['precedence_matrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * self.SAMPLE_SIZE) + self._last_order
model = self._model['precedence_matrix'][i]
self._seq_counters[model] += 1
self._last_order = order
charset_name = self._model['charset_name']
if self.state == ProbingState.DETECTING:
if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
confidence = self.get_confidence()
if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, we have a winner',
charset_name, confidence)
self._state = ProbingState.FOUND_IT
elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, below negative '
'shortcut threshold %s', charset_name,
confidence,
self.NEGATIVE_SHORTCUT_THRESHOLD)
self._state = ProbingState.NOT_ME
return self.state
def get_confidence(self):
r = 0.01
if self._total_seqs > 0:
r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
self._total_seqs / self._model['typical_positive_ratio'])
r = r * self._freq_char / self._total_char
if r >= 1.0:
r = 0.99
return r
|
Brother-Simon/django-dynamic-scraper
|
refs/heads/master
|
dynamic_scraper/south_migrations/0001_initial.py
|
10
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ScrapedObjClass'
db.create_table('dynamic_scraper_scrapedobjclass', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('scraper_scheduler_conf', self.gf('django.db.models.fields.TextField')(default='"MIN_TIME": 15,\n"MAX_TIME": 10080,\n"INITIAL_NEXT_ACTION_FACTOR": 10,\n"ZERO_ACTIONS_FACTOR_CHANGE": 20,\n"FACTOR_CHANGE_FACTOR": 1.3,\n')),
('checker_scheduler_conf', self.gf('django.db.models.fields.TextField')(default='"MIN_TIME": 1440,\n"MAX_TIME": 10080,\n"INITIAL_NEXT_ACTION_FACTOR": 1,\n"ZERO_ACTIONS_FACTOR_CHANGE": 5,\n"FACTOR_CHANGE_FACTOR": 1.3,\n')),
('comments', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('dynamic_scraper', ['ScrapedObjClass'])
# Adding model 'ScrapedObjAttr'
db.create_table('dynamic_scraper_scrapedobjattr', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('obj_class', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dynamic_scraper.ScrapedObjClass'])),
('attr_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('dynamic_scraper', ['ScrapedObjAttr'])
# Adding model 'Scraper'
db.create_table('dynamic_scraper_scraper', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('scraped_obj_class', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dynamic_scraper.ScrapedObjClass'])),
('status', self.gf('django.db.models.fields.CharField')(default='P', max_length=1)),
('max_items_read', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('max_items_save', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('pagination_type', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
('pagination_on_start', self.gf('django.db.models.fields.BooleanField')(default=False)),
('pagination_append_str', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('pagination_page_replace', self.gf('django.db.models.fields.TextField')(blank=True)),
('checker_type', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
('checker_x_path', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('checker_x_path_result', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('checker_ref_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('comments', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('dynamic_scraper', ['Scraper'])
# Adding model 'ScraperElem'
db.create_table('dynamic_scraper_scraperelem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scraped_obj_attr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dynamic_scraper.ScrapedObjAttr'])),
('scraper', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dynamic_scraper.Scraper'])),
('x_path', self.gf('django.db.models.fields.CharField')(max_length=200)),
('reg_exp', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('from_detail_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('processors', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('proc_ctxt', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('mandatory', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('dynamic_scraper', ['ScraperElem'])
# Adding model 'SchedulerRuntime'
db.create_table('dynamic_scraper_schedulerruntime', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('runtime_type', self.gf('django.db.models.fields.CharField')(default='P', max_length=1)),
('next_action_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('next_action_factor', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('num_zero_actions', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('dynamic_scraper', ['SchedulerRuntime'])
# Adding model 'Log'
db.create_table('dynamic_scraper_log', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('message', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ref_object', self.gf('django.db.models.fields.CharField')(max_length=200)),
('level', self.gf('django.db.models.fields.IntegerField')()),
('spider_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('scraper', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dynamic_scraper.Scraper'], null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('dynamic_scraper', ['Log'])
def backwards(self, orm):
# Deleting model 'ScrapedObjClass'
db.delete_table('dynamic_scraper_scrapedobjclass')
# Deleting model 'ScrapedObjAttr'
db.delete_table('dynamic_scraper_scrapedobjattr')
# Deleting model 'Scraper'
db.delete_table('dynamic_scraper_scraper')
# Deleting model 'ScraperElem'
db.delete_table('dynamic_scraper_scraperelem')
# Deleting model 'SchedulerRuntime'
db.delete_table('dynamic_scraper_schedulerruntime')
# Deleting model 'Log'
db.delete_table('dynamic_scraper_log')
models = {
'dynamic_scraper.log': {
'Meta': {'ordering': "['-date']", 'object_name': 'Log'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ref_object': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'scraper': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_scraper.Scraper']", 'null': 'True', 'blank': 'True'}),
'spider_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'dynamic_scraper.schedulerruntime': {
'Meta': {'object_name': 'SchedulerRuntime'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'next_action_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'next_action_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'num_zero_actions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'runtime_type': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '1'})
},
'dynamic_scraper.scrapedobjattr': {
'Meta': {'object_name': 'ScrapedObjAttr'},
'attr_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'obj_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_scraper.ScrapedObjClass']"})
},
'dynamic_scraper.scrapedobjclass': {
'Meta': {'object_name': 'ScrapedObjClass'},
'checker_scheduler_conf': ('django.db.models.fields.TextField', [], {'default': '\'"MIN_TIME": 1440,\\n"MAX_TIME": 10080,\\n"INITIAL_NEXT_ACTION_FACTOR": 1,\\n"ZERO_ACTIONS_FACTOR_CHANGE": 5,\\n"FACTOR_CHANGE_FACTOR": 1.3,\\n\''}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'scraper_scheduler_conf': ('django.db.models.fields.TextField', [], {'default': '\'"MIN_TIME": 15,\\n"MAX_TIME": 10080,\\n"INITIAL_NEXT_ACTION_FACTOR": 10,\\n"ZERO_ACTIONS_FACTOR_CHANGE": 20,\\n"FACTOR_CHANGE_FACTOR": 1.3,\\n\''})
},
'dynamic_scraper.scraper': {
'Meta': {'object_name': 'Scraper'},
'checker_ref_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'checker_type': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'checker_x_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'checker_x_path_result': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_items_read': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_items_save': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pagination_append_str': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'pagination_on_start': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pagination_page_replace': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pagination_type': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'scraped_obj_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_scraper.ScrapedObjClass']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '1'})
},
'dynamic_scraper.scraperelem': {
'Meta': {'object_name': 'ScraperElem'},
'from_detail_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mandatory': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'proc_ctxt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'processors': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'reg_exp': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'scraped_obj_attr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_scraper.ScrapedObjAttr']"}),
'scraper': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_scraper.Scraper']"}),
'x_path': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['dynamic_scraper']
|
laysakura/chainer
|
refs/heads/master
|
chainer/functions/activation/lstm.py
|
18
|
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _extract_gates(x):
r = x.reshape((x.shape[0], x.shape[1] // 4, 4) + x.shape[2:])
return (r[:, :, i] for i in six.moves.range(4))
def _sigmoid(x):
return 1 / (1 + numpy.exp(-x))
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_tanh(x):
return 1 - x * x
_preamble = '''
template <typename T> __device__ T sigmoid(T x) { return 1 / (1 + exp(-x)); }
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
class LSTM(function.Function):
"""Long short-term memory unit with forget gate.
It has two inputs (c, x) and two outputs (c, h), where c indicates the cell
state. x must have four times as many channels as the number of units.
"""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
c_type, x_type = in_types
type_check.expect(
c_type.dtype == numpy.float32,
x_type.dtype == numpy.float32,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] == c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in range(2, c_type.ndim.eval()):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward(self, inputs):
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
if isinstance(x, numpy.ndarray):
self.a = numpy.tanh(a)
self.i = _sigmoid(i)
self.f = _sigmoid(f)
self.o = _sigmoid(o)
self.c = self.a * self.i + self.f * c_prev
h = self.o * numpy.tanh(self.c)
else:
self.c, h = cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(c_prev, a, i, f, o)
return self.c, h
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
c_prev, x = inputs
gc, gh = grad_outputs
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
# Consider the case that either gradient is not given
if gc is None:
gc = 0
if gh is None:
gh = 0
if xp is numpy:
co = numpy.tanh(self.c)
gc_prev = gh * self.o * _grad_tanh(co) + gc # multiply f later
ga[:] = gc_prev * self.i * _grad_tanh(self.a)
gi[:] = gc_prev * self.a * _grad_sigmoid(self.i)
gf[:] = gc_prev * c_prev * _grad_sigmoid(self.f)
go[:] = gh * co * _grad_sigmoid(self.o)
gc_prev *= self.f # multiply f here
else:
a, i, f, o = _extract_gates(x)
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev, self.c, gc, gh, a, i, f, o,
gc_prev, ga, gi, gf, go)
return gc_prev, gx
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let the previous
cell state :math:`c_{\\text{prev}}` and the incoming signal :math:`x`.
First, the incoming signal :math:`x` is split into four arrays
:math:`a, i, f, o` of the same shapes along the second axis.
It means that :math:`x` 's second axis must have 4 times the length of
:math:`c_{\\text{prev}}`.
The split input signals correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes outputs as:
.. math::
c &= \\tanh(a) \\text{sigmoid}(i)
+ c_{\\text{prev}} \\text{sigmoid}(f), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of two variables.
Args:
c_prev (~chainer.Variable): Variable that holds the previous cell
state. The cell state should be a zero array or the output of the
previous call of LSTM.
x (~chainer.Variable): Variable that holds the incoming signal. It must
have the second dimension four times that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks \
<http://www.felixgers.de/papers/phd.pdf>`_.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is:
>>> model = FunctionSet(w=F.Linear(n_units, 4 * n_units),
... v=F.Linear(n_units, 4 * n_units),
... ...)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
This corresponds to computing the input sources :math:`a, i, f, o` from
the current input ``y`` and the previous output ``h``. Different
parameters are used for different kinds of input sources.
"""
return LSTM()(c_prev, x)
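# --- Hedged illustration (editor addition, not part of the upstream module) ---
# Pure-NumPy check of the forward formulas from the lstm() docstring, reusing
# only helpers defined above; batch size 2 and 3 units are arbitrary choices.
if __name__ == '__main__':
    c_prev = numpy.zeros((2, 3), dtype=numpy.float32)
    x = numpy.random.randn(2, 12).astype(numpy.float32)
    a, i, f, o = _extract_gates(x)
    c = numpy.tanh(a) * _sigmoid(i) + _sigmoid(f) * c_prev
    h = _sigmoid(o) * numpy.tanh(c)
    print(c.shape, h.shape)  # both (2, 3)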
|
indautgrp/erpnext
|
refs/heads/develop
|
erpnext/patches/v7_0/update_prevdoc_values_for_supplier_quotation_item.py
|
44
|
import frappe
def execute():
frappe.reload_doctype('Supplier Quotation Item')
for data in frappe.db.sql(""" select prevdoc_docname, prevdoc_detail_docname, name
from `tabSupplier Quotation Item` where prevdoc_docname is not null""", as_dict=True):
frappe.db.set_value("Supplier Quotation Item", data.name, "material_request", data.prevdoc_docname)
frappe.db.set_value("Supplier Quotation Item", data.name, "material_request_item", data.prevdoc_detail_docname)
|
crwilcox/PTVS
|
refs/heads/master
|
Python/Tests/TestData/DjangoTemplateCodeIntelligence/urls.py
|
18
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'DjangoTemplateCodeIntelligence.views.home', name='home'),
# url(r'^DjangoTemplateCodeIntelligence/', include('DjangoTemplateCodeIntelligence.example.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
creativcoder/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/testing/test_genscript.py
|
194
|
import pytest
import sys
@pytest.fixture(scope="module")
def standalone(request):
return Standalone(request)
class Standalone:
def __init__(self, request):
self.testdir = request.getfuncargvalue("testdir")
script = "mypytest"
result = self.testdir.runpytest("--genscript=%s" % script)
assert result.ret == 0
self.script = self.testdir.tmpdir.join(script)
assert self.script.check()
def run(self, anypython, testdir, *args):
return testdir._run(anypython, self.script, *args)
def test_gen(testdir, anypython, standalone):
if sys.version_info >= (2,7):
result = testdir._run(anypython, "-c",
"import sys;print (sys.version_info >=(2,7))")
assert result.ret == 0
if result.stdout.str() == "False":
pytest.skip("genscript called from python2.7 cannot work "
"earlier python versions")
result = standalone.run(anypython, testdir, '--version')
if result.ret == 2:
result.stderr.fnmatch_lines(["*ERROR: setuptools not installed*"])
elif result.ret == 0:
result.stderr.fnmatch_lines([
"*imported from*mypytest*"
])
p = testdir.makepyfile("def test_func(): assert 0")
result = standalone.run(anypython, testdir, p)
assert result.ret != 0
else:
pytest.fail("Unexpected return code")
def test_freeze_includes():
"""
Smoke test for freeze_includes(), to ensure that it works across all
supported python versions.
"""
includes = pytest.freeze_includes()
assert len(includes) > 1
assert '_pytest.genscript' in includes
|
dfdx2/django
|
refs/heads/master
|
django/contrib/gis/geometry/regex.py
|
657
|
import re
# Regular expression for recognizing HEXEWKB and WKT. A prophylactic measure
# to prevent potentially malicious input from reaching the underlying C
# library. Not a substitute for good Web security programming practices.
hex_regex = re.compile(r'^[0-9A-F]+$', re.I)
wkt_regex = re.compile(r'^(SRID=(?P<srid>\-?\d+);)?'
r'(?P<wkt>'
r'(?P<type>POINT|LINESTRING|LINEARRING|POLYGON|MULTIPOINT|'
r'MULTILINESTRING|MULTIPOLYGON|GEOMETRYCOLLECTION)'
r'[ACEGIMLONPSRUTYZ\d,\.\-\(\) ]+)$',
re.I)
json_regex = re.compile(r'^(\s+)?\{.*}(\s+)?$', re.DOTALL)
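# Hedged usage sketch (not part of the original module): the patterns above act
# as a cheap sanity filter before input reaches the C library, e.g.
#
#   >>> bool(wkt_regex.match('SRID=4326;POINT(5 23)'))
#   True
#   >>> bool(hex_regex.match('0101000000000000000000F03F000000000000F03F'))
#   True
#   >>> bool(json_regex.match('{"type": "Point", "coordinates": [5, 23]}'))
#   True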
|
cowlicks/numpy
|
refs/heads/master
|
numpy/distutils/tests/swig_ext/setup.py
|
135
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('swig_ext', parent_package, top_path)
config.add_extension('_example',
['src/example.i', 'src/example.c']
)
config.add_extension('_example2',
['src/zoo.i', 'src/zoo.cc'],
depends=['src/zoo.h'],
include_dirs=['src']
)
config.add_data_dir('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
|
Shedward/timus
|
refs/heads/master
|
templates/template.py
|
1
|
print('Hello!')
|
waldyrious/GraphUI
|
refs/heads/master
|
old/capnf/code/__init__.py
|
6
|
import base
import builtins
|
lucychambers/pepperrage
|
refs/heads/gh-pages
|
.bundle/ruby/2.0.0/gems/pygments.rb-0.6.0/vendor/pygments-main/pygments/styles/vs.py
|
135
|
# -*- coding: utf-8 -*-
"""
pygments.styles.vs
~~~~~~~~~~~~~~~~~~
Simple style with MS Visual Studio colors.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class VisualStudioStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "#008000",
Comment.Preproc: "#0000ff",
Keyword: "#0000ff",
Operator.Word: "#0000ff",
Keyword.Type: "#2b91af",
Name.Class: "#2b91af",
String: "#a31515",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
|
bjodah/mpmath
|
refs/heads/master
|
mpmath/tests/test_levin.py
|
5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from mpmath import mp
from mpmath import libmp
xrange = libmp.backend.xrange
# Attention:
# These tests run with 15-20 decimal digits precision. For higher precision the
# working precision must be raised.
def test_levin_0():
mp.dps = 17
eps = mp.mpf(mp.eps)
with mp.extraprec(2 * mp.prec):
L = mp.levin(method = "levin", variant = "u")
S, s, n = [], 0, 1
while 1:
s += mp.one / (n * n)
n += 1
S.append(s)
v, e = L.update_psum(S)
if e < eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
eps = mp.exp(0.9 * mp.log(eps))
err = abs(v - mp.pi ** 2 / 6)
assert err < eps
w = mp.nsum(lambda n: 1/(n * n), [1, mp.inf], method = "levin", levin_variant = "u")
err = abs(v - w)
assert err < eps
def test_levin_1():
mp.dps = 17
eps = mp.mpf(mp.eps)
with mp.extraprec(2 * mp.prec):
L = mp.levin(method = "levin", variant = "v")
A, n = [], 1
while 1:
s = mp.mpf(n) ** (2 + 3j)
n += 1
A.append(s)
v, e = L.update(A)
if e < eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
eps = mp.exp(0.9 * mp.log(eps))
err = abs(v - mp.zeta(-2-3j))
assert err < eps
w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
err = abs(v - w)
assert err < eps
def test_levin_2():
    # [2] A. Sidi - "Practical Extrapolation Methods" p.373
mp.dps = 17
z=mp.mpf(10)
eps = mp.mpf(mp.eps)
with mp.extraprec(2 * mp.prec):
L = mp.levin(method = "sidi", variant = "t")
n = 0
while 1:
s = (-1)**n * mp.fac(n) * z ** (-n)
v, e = L.step(s)
n += 1
if e < eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
eps = mp.exp(0.9 * mp.log(eps))
exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
# there is also a symbolic expression for the integral:
# exact = z * mp.exp(z) * mp.expint(1,z)
err = abs(v - exact)
assert err < eps
    w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
    err = abs(v - w)
    assert err < eps
def test_levin_3():
mp.dps = 17
z=mp.mpf(2)
eps = mp.mpf(mp.eps)
    with mp.extraprec(7*mp.prec): # we need a copious amount of precision to sum this highly divergent series
L = mp.levin(method = "levin", variant = "t")
n, s = 0, 0
while 1:
s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
n += 1
v, e = L.step_psum(s)
if e < eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
eps = mp.exp(0.8 * mp.log(eps))
exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
# there is also a symbolic expression for the integral:
# exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi))
err = abs(v - exact)
assert err < eps
w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
err = abs(v - w)
assert err < eps
def test_levin_nsum():
mp.dps = 17
with mp.extraprec(mp.prec):
z = mp.mpf(10) ** (-10)
a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
assert abs(a - mp.euler) < 1e-10
eps = mp.exp(0.8 * mp.log(mp.eps))
a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
assert abs(a - mp.log(2)) < eps
z = 2 + 1j
f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
assert abs(exact - v) < eps
def test_cohen_alt_0():
mp.dps = 17
AC = mp.cohen_alt()
S, s, n = [], 0, 1
while 1:
s += -((-1) ** n) * mp.one / (n * n)
n += 1
S.append(s)
v, e = AC.update_psum(S)
if e < mp.eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
eps = mp.exp(0.9 * mp.log(mp.eps))
err = abs(v - mp.pi ** 2 / 12)
assert err < eps
def test_cohen_alt_1():
mp.dps = 17
A = []
AC = mp.cohen_alt()
n = 1
while 1:
A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
A.append(-mp.loggamma(1 + mp.one / (2 * n)))
n += 1
v, e = AC.update(A)
if e < mp.eps:
break
if n > 1000: raise RuntimeError("iteration limit exceeded")
v = mp.exp(v)
err = abs(v - 1.06215090557106)
assert err < 1e-12
|
Menooker/gem5_pcm
|
refs/heads/master
|
src/sim/Process.py
|
15
|
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class Process(SimObject):
type = 'Process'
abstract = True
cxx_header = "sim/process.hh"
input = Param.String('cin', "filename for stdin")
output = Param.String('cout', 'filename for stdout')
errout = Param.String('cerr', 'filename for stderr')
system = Param.System(Parent.any, "system process will run on")
max_stack_size = Param.MemorySize('64MB', 'maximum size of the stack')
@classmethod
def export_methods(cls, code):
code('bool map(Addr vaddr, Addr paddr, int size);')
class LiveProcess(Process):
type = 'LiveProcess'
cxx_header = "sim/process.hh"
executable = Param.String('', "executable (overrides cmd[0] if set)")
cmd = VectorParam.String("command line (executable plus arguments)")
env = VectorParam.String([], "environment settings")
cwd = Param.String('', "current working directory")
uid = Param.Int(100, 'user id')
euid = Param.Int(100, 'effective user id')
gid = Param.Int(100, 'group id')
egid = Param.Int(100, 'effective group id')
pid = Param.Int(100, 'process id')
ppid = Param.Int(99, 'parent process id')
simpoint = Param.UInt64(0, 'simulation point at which to start simulation')
|
dydek/django
|
refs/heads/master
|
django/contrib/admindocs/middleware.py
|
477
|
from django import http
from django.conf import settings
class XViewMiddleware(object):
"""
Adds an X-View header to internal HEAD requests -- used by the documentation system.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
If the request method is HEAD and either the IP is internal or the
user is a logged-in staff member, quickly return with an x-header
indicating the view function. This is used by the documentation module
        to look up the view function for an arbitrary page.
"""
assert hasattr(request, 'user'), (
"The XView middleware requires authentication middleware to be "
"installed. Edit your MIDDLEWARE_CLASSES setting to insert "
"'django.contrib.auth.middleware.AuthenticationMiddleware'.")
if request.method == 'HEAD' and (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS or
(request.user.is_active and request.user.is_staff)):
response = http.HttpResponse()
response['X-View'] = "%s.%s" % (view_func.__module__, view_func.__name__)
return response
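# Hedged illustration (not part of the original module): for a HEAD request
# whose REMOTE_ADDR is listed in INTERNAL_IPS (or that comes from an active
# staff user), the short-circuit response above carries a header such as
#
#   X-View: myapp.views.detail
#
# where ``myapp.views.detail`` is a hypothetical view; admindocs uses this
# header to map a URL back to the view function that would have served it.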
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.4/Lib/encodings/punycode.py
|
15
|
# -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
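# Hedged usage sketch (not part of the original codec module): under Python 2
# the helpers above reproduce the familiar RFC 3492 / IDNA examples, e.g.
#
#   >>> punycode_encode(u"b\xfccher")
#   'bcher-kva'
#   >>> punycode_decode("bcher-kva", "strict")
#   u'b\xfccher'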
|
40023255/w16b_test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/version.py
|
607
|
## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Simply the current installed pygame version. The version information is
stored in the regular pygame module as 'pygame.ver'. Keeping the version
information also available in a separate module allows you to test the
pygame version without importing the main pygame module.
The python version information should always compare greater than any previous
releases. (hmm, until we get to versions > 10)
"""
ver = '1.8.0pre'
vernum = 1,8,0
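# Hedged usage sketch (not part of the original file): because the version
# lives in its own module, callers can test it without importing the full
# pygame package, e.g.
#
#   from pygame import version
#   if version.vernum < (1, 8, 0):
#       raise ImportError('pygame 1.8.0 or newer is required')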
|
tbeadle/django
|
refs/heads/master
|
tests/utils_tests/test_feedgenerator.py
|
6
|
from __future__ import unicode_literals
import datetime
import unittest
from django.test import TestCase
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone, utc
class FeedgeneratorTest(unittest.TestCase):
"""
Tests for the low-level syndication feed framework.
"""
def test_get_tag_uri(self):
"""
Test get_tag_uri() correctly generates TagURIs.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
'tag:example.org,2004-10-25:/foo/bar/headline')
def test_get_tag_uri_with_port(self):
"""
Test that get_tag_uri() correctly generates TagURIs from URLs with port
numbers.
"""
self.assertEqual(
feedgenerator.get_tag_uri(
'http://www.example.org:8000/2008/11/14/django#headline',
datetime.datetime(2008, 11, 14, 13, 37, 0),
),
'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')
def test_rfc2822_date(self):
"""
Test rfc2822_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"Fri, 14 Nov 2008 13:37:00 -0000"
)
def test_rfc2822_date_with_timezone(self):
"""
Test rfc2822_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(60))),
"Fri, 14 Nov 2008 13:37:00 +0100"
)
def test_rfc2822_date_without_time(self):
"""
Test rfc2822_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
"Fri, 14 Nov 2008 00:00:00 -0000"
)
def test_rfc3339_date(self):
"""
Test rfc3339_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"2008-11-14T13:37:00Z"
)
def test_rfc3339_date_with_timezone(self):
"""
Test rfc3339_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(120))),
"2008-11-14T13:37:00+02:00"
)
def test_rfc3339_date_without_time(self):
"""
Test rfc3339_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
"2008-11-14T00:00:00Z"
)
def test_atom1_mime_type(self):
"""
Test to make sure Atom MIME type has UTF8 Charset parameter set
"""
atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
self.assertEqual(
atom_feed.content_type, "application/atom+xml; charset=utf-8"
)
def test_rss_mime_type(self):
"""
Test to make sure RSS MIME type has UTF8 Charset parameter set
"""
rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
self.assertEqual(
rss_feed.content_type, "application/rss+xml; charset=utf-8"
)
# Two regression tests for #14202
def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
self.assertEqual(feed.feed['feed_url'], None)
feed_content = feed.writeString('utf-8')
self.assertNotIn('<atom:link', feed_content)
self.assertNotIn('href="/feed/"', feed_content)
self.assertNotIn('rel="self"', feed_content)
def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
self.assertEqual(feed.feed['feed_url'], '/feed/')
feed_content = feed.writeString('utf-8')
self.assertIn('<atom:link', feed_content)
self.assertIn('href="/feed/"', feed_content)
self.assertIn('rel="self"', feed_content)
class FeedgeneratorDBTest(TestCase):
# setting the timezone requires a database query on PostgreSQL.
def test_latest_post_date_returns_utc_time(self):
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
rss_feed = feedgenerator.Rss201rev2Feed('title', 'link', 'description')
self.assertEqual(rss_feed.latest_post_date().tzinfo, utc)
|
FireBladeNooT/Medusa_1_6
|
refs/heads/master
|
lib/feedparser/namespaces/dc.py
|
43
|
# Support for the Dublin Core metadata extensions
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from ..util import FeedParserDict
from ..datetimes import _parse_date
class Namespace(object):
supported_namespaces = {
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
}
def _end_dc_author(self):
self._end_author()
def _end_dc_creator(self):
self._end_author()
def _end_dc_date(self):
self._end_updated()
def _end_dc_description(self):
self._end_description()
def _end_dc_language(self):
self._end_language()
def _end_dc_publisher(self):
self._end_webmaster()
def _end_dc_rights(self):
self._end_rights()
def _end_dc_subject(self):
self._end_category()
def _end_dc_title(self):
self._end_title()
def _end_dcterms_created(self):
self._end_created()
def _end_dcterms_issued(self):
self._end_published()
def _end_dcterms_modified(self):
self._end_updated()
def _start_dc_author(self, attrsD):
self._start_author(attrsD)
def _start_dc_creator(self, attrsD):
self._start_author(attrsD)
def _start_dc_date(self, attrsD):
self._start_updated(attrsD)
def _start_dc_description(self, attrsD):
self._start_description(attrsD)
def _start_dc_language(self, attrsD):
self._start_language(attrsD)
def _start_dc_publisher(self, attrsD):
self._start_webmaster(attrsD)
def _start_dc_rights(self, attrsD):
self._start_rights(attrsD)
def _start_dc_subject(self, attrsD):
self._start_category(attrsD)
def _start_dc_title(self, attrsD):
self._start_title(attrsD)
def _start_dcterms_created(self, attrsD):
self._start_created(attrsD)
def _start_dcterms_issued(self, attrsD):
self._start_published(attrsD)
def _start_dcterms_modified(self, attrsD):
self._start_updated(attrsD)
def _start_dcterms_valid(self, attrsD):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
|
shishengjia/OnlineCourses
|
refs/heads/master
|
extra_apps/xadmin/plugins/topnav.py
|
21
|
from django.template import loader
from django.utils.text import capfirst
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.filters import SEARCH_VAR
from xadmin.views import BaseAdminPlugin, CommAdminView
class TopNavPlugin(BaseAdminPlugin):
global_search_models = None
global_add_models = None
def get_context(self, context):
return context
# Block Views
def block_top_navbar(self, context, nodes):
search_models = []
site_name = self.admin_site.name
        if self.global_search_models is None:
models = self.admin_site._registry.keys()
else:
models = self.global_search_models
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "view"):
info = (app_label, model._meta.model_name)
if getattr(self.admin_site._registry[model], 'search_fields', None):
try:
search_models.append({
'title': _('Search %s') % capfirst(model._meta.verbose_name_plural),
'url': reverse('xadmin:%s_%s_changelist' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
return nodes.append(loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'search_models': search_models, 'search_name': SEARCH_VAR}))
def block_top_navmenu(self, context, nodes):
add_models = []
site_name = self.admin_site.name
        if self.global_add_models is None:
models = self.admin_site._registry.keys()
else:
models = self.global_add_models
for model in models:
app_label = model._meta.app_label
if self.has_model_perm(model, "add"):
info = (app_label, model._meta.model_name)
try:
add_models.append({
'title': _('Add %s') % capfirst(model._meta.verbose_name),
'url': reverse('xadmin:%s_%s_add' % info, current_app=site_name),
'model': model
})
except NoReverseMatch:
pass
nodes.append(
loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'add_models': add_models}))
site.register_plugin(TopNavPlugin, CommAdminView)
|
ltowarek/budget-supervisor
|
refs/heads/dependabot/pip/django-3.1.10
|
third_party/saltedge/swagger_client/models/connection_response.py
|
1
|
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ConnectionResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'Connection'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""ConnectionResponse - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this ConnectionResponse. # noqa: E501
:return: The data of this ConnectionResponse. # noqa: E501
:rtype: Connection
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ConnectionResponse.
:param data: The data of this ConnectionResponse. # noqa: E501
:type: Connection
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConnectionResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConnectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
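# Hedged usage sketch (not part of the generated file): given a Connection
# model instance ``conn`` (hypothetical here), the wrapper is typically used as
#
#   resp = ConnectionResponse(data=conn)
#   resp.to_dict()   # -> {'data': conn.to_dict()}
#   repr(resp)       # pprint-formatted string via to_str()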
|
seraphlnWu/wekka
|
refs/heads/master
|
wekka/wekka/themes/steward/admin.py
|
1
|
# coding=utf8
#
from django.contrib import admin
from models import SiteConfig, Category, Videos
class SiteConfigAdmin(admin.ModelAdmin):
''' '''
list_display = ('site_name', 'site_url', 'admin_email', 'icp_info',
'show_icp', 'third_analysis_code', 'close_site')
class CategoryAdmin(admin.ModelAdmin):
''' '''
list_display = ('cate_name', 'cate_parent', 'is_online', 'create_on', 'modify_on')
search_fields = ('cate_name', 'cate_parent')
class VideosAdmin(admin.ModelAdmin):
''' '''
list_display = ('video_title', 'video_category', 'video_source', 'video_tags',
'video_urls', 'video_username', 'is_online')
search_fields = ('video_title', 'video_source')
admin.site.register(SiteConfig, SiteConfigAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Videos, VideosAdmin)
|
gencer/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/data/region_DK.py
|
2
|
"""Auto-generated file, do not edit by hand. DK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_DK = PhoneMetadata(id='DK', country_code=45, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:[2-7]\\d|8[126-9]|9[1-36-9])\\d{6}', example_number='32123456', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[2-7]\\d|8[126-9]|9[1-36-9])\\d{6}', example_number='20123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{6}', example_number='80123456', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{6}', example_number='90123456', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')],
mobile_number_portable_region=True)
|
Freso/beets
|
refs/heads/master
|
beetsplug/importfeeds.py
|
7
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
"""Write paths of imported files in various formats to ease later import in a
music player. Also allow printing the new file locations to stdout in case
one wants to manually add music to a player by its path.
"""
import datetime
import os
import re
from beets.plugins import BeetsPlugin
from beets.util import mkdirall, normpath, syspath, bytestring_path
from beets import config
M3U_DEFAULT_NAME = 'imported.m3u'
def _get_feeds_dir(lib):
"""Given a Library object, return the path to the feeds directory to be
used (either in the library directory or an explicitly configured
path). Ensures that the directory exists.
"""
# Inside library directory.
dirpath = lib.directory
# Ensure directory exists.
if not os.path.exists(syspath(dirpath)):
os.makedirs(syspath(dirpath))
return dirpath
def _build_m3u_filename(basename):
"""Builds unique m3u filename by appending given basename to current
date."""
basename = re.sub(r"[\s,/\\'\"]", '_', basename)
date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
path = normpath(os.path.join(
config['importfeeds']['dir'].as_filename(),
date + '_' + basename + '.m3u'
))
return path
def _write_m3u(m3u_path, items_paths):
"""Append relative paths to items into m3u file.
"""
mkdirall(m3u_path)
with open(syspath(m3u_path), 'a') as f:
for path in items_paths:
f.write(path + b'\n')
class ImportFeedsPlugin(BeetsPlugin):
def __init__(self):
super(ImportFeedsPlugin, self).__init__()
self.config.add({
'formats': [],
'm3u_name': u'imported.m3u',
'dir': None,
'relative_to': None,
'absolute_path': False,
})
feeds_dir = self.config['dir'].get()
if feeds_dir:
feeds_dir = os.path.expanduser(bytestring_path(feeds_dir))
self.config['dir'] = feeds_dir
if not os.path.exists(syspath(feeds_dir)):
os.makedirs(syspath(feeds_dir))
relative_to = self.config['relative_to'].get()
if relative_to:
self.config['relative_to'] = normpath(relative_to)
else:
self.config['relative_to'] = feeds_dir
self.register_listener('library_opened', self.library_opened)
self.register_listener('album_imported', self.album_imported)
self.register_listener('item_imported', self.item_imported)
def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format
"""
feedsdir = bytestring_path(self.config['dir'].as_filename())
formats = self.config['formats'].as_str_seq()
relative_to = self.config['relative_to'].get() \
or self.config['dir'].as_filename()
relative_to = bytestring_path(relative_to)
paths = []
for item in items:
if self.config['absolute_path']:
paths.append(item.path)
else:
try:
relpath = os.path.relpath(item.path, relative_to)
except ValueError:
# On Windows, it is sometimes not possible to construct a
# relative path (if the files are on different disks).
relpath = item.path
paths.append(relpath)
if 'm3u' in formats:
m3u_basename = bytestring_path(
self.config['m3u_name'].get(unicode))
m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths)
if 'm3u_multi' in formats:
m3u_path = _build_m3u_filename(basename)
_write_m3u(m3u_path, paths)
if 'link' in formats:
for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest))
if 'echo' in formats:
self._log.info(u"Location of imported music:")
for path in paths:
self._log.info(u" {0}", path)
def library_opened(self, lib):
if self.config['dir'].get() is None:
self.config['dir'] = _get_feeds_dir(lib)
def album_imported(self, lib, album):
self._record_items(lib, album.album, album.items())
def item_imported(self, lib, item):
self._record_items(lib, item.title, [item])
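# Hedged configuration sketch (not part of the original plugin): based on the
# options registered in __init__ above, a beets config enabling the plugin
# might look roughly like:
#
#   plugins: importfeeds
#   importfeeds:
#       formats: m3u link
#       dir: ~/imports/
#       relative_to: ~/imports/
#       absolute_path: no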
|
schristakidis/p2ner
|
refs/heads/development
|
p2ner/components/ui/gtkgui/gtkgui/options/optServers.py
|
1
|
import os, sys
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pygtk
from twisted.internet import reactor
pygtk.require("2.0")
import gtk
import gobject
from helper import validateIp,validatePort
from generic import genericFrame
from pkg_resources import resource_string
from addServer import AddServerGui
class serversFrame(genericFrame):
def initUI(self):
self.builder = gtk.Builder()
self.builder.add_from_string(resource_string(__name__, 'optServers.glade'))
self.builder.connect_signals(self)
self.serversTreeview = self.builder.get_object("serversTreeview")
model=self.serversTreeview.get_model()
renderer=gtk.CellRendererText()
renderer.set_property('xpad',10)
column=gtk.TreeViewColumn("ip",renderer, text=0)
column.set_resizable(True)
self.serversTreeview.append_column(column)
renderer=gtk.CellRendererText()
renderer.set_property('xpad',10)
column=gtk.TreeViewColumn("port",renderer, text=1)
column.set_resizable(True)
self.serversTreeview.append_column(column)
renderer=gtk.CellRendererToggle()
renderer.set_property('xpad',5)
column=gtk.TreeViewColumn("valid",renderer, active=2)
renderer.connect("toggled", self.toggled_cb, model)
column.set_resizable(True)
self.serversTreeview.append_column(column)
self.serversTreeview.show()
self.ui = self.builder.get_object("ui")
self.ui.show()
self.frame=self.ui
self.loadServers()
def refresh(self):
self.loadServers()
def toggled_cb(self,cell, path, user_data):
model = user_data
model[path][2] = not model[path][2]
self.preferences.setActiveServer(model[path][0],model[path][1],model[path][2])
return
def loadServers(self):
model=self.serversTreeview.get_model()
model.clear()
servers=self.preferences.loadServers()
for server in servers:
model.append((str(server['ip']),int(server['port']),server['valid']))
self.setDefaultServer()
def setDefaultServer(self):
default=self.preferences.getDefaultServer()
if default:
self.builder.get_object('defaultEntry').set_text(str(default))
else:
self.builder.get_object('defaultEntry').set_text('')
def on_editButton_clicked(self,widget):
treeselection=self.serversTreeview.get_selection()
try:
(model, iter) = treeselection.get_selected()
path=model.get_path(iter)
except:
return
self.serversTreeview.set_sensitive(False)
AddServerGui(self.newServer,model.get_value(iter,0),model.get_value(iter,1),model.get_value(iter,2),iter)
def on_deleteButton_clicked(self,widget):
treeselection=self.serversTreeview.get_selection()
(model, iter) = treeselection.get_selected()
try:
path=model.get_path(iter)
except:
return
self.preferences.removeServer(model.get_value(iter,0),model.get_value(iter,1))
s=model[path][0]
model.remove(iter)
self.setDefaultServer()
def on_newButton_clicked(self,widget):
self.serversTreeview.set_sensitive(False)
AddServerGui(self.newServer)
return
def newServer(self,res=None,args=None):
self.serversTreeview.set_sensitive(True)
if not res:
return
ip=res[0]
port=res[1]
valid=res[2]
model=self.serversTreeview.get_model()
if not args:
if self.checkNewServer(ip,port):
model.append((ip,port,valid))
self.preferences.addServer(ip,port,valid)
else:
iter=args
old=[]
old.append(model.get_value(iter,0))
model.set_value(iter,0,ip)
old.append(model.get_value(iter,1))
model.set_value(iter,1,port)
old.append(model.get_value(iter,2))
model.set_value(iter,2,valid)
self.preferences.changeServer(old,(ip,port,valid))
self.setDefaultServer()
def checkNewServer(self,ip,port):
model=self.serversTreeview.get_model()
m=[s for s in model if ip==s[0] and port==s[1]]
if len(m):
return False
else:
return True
def on_default_clicked(self,widget):
treeselection=self.serversTreeview.get_selection()
(model, iter) = treeselection.get_selected()
try:
path=model.get_path(iter)
except:
return
self.builder.get_object('defaultEntry').set_text(model[path][0])
self.preferences.setDefaultServer(model[path][0])
|
andreif/django
|
refs/heads/master
|
tests/select_for_update/tests.py
|
123
|
from __future__ import unicode_literals
import threading
import time
from multiple_database.routers import TestRouter
from django.db import DatabaseError, connection, router, transaction
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import Person
# We need to set settings.DEBUG to True so we can capture the output SQL
# to examine.
@override_settings(DEBUG=True)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.person = Person.objects.create(name='Reinhardt')
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
Test that a TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
with self.assertRaises(transaction.TransactionManagementError):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
Test that no TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
with self.assertRaises(transaction.TransactionManagementError):
list(people)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.isAlive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
|
QUANTAXIS/QUANTAXIS
|
refs/heads/master
|
docker/kite/qa-jupyter_rust2/jupyter_notebook_config.py
|
2
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from IPython.lib import passwd
c.NotebookApp.ip = '*'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python3'
c.NotebookApp.token = ''
c.NotebookApp.password = u'sha1:a658c59030b6:910b8fff6920f60a451b19a82e465c60f4880b60'
c.NotebookApp.allow_credentials = True
c.NotebookApp.allow_origin = '*'
c.NotebookApp.allow_remote_access = True
c.NotebookApp.disable_check_xsrf = True
c.NotebookApp.tornado_settings = { 'headers': { 'Content-Security-Policy': "" }}
# sets a password if PASSWORD is set in the environment
# if 'PASSWORD' in os.environ:
# password = os.environ['PASSWORD']
# if password:
# c.NotebookApp.password = passwd(password)
# else:
# c.NotebookApp.password = 'quantaxis'
# c.NotebookApp.token = ''
# del os.environ['PASSWORD']
|
AlanZatarain/python-astm
|
refs/heads/master
|
astm/tests/test_protocol.py
|
16
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import unittest
from astm import constants
from astm.tests.utils import DummyMixIn, track_call
from astm import protocol
class DummyTimer(object):
def __init__(self, timeout, callback):
self.timeout = timeout
self.callback = callback
self._alive = False
def is_alive(self):
return self._alive
def start(self):
self._alive = True
def cancel(self):
self._alive = False
class DummyProto(DummyMixIn, protocol.ASTMProtocol):
_timer_cls = DummyTimer
class DispatcherTestCase(unittest.TestCase):
def test_found_terminator(self):
obj = DummyProto()
obj.dispatch = track_call(obj.dispatch)
obj.found_terminator()
self.assertTrue(not obj.dispatch.was_called)
obj.inbox.append(constants.ENQ)
obj.found_terminator()
self.assertTrue(obj.dispatch.was_called)
def test_found_terminator_skip_empty(self):
obj = DummyProto()
obj.dispatch = track_call(obj.dispatch)
obj.inbox.append('')
obj.inbox.append(None)
obj.found_terminator()
self.assertTrue(not obj.dispatch.was_called)
def test_on_enq(self):
obj = DummyProto()
obj.on_enq = track_call(obj.on_enq)
obj.dispatch(constants.ENQ)
self.assertTrue(obj.on_enq.was_called)
def test_on_ack(self):
obj = DummyProto()
obj.on_ack = track_call(obj.on_ack)
obj.dispatch(constants.ACK)
self.assertTrue(obj.on_ack.was_called)
def test_on_nak(self):
obj = DummyProto()
obj.on_nak = track_call(obj.on_nak)
obj.dispatch(constants.NAK)
self.assertTrue(obj.on_nak.was_called)
def test_on_eot(self):
obj = DummyProto()
obj.on_eot = track_call(obj.on_eot)
obj.dispatch(constants.EOT)
self.assertTrue(obj.on_eot.was_called)
def test_on_message(self):
obj = DummyProto()
obj.on_message = track_call(obj.on_message)
obj.dispatch(constants.STX)
self.assertTrue(obj.on_message.was_called)
    def test_default_handler(self):
obj = DummyProto()
obj.default_handler = track_call(obj.default_handler)
self.assertRaises(ValueError, obj.dispatch, b'foo')
self.assertTrue(obj.default_handler.was_called)
def test_push_event_response(self):
obj = DummyProto()
obj.on_message = lambda: '42'
obj.dispatch(constants.STX)
self.assertEqual(obj.outbox.pop(), '42')
if __name__ == '__main__':
unittest.main()
|
MrLoick/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/cgi.py
|
46
|
#! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
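# Hedged example (values illustrative, not executed): with REQUEST_METHOD
# "GET" and QUERY_STRING "a=1&a=2&b=3", parse() returns the same mapping as
# urllib.parse.parse_qs:
#
#     >>> parse(environ={"REQUEST_METHOD": "GET", "QUERY_STRING": "a=1&a=2&b=3"})
#     {'a': ['1', '2'], 'b': ['3']}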
# Query string parsing functions now provided by urllib.parse; these
# wrappers are kept to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith("--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
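# Hedged example (not executed) of the header parsing above:
#
#     >>> parse_header('form-data; name="files"; filename="report.txt"')
#     ('form-data', {'name': 'files', 'filename': 'report.txt'})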
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
    bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
self.fp = fp
self.encoding = encoding
self.errors = errors
self.headers = headers
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
    def __bool__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# first line holds boundary ; ignore it, or check that
# b"--" + ib == first_line.strip() ?
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing,self.limit-self.bytes_read,
self.encoding, self.errors)
self.bytes_read += part.bytes_read
self.list.append(part)
if self.bytes_read >= self.length:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
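# Hedged usage sketch (field names illustrative, not executed): a CGI script
# typically builds one FieldStorage per request and reads fields through the
# dictionary-style accessors defined above; a subclass overriding make_file()
# could keep uploads in a permanent location instead of unlinked temp files.
#
#     >>> form = FieldStorage()
#     >>> name = form.getfirst("name", "")
#     >>> attachments = form.getlist("attachment")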
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except os.error as msg:
print("os.error:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
PendingDeprecationWarning, stacklevel=2)
    s = s.replace("&", "&amp;") # Must be done first!
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    if quote:
        s = s.replace('"', "&quot;")
return s
def valid_boundary(s, _vb_pattern=None):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
|
stefan-jonasson/home-assistant
|
refs/heads/dev
|
tests/components/test_sleepiq.py
|
24
|
"""The tests for the SleepIQ component."""
import unittest
from unittest.mock import MagicMock, patch
import requests_mock
from homeassistant import setup
import homeassistant.components.sleepiq as sleepiq
from tests.common import load_fixture, get_test_home_assistant
def mock_responses(mock):
"""Mock responses for SleepIQ."""
base_url = 'https://api.sleepiq.sleepnumber.com/rest/'
mock.put(
base_url + 'login',
text=load_fixture('sleepiq-login.json'))
mock.get(
base_url + 'bed?_k=0987',
text=load_fixture('sleepiq-bed.json'))
mock.get(
base_url + 'sleeper?_k=0987',
text=load_fixture('sleepiq-sleeper.json'))
mock.get(
base_url + 'bed/familyStatus?_k=0987',
text=load_fixture('sleepiq-familystatus.json'))
class TestSleepIQ(unittest.TestCase):
"""Tests the SleepIQ component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
self.username = 'foo'
self.password = 'bar'
self.config = {
'sleepiq': {
'username': self.username,
'password': self.password,
}
}
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup(self, mock):
"""Test the setup."""
mock_responses(mock)
# We're mocking the load_platform discoveries or else the platforms
# will be setup during tear down when blocking till done, but the mocks
# are no longer active.
with patch(
'homeassistant.helpers.discovery.load_platform', MagicMock()):
assert sleepiq.setup(self.hass, self.config)
@requests_mock.Mocker()
def test_setup_login_failed(self, mock):
"""Test the setup if a bad username or password is given."""
mock.put('https://api.sleepiq.sleepnumber.com/rest/login',
status_code=401,
json=load_fixture('sleepiq-login-failed.json'))
response = sleepiq.setup(self.hass, self.config)
self.assertFalse(response)
def test_setup_component_no_login(self):
"""Test the setup when no login is configured."""
conf = self.config.copy()
del conf['sleepiq']['username']
assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)
def test_setup_component_no_password(self):
"""Test the setup when no password is configured."""
conf = self.config.copy()
del conf['sleepiq']['password']
assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)
|
shon/simplenote.py
|
refs/heads/master
|
docs/conf.py
|
2
|
# -*- coding: utf-8 -*-
#
# simplenote.py documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 25 17:40:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import simplenote
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'simplenote.py'
copyright = u'2011, Daniel Schauenberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = simplenote.__version__
# The full version, including alpha/beta/rc tags.
release = simplenote.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplenotepydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'simplenotepy.tex', u'simplenote.py Documentation',
u'Daniel Schauenberg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'simplenotepy', u'simplenote.py Documentation',
[u'Daniel Schauenberg'], 1)
]
|
mattharrison/PgPartition
|
refs/heads/master
|
test/testpgpartitionlib.py
|
1
|
# Copyright (c) 2010 Matt Harrison
import unittest
import sqlalchemy as sa
import pgpartitionlib
dburl = "postgres://postgres@localhost"
start_part = 1
end_part = 3
new_end = 6
class TestPgpartitionlib(unittest.TestCase):
def test_main(self):
ddl = """CREATE TABLE test_part (
key INTEGER NOT NULL,
junk NUMERIC(9,0) NOT NULL);"""
run_sql(dburl, ddl)
partitioner = pgpartitionlib.Partitioner()
create_part = partitioner.create_ddl('test_part', 'key', start_part, end_part)
run_sql(dburl, create_part, autocommit=False)
func = partitioner.function_code('test_part', 'key', start_part, end_part)
run_sql(dburl, func, autocommit=False)
trig = partitioner.trigger_code('test_part', 'key', start_part, end_part)
run_sql(dburl, trig, autocommit=False)
#test insert
insert = """INSERT INTO test_part VALUES(1, 5);"""
run_sql(dburl, insert)
select = """SELECT key from test_part_1;"""
result = run_sql(dburl, select)
self.assertEqual(list(result), [(1,)])
#test insert bad
insert = """INSERT INTO test_part VALUES(10, 5);"""
self.assertRaises(Exception, run_sql, dburl, insert)
#run_sql(dburl, insert)
select = """SELECT key from test_part;"""
result = run_sql(dburl, select)
self.assertEqual(list(result), [(1,)])
# modify to add up to 6
# create new tables
for i in range(end_part, new_end):
create_part = partitioner.create_ddl('test_part', 'key', i, i+1)
run_sql(dburl, create_part, autocommit=False)
# new function - for end values
func = partitioner.function_code('test_part', 'key', start_part, new_end)
run_sql(dburl, func, autocommit=False)
#test insert
insert = """INSERT INTO test_part VALUES(4, 4);"""
run_sql(dburl, insert)
select = """SELECT * from test_part;"""
result = run_sql(dburl, select)
from decimal import Decimal
self.assertEqual(list(result), [(1, Decimal('5')), (4, Decimal('4'))])
# test delete
insert = """DELETE FROM test_part WHERE key=1;"""
run_sql(dburl, insert)
select = """SELECT key from test_part;"""
result = run_sql(dburl, select)
self.assertEqual(list(result), [(4,)])
# # drop part
# drop_part = partitioner.drop_ddl('test_part', 'key', start_part, new_end)
# run_sql(dburl, drop_part, autocommit=False)
def tearDown(self):
# drop part
partitioner = pgpartitionlib.Partitioner()
for i in range(start_part, new_end):
drop_part = partitioner.drop_ddl('test_part', 'key', i, i+1)
try:
run_sql(dburl, drop_part, autocommit=False)
except sa.exceptions.ProgrammingError, e:
if 'does not exist' in str(e):
continue
else:
raise
drop_master = """DROP table test_part;"""
run_sql(dburl, drop_master, autocommit=False)
def run_sql(dburl, sql, autocommit=True, **kw):
"""
Allow up to 2 gigs mem usage.
Postgres doesn't like to vacuum in autocommit, so make it False for that
"""
engine = sa.create_engine(dburl)
connection = engine.connect()
if not autocommit:
import psycopg2.extensions
raw = engine.raw_connection()
connection.connection.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
for key in kw:
if key in {'work_mem':1, 'maintenance_work_mem':1}:
connection.execute('set %s = %d;' % (key, kw[key]))
# for speedy index creation
#results = connection.execute('set maintenance_work_mem = %d;' % mem)
results = connection.execute(sql)
if autocommit:
connection._commit_impl()
#connection.commit()
return results
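# Hedged usage sketch (statement illustrative): per the docstring above,
# statements that cannot run inside a transaction block are issued with
# autocommit=False so the psycopg2 connection is switched to autocommit
# isolation first, e.g.:
#
#     run_sql(dburl, "VACUUM;", autocommit=False)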
if __name__ == '__main__':
unittest.main()
|
damnfine/mezzanine
|
refs/heads/master
|
mezzanine/accounts/defaults.py
|
44
|
"""
Default settings for the mezzanine.accounts app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
register_setting(
name="ACCOUNTS_MIN_PASSWORD_LENGTH",
description=_("Minimum length for passwords"),
editable=False,
default=6,
)
register_setting(
name="ACCOUNTS_NO_USERNAME",
description=_("If ``True``, the username field will be excluded "
"from sign up and account update forms."),
editable=False,
default=False,
)
register_setting(
name="ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS",
description=_("List of fields to exclude from the profile form."),
editable=False,
default=(),
)
register_setting(
name="ACCOUNTS_PROFILE_FORM_CLASS",
description=_("Dotted package path and class name of profile form to use "
"for users signing up and updating their profile, when "
"``mezzanine.accounts`` is installed."),
editable=False,
default="mezzanine.accounts.forms.ProfileForm",
)
register_setting(
name="ACCOUNTS_PROFILE_VIEWS_ENABLED",
description=_("If ``True``, users will have their own public profile "
"pages."),
editable=False,
default=False,
)
register_setting(
name="ACCOUNTS_VERIFICATION_REQUIRED",
description=_("If ``True``, when users create an account, they will be "
"sent an email with a verification link, which they must click to "
"enable their account."),
editable=False,
default=False,
)
register_setting(
name="ACCOUNTS_APPROVAL_REQUIRED",
description=_("If ``True``, when users create an account, they will "
"not be enabled by default and a staff member will need to activate "
"their account in the admin interface."),
editable=False,
default=False,
)
register_setting(
name="ACCOUNTS_APPROVAL_EMAILS",
label=_("Account approval email addresses"),
description=_("A comma separated list of email addresses that "
"will receive an email notification each time a "
"new account is created that requires approval."),
editable=True,
default="",
)
|
Bysmyyr/blink-crosswalk
|
refs/heads/master
|
Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
|
21
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import port_testcase
from webkitpy.tool.mocktool import MockOptions
class MacPortTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'snowleopard'
port_name = 'mac'
full_port_name = 'mac-snowleopard'
port_maker = mac.MacPort
def assert_name(self, port_name, os_version_string, expected):
port = self.make_port(os_version=os_version_string, port_name=port_name)
self.assertEqual(expected, port.name())
def test_versions(self):
self.assertTrue(self.make_port().name() in ('mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac-mavericks'))
self.assert_name(None, 'snowleopard', 'mac-snowleopard')
self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'snowleopard', 'mac-snowleopard')
self.assert_name(None, 'lion', 'mac-lion')
self.assert_name(None, 'mountainlion', 'mac-mountainlion')
self.assert_name(None, 'mavericks', 'mac-mavericks')
self.assert_name(None, 'future', 'mac-mavericks')
self.assert_name('mac', 'lion', 'mac-lion')
self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')
def test_baseline_path(self):
port = self.make_port(port_name='mac-snowleopard')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-snowleopard'))
port = self.make_port(port_name='mac-lion')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-lion'))
port = self.make_port(port_name='mac-mountainlion')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-mountainlion'))
port = self.make_port(port_name='mac-mavericks')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac'))
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = MockOptions(configuration='Release', build_directory='/foo')
self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = MockOptions(configuration='Release', build_directory='foo')
self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
# Test that we prefer the legacy dir over the new dir.
options = MockOptions(configuration='Release', build_directory=None)
self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release', '/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')
def test_build_path_timestamps(self):
options = MockOptions(configuration='Release', build_directory=None)
port = self.make_port(options=options)
port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
# Check with 'out' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
# Check with 'xcodebuild' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
def test_path_to_image_diff(self):
self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
|
ric96/joypi
|
refs/heads/master
|
joypi.py
|
1
|
import time
import signal
import pygame
import sys
import RPi.GPIO as GPIO
# The following is example code written to control the L298N motor controller
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT) #input-1
GPIO.setup(12, GPIO.OUT) #input-2
GPIO.setup(15, GPIO.OUT) #input-3
GPIO.setup(16, GPIO.OUT) #input-4
GPIO.setup(7, GPIO.IN) #Front Left
GPIO.setup(8, GPIO.IN) #Front Center
GPIO.setup(10, GPIO.IN) #Front Right
GPIO.setup(23, GPIO.IN) #Back Left
GPIO.setup(24, GPIO.IN) #Back Center
GPIO.setup(26, GPIO.IN) #Back Right
GPIO.setup(18, GPIO.OUT) #Status-LED
pygame.init()
done = False
# Initialize the joysticks
pygame.joystick.init()
################ Movement Definitions BEGIN #######################
def forward_left():
print "FL"
GPIO.output(11, False)
GPIO.output(12, False)
GPIO.output(16, True)
GPIO.output(15, False)
def forward_right():
print "FR"
GPIO.output(11, True)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(15, False)
def backward_left():
print "BL"
GPIO.output(11, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(15, True)
def backward_right():
print "BR"
GPIO.output(11, False)
GPIO.output(12, True)
GPIO.output(16, False)
GPIO.output(15, False)
def forward():
print "F"
GPIO.output(11, True)
GPIO.output(12, False)
GPIO.output(16, True)
GPIO.output(15, False)
def backward():
print "B"
GPIO.output(11, False)
GPIO.output(12, True)
GPIO.output(16, False)
GPIO.output(15, True)
def left():
print "L"
GPIO.output(11, False)
GPIO.output(12, True)
GPIO.output(16, True)
GPIO.output(15, False)
def right():
print "R"
GPIO.output(11, True)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(15, True)
def nutral():
print "N"
GPIO.output(11, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(15, False)
########################## Movement Definitions END ########################
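# Hedged summary of the wiring assumed by the definitions above: pins 11/12
# (input-1/input-2) drive one motor of the L298N and pins 15/16
# (input-3/input-4) drive the other; setting exactly one pin of a pair high
# selects that motor's direction, and all four low is neutral (stop).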
GPIO.output(18, True) #Status-LED-On
def sigint_handler(signum, frame): #Catching Ctrl+c
GPIO.output(18, False) #Status-LED-Off
pygame.quit()
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
GPIO.output(18, False) #Status-LED-Off
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print ("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print ("Joystick button released.")
joystick_count = pygame.joystick.get_count() #Get Joystick Count
if joystick_count == 0:
GPIO.output(18, False) #Status-LED-Off
# For each joystick:
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
joystick.init()
#Sensor Input
fl = GPIO.input(7)
fc = GPIO.input(8)
fr = GPIO.input(10)
bl = GPIO.input(23)
bc = GPIO.input(24)
br = GPIO.input(26)
#Start Writing Your Code From Here
if ((fc == 1 or fr == 1 or fl == 1) and (bc == 1 or br == 1 or bl == 1)):
if (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
else:
if (fc == 1): #forward_sensor blocked
if (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
backward_left()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
backward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
nutral()
elif (joystick.get_axis(1) > 0.5): #backward
backward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
elif (bc == 1):
if (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
forward_left()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
forward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
forward()
elif (joystick.get_axis(1) > 0.5): #backward
nutral()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
elif (fl == 1): #forward_left sensor blocked
if (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
nutral()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
forward_right()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
backward_left()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
backward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
nutral()
elif (joystick.get_axis(1) > 0.5): #backward
backward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
elif (fr == 1): #forward_right sensor blocked
if (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
nutral()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
forward_left()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
backward_left()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
backward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
nutral()
elif (joystick.get_axis(1) > 0.5): #backward
backward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
elif (bl == 1):
if (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
nutral()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
forward_left()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
forward_right()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
backward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
forward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
elif (br == 1):
if (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
nutral()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
forward_left()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
forward_right()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
backward_left()
elif (joystick.get_axis(1) < -0.5): #Forward
forward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
else: # All sensors free and also the controll flow for movement
if (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) < -0.5): #Forward_Left
forward_left()
elif (joystick.get_axis(1) < -0.5 and joystick.get_axis(0) > 0.5): #Forward_Right
forward_right()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) < -0.5): #Backward_Left
backward_left()
elif (joystick.get_axis(1) > 0.5 and joystick.get_axis(0) > 0.5): #Backward_Right
backward_right()
elif (joystick.get_axis(1) < -0.5): #Forward
forward()
elif (joystick.get_axis(1) > 0.5): #backward
backward()
elif (joystick.get_axis(0) < -0.5): #Left
left()
elif (joystick.get_axis(0) > 0.5): #Right
right()
else:
nutral()
time.sleep(0.01) #refresh rate
# ALL CODE SHOULD GO ABOVE THIS COMMENT
# Use Ctrl+C to quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
XiaosongWei/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/process_json_data_unittest.py
|
59
|
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import unittest
from webkitpy.layout_tests.generate_results_dashboard import ProcessJsonData
class ProcessJsonDataTester(unittest.TestCase):
def test_check_failing_results(self):
valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT', u'PASS']}}}}}}
process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2])
actual_result = process_json_data.generate_archived_result()
self.assertEqual(expected_result, actual_result)
def test_check_full_results(self):
valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'archived_results': [u'TEXT', u'TEXT']}}}}}}
process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2])
actual_result = process_json_data.generate_archived_result()
self.assertEqual(expected_result, actual_result)
def test_null_check(self):
valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT']}}}}}}
process_json_data = ProcessJsonData(valid_json_data, [], [])
actual_result = process_json_data.generate_archived_result()
self.assertEqual(expected_result, actual_result)
|
apache/bigtop
|
refs/heads/master
|
bigtop-packages/src/charm/hbase/layer-hbase/tests/01-basic-deployment.py
|
10
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
"""
Trivial deployment test for Apache Bigtop HBase.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('hbase')
cls.d.setup(timeout=1800)
cls.d.sentry.wait_for_messages({'hbase': re.compile('ready')}, timeout=1800)
cls.unit = cls.d.sentry['hbase'][0]
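    def test_deploy(self):
        # A minimal smoke check added as an illustrative sketch; the deployment
        # itself is exercised by setUpClass waiting for the 'ready' status, so
        # this only asserts that the hbase unit was actually returned.
        self.assertTrue(self.unit is not None)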
if __name__ == '__main__':
unittest.main()
|
KrishMunot/swift
|
refs/heads/master
|
test/Driver/Dependencies/Inputs/fake-build-for-bitcode.py
|
12
|
#!/usr/bin/env python
# fake-build-for-bitcode.py - Fake build with -embed-bitcode -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Emulates the frontend of an -embed-bitcode job. That means we have to handle
# -emit-bc and -c actions.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
assert sys.argv[1] == '-frontend'
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
outputFile = sys.argv[sys.argv.index('-o') + 1]
# Update the output file mtime, or create it if necessary.
# From http://stackoverflow.com/a/1160227.
with open(outputFile, 'a'):
os.utime(outputFile, None)
if '-emit-bc' in sys.argv:
print("Handled", os.path.basename(primaryFile))
elif '-c' in sys.argv:
print("Produced", os.path.basename(outputFile))
else:
assert False, "unknown action"
|
lextoumbourou/txstripe
|
refs/heads/master
|
setup.py
|
1
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='txstripe',
version='0.1.0',
description='Stripe Twisted bindings',
author='Lex Toumbourou',
author_email='lextoumbourou@gmail.com',
url='https://github.com/lextoumbourou/txstripe',
packages=['stripe', 'txstripe'],
install_requires=['Twisted', 'treq'],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
bayasist/vbox
|
refs/heads/master
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/PatchPcdValue/PatchPcdValue.py
|
11
|
## @file
# Patch value into the binary file.
#
# Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import os
import sys
import re
from optparse import OptionParser
from optparse import make_option
from Common.BuildToolError import *
import Common.EdkLogger as EdkLogger
from Common.BuildVersion import gBUILD_VERSION
import array
# Version and Copyright
__version_number__ = ("0.10" + " " + gBUILD_VERSION)
__version__ = "%prog Version " + __version_number__
__copyright__ = "Copyright (c) 2010, Intel Corporation. All rights reserved."
## PatchBinaryFile method
#
# This method mainly patches the data into binary file.
#
# @param FileName File path of the binary file
# @param ValueOffset Offset value
# @param TypeName DataType Name
# @param Value Value String
# @param MaxSize MaxSize value
#
# @retval 0 File is updated successfully.
# @retval not 0 File update failed.
#
def PatchBinaryFile(FileName, ValueOffset, TypeName, ValueString, MaxSize=0):
#
# Length of Binary File
#
FileHandle = open (FileName, 'rb')
FileHandle.seek (0, 2)
FileLength = FileHandle.tell()
FileHandle.close()
#
# Unify string to upper string
#
TypeName = TypeName.upper()
#
# Get PCD value data length
#
ValueLength = 0
if TypeName == 'BOOLEAN':
ValueLength = 1
elif TypeName == 'UINT8':
ValueLength = 1
elif TypeName == 'UINT16':
ValueLength = 2
elif TypeName == 'UINT32':
ValueLength = 4
elif TypeName == 'UINT64':
ValueLength = 8
elif TypeName == 'VOID*':
if MaxSize == 0:
return OPTION_MISSING, "PcdMaxSize is not specified for VOID* type PCD."
ValueLength = MaxSize
else:
        return PARAMETER_INVALID, "PCD type %s is not valid." %(TypeName)
#
# Check PcdValue is in the input binary file.
#
if ValueOffset + ValueLength > FileLength:
return PARAMETER_INVALID, "PcdOffset + PcdMaxSize(DataType) is larger than the input file size."
#
# Read binary file into array
#
FileHandle = open (FileName, 'rb')
ByteArray = array.array('B')
ByteArray.fromfile(FileHandle, FileLength)
FileHandle.close()
OrigByteList = ByteArray.tolist()
ByteList = ByteArray.tolist()
#
# Clear the data in file
#
for Index in range(ValueLength):
ByteList[ValueOffset + Index] = 0
#
# Patch value into offset
#
ValueString = ValueString.upper()
ValueNumber = 0
if TypeName == 'BOOLEAN':
#
# Get PCD value for BOOLEAN data type
#
try:
if ValueString == 'TRUE':
ValueNumber = 1
elif ValueString == 'FALSE':
ValueNumber = 0
elif ValueString.startswith('0X'):
                ValueNumber = int (ValueString, 16)
else:
                ValueNumber = int (ValueString)
if ValueNumber != 0:
ValueNumber = 1
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string." %(ValueString)
#
# Set PCD value into binary data
#
ByteList[ValueOffset] = ValueNumber
elif TypeName in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
#
# Get PCD value for UINT* data type
#
try:
if ValueString.startswith('0X'):
ValueNumber = int (ValueString, 16)
else:
ValueNumber = int (ValueString)
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string." %(ValueString)
#
# Set PCD value into binary data
#
for Index in range(ValueLength):
ByteList[ValueOffset + Index] = ValueNumber % 0x100
ValueNumber = ValueNumber / 0x100
elif TypeName == 'VOID*':
if ValueString.startswith("L "):
#
# Patch Unicode String
#
Index = 0
for ByteString in ValueString[2:]:
#
# Reserve zero as unicode tail
#
if Index + 2 >= ValueLength:
break
#
# Set string value one by one
#
ByteList[ValueOffset + Index] = ord(ByteString)
Index = Index + 2
elif ValueString.startswith("{") and ValueString.endswith("}"):
#
# Patch {0x1, 0x2, ...} byte by byte
#
ValueList = ValueString[1 : len(ValueString) - 1].split(', ')
Index = 0
try:
for ByteString in ValueList:
if ByteString.upper().startswith('0X'):
ByteValue = int(ByteString, 16)
else:
ByteValue = int(ByteString)
ByteList[ValueOffset + Index] = ByteValue % 0x100
Index = Index + 1
if Index >= ValueLength:
break
except:
return PARAMETER_INVALID, "PCD Value %s is not valid dec or hex string array." %(ValueString)
else:
#
# Patch ascii string
#
Index = 0
for ByteString in ValueString:
#
# Reserve zero as string tail
#
if Index + 1 >= ValueLength:
break
#
# Set string value one by one
#
ByteList[ValueOffset + Index] = ord(ByteString)
Index = Index + 1
#
# Update new data into input file.
#
if ByteList != OrigByteList:
ByteArray = array.array('B')
ByteArray.fromlist(ByteList)
FileHandle = open (FileName, 'wb')
ByteArray.tofile(FileHandle)
FileHandle.close()
return 0, "Patch Value into File %s successfully." %(FileName)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Options A optparse.Values object containing the parsed options
# @retval InputFile Path of the file to be patched
#
def Options():
OptionList = [
make_option("-f", "--offset", dest="PcdOffset", action="store", type="int",
help="Start offset to the image is used to store PCD value."),
make_option("-u", "--value", dest="PcdValue", action="store",
help="PCD value will be updated into the image."),
make_option("-t", "--type", dest="PcdTypeName", action="store",
help="The name of PCD data type may be one of VOID*,BOOLEAN, UINT8, UINT16, UINT32, UINT64."),
make_option("-s", "--maxsize", dest="PcdMaxSize", action="store", type="int",
help="Max size of data buffer is taken by PCD value.It must be set when PCD type is VOID*."),
make_option("-v", "--verbose", dest="LogLevel", action="store_const", const=EdkLogger.VERBOSE,
help="Run verbosely"),
make_option("-d", "--debug", dest="LogLevel", type="int",
help="Run with debug information"),
make_option("-q", "--quiet", dest="LogLevel", action="store_const", const=EdkLogger.QUIET,
help="Run quietly"),
make_option("-?", action="help", help="show this help message and exit"),
]
# use clearer usage to override default usage message
UsageString = "%prog -f Offset -u Value -t Type [-s MaxSize] <input_file>"
Parser = OptionParser(description=__copyright__, version=__version__, option_list=OptionList, usage=UsageString)
Parser.set_defaults(LogLevel=EdkLogger.INFO)
Options, Args = Parser.parse_args()
# error check
if len(Args) == 0:
EdkLogger.error("PatchPcdValue", PARAMETER_INVALID, ExtraData=Parser.get_usage())
InputFile = Args[len(Args) - 1]
return Options, InputFile
## Entrance method
#
# This method mainly dispatches to specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can know
# whether it executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
try:
#
# Check input parameter
#
EdkLogger.Initialize()
CommandOptions, InputFile = Options()
if CommandOptions.LogLevel < EdkLogger.DEBUG_9:
EdkLogger.SetLevel(CommandOptions.LogLevel + 1)
else:
EdkLogger.SetLevel(CommandOptions.LogLevel)
if not os.path.exists (InputFile):
EdkLogger.error("PatchPcdValue", FILE_NOT_FOUND, ExtraData=InputFile)
return 1
if CommandOptions.PcdOffset == None or CommandOptions.PcdValue == None or CommandOptions.PcdTypeName == None:
EdkLogger.error("PatchPcdValue", OPTION_MISSING, ExtraData="PcdOffset or PcdValue of PcdTypeName is not specified.")
return 1
if CommandOptions.PcdTypeName.upper() not in ["BOOLEAN", "UINT8", "UINT16", "UINT32", "UINT64", "VOID*"]:
EdkLogger.error("PatchPcdValue", PARAMETER_INVALID, ExtraData="PCD type %s is not valid." %(CommandOptions.PcdTypeName))
return 1
if CommandOptions.PcdTypeName.upper() == "VOID*" and CommandOptions.PcdMaxSize == None:
EdkLogger.error("PatchPcdValue", OPTION_MISSING, ExtraData="PcdMaxSize is not specified for VOID* type PCD.")
return 1
#
# Patch value into binary image.
#
ReturnValue, ErrorInfo = PatchBinaryFile (InputFile, CommandOptions.PcdOffset, CommandOptions.PcdTypeName, CommandOptions.PcdValue, CommandOptions.PcdMaxSize)
if ReturnValue != 0:
EdkLogger.error("PatchPcdValue", ReturnValue, ExtraData=ErrorInfo)
return 1
return 0
except:
return 1
if __name__ == '__main__':
r = Main()
sys.exit(r)
|
NorfolkDataSci/presentations
|
refs/heads/master
|
2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/sentiment-analysis/nltk/cluster/__init__.py
|
7
|
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is unannotated with class information, unlike the case for
supervised learning. Annotated data is difficult and expensive to obtain in
the quantities required for the majority of supervised learning algorithms.
This problem, the knowledge acquisition bottleneck, is common to most natural
language processing tasks, thus fueling the need for quality unsupervised
approaches.
This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.
The K-means clusterer starts with k arbitrarily chosen means, then allocates each
vector to the cluster with the closest mean. It then recalculates the means of
each cluster as the centroid of the vectors in the cluster. This process
repeats until the cluster memberships stabilise. This is a hill-climbing
algorithm which may converge to a local maximum. Hence the clustering is
often repeated with random initial means and the most commonly occurring
output means are chosen.
The GAAC clusterer starts with each of the *N* vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendrogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters *c*, *1 <= c <= N*, can be found by
cutting the dendrogram at depth *c*.
The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.
They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include:
- cluster: clusters a sequence of vectors
- classify: assign a vector to a cluster
- classification_probdist: give the probability distribution over cluster memberships
The existing clusterers also extend cluster.VectorSpaceClusterer, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.
Usage example (see also demo())::
from nltk import cluster
from nltk.cluster import euclidean_distance
from numpy import array
vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
# initialise the clusterer (will also assign the vectors to clusters)
clusterer = cluster.KMeansClusterer(2, euclidean_distance)
clusterer.cluster(vectors, True)
# classify a new vector
print(clusterer.classify(array([3, 3])))
Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""
from nltk.cluster.util import (VectorSpaceClusterer, Dendrogram,
euclidean_distance, cosine_distance)
from nltk.cluster.kmeans import KMeansClusterer
from nltk.cluster.gaac import GAAClusterer
from nltk.cluster.em import EMClusterer
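# A minimal, guarded usage sketch for the agglomerative clusterer described in
# the docstring above; it reuses the same toy vectors as the k-means example.
# The constructor arguments are assumptions based on the classes imported above,
# not authoritative API documentation (the EM clusterer additionally requires
# initial means, so it is omitted here).
if __name__ == '__main__':
    from numpy import array
    _vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
    _gaac = GAAClusterer(num_clusters=2)
    _gaac.cluster(_vectors, True)
    # classify a new vector against the two discovered clusters
    print(_gaac.classify(array([3, 3])))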
|
pjdelport/django
|
refs/heads/master
|
django/contrib/gis/db/models/proxy.py
|
220
|
"""
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.contrib.gis import memoryview
from django.utils import six
class GeometryProxy(object):
def __init__(self, klass, field):
"""
Proxy initializes on the given Geometry class (not an instance) and
the GeometryField.
"""
self._field = field
self._klass = klass
def __get__(self, obj, type=None):
"""
This accessor retrieves the geometry, initializing it using the geometry
class specified during initialization and the HEXEWKB value of the field.
Currently, only GEOS or OGR geometries are supported.
"""
if obj is None:
# Accessed on a class, not an instance
return self
# Getting the value of the field.
geom_value = obj.__dict__[self._field.attname]
if isinstance(geom_value, self._klass):
geom = geom_value
elif (geom_value is None) or (geom_value==''):
geom = None
else:
# Otherwise, a Geometry object is built using the field's contents,
# and the model's corresponding attribute is set.
geom = self._klass(geom_value)
setattr(obj, self._field.attname, geom)
return geom
def __set__(self, obj, value):
"""
This accessor sets the proxied geometry with the geometry class
specified during initialization. Values of None, HEXEWKB, or WKT may
be used to set the geometry as well.
"""
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
# Assigning the SRID to the geometry.
if value.srid is None: value.srid = self._field.srid
elif value is None or isinstance(value, six.string_types + (memoryview,)):
# Set with None, WKT, HEX, or WKB
pass
else:
raise TypeError('cannot set %s GeometryProxy with value of type: %s' % (obj.__class__.__name__, type(value)))
# Setting the objects dictionary with the value, and returning.
obj.__dict__[self._field.attname] = value
return value
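# A minimal sketch of the descriptor behaviour documented above, using throwaway
# stand-in classes rather than real Django geometry fields; the names below are
# illustrative assumptions, not Django API.
if __name__ == '__main__':
    class _DemoGeometry(object):
        geom_type = 'POINT'
        def __init__(self, wkt):
            self.wkt = wkt
            self.srid = None
    class _DemoField(object):
        attname = 'point'
        geom_type = 'POINT'
        srid = 4326
    class _DemoModel(object):
        point = GeometryProxy(_DemoGeometry, _DemoField())
    obj = _DemoModel()
    obj.point = 'POINT(1 1)'          # stored as the raw string via __set__
    print(type(obj.point).__name__)   # lazily wrapped as _DemoGeometry by __get__
    print(obj.point.srid)             # srid assigned from the field (4326)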
|
GedheFoundation/sidesa2.0
|
refs/heads/master
|
laporan_profil.py
|
1
|
#Boa:Frame:laporan_profil_desa
import wx
import sqlite3
def connect():
    db = sqlite3.connect('sidesa')
    return db
def create(parent):
return laporan_profil_desa(parent)
[wxID_LAPORAN_PROFIL_DESA, wxID_LAPORAN_PROFIL_DESAINPUT_ALAMAT,
wxID_LAPORAN_PROFIL_DESAINPUT_DESA, wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_1,
wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_2, wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_3,
wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_4, wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_5,
wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_6,
wxID_LAPORAN_PROFIL_DESAINPUT_KABUPATEN, wxID_LAPORAN_PROFIL_DESAINPUT_KADES,
wxID_LAPORAN_PROFIL_DESAINPUT_KECAMATAN, wxID_LAPORAN_PROFIL_DESAINPUT_KODE,
wxID_LAPORAN_PROFIL_DESAINPUT_PROPINSI, wxID_LAPORAN_PROFIL_DESAINPUT_SEKDES,
wxID_LAPORAN_PROFIL_DESAINPUT_WEB, wxID_LAPORAN_PROFIL_DESALABEL_ALAMAT,
wxID_LAPORAN_PROFIL_DESALABEL_DUKUH, wxID_LAPORAN_PROFIL_DESALABEL_KADES,
wxID_LAPORAN_PROFIL_DESALABEL_KECAMATAN,
wxID_LAPORAN_PROFIL_DESALABEL_KODE_DESA,
wxID_LAPORAN_PROFIL_DESALABEL_NAMA_DESA,
wxID_LAPORAN_PROFIL_DESALABEL_PROPINSI, wxID_LAPORAN_PROFIL_DESALABEL_SEKDES,
wxID_LAPORAN_PROFIL_DESALABEL_WEB, wxID_LAPORAN_PROFIL_DESALABLE_KABUPATEN,
wxID_LAPORAN_PROFIL_DESALOGO, wxID_LAPORAN_PROFIL_DESASTATICTEXT1,
wxID_LAPORAN_PROFIL_DESASTATICTEXT3, wxID_LAPORAN_PROFIL_DESASTATICTEXT4,
wxID_LAPORAN_PROFIL_DESASTATICTEXT5, wxID_LAPORAN_PROFIL_DESASTATICTEXT6,
wxID_LAPORAN_PROFIL_DESASTATICTEXT7,
wxID_LAPORAN_PROFIL_DESATOMBOL_CETAK_LAPORAN,
wxID_LAPORAN_PROFIL_DESATOMBOL_KEMBALI,
] = [wx.NewId() for _init_ctrls in range(35)]
class laporan_profil_desa(wx.Frame):
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_LAPORAN_PROFIL_DESA,
name=u'laporan_profil_desa', parent=prnt, pos=wx.Point(324, 272),
size=wx.Size(911, 325), style=wx.DEFAULT_FRAME_STYLE,
title=u'Laporan Profil Desa')
self.SetClientSize(wx.Size(911, 325))
self.Center(wx.BOTH)
self.logo = wx.StaticBitmap(bitmap=wx.NullBitmap,
id=wxID_LAPORAN_PROFIL_DESALOGO, name=u'logo', parent=self,
pos=wx.Point(16, 16), size=wx.Size(152, 144), style=0)
self.label_nama_desa = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_NAMA_DESA,
label=u'Nama Desa', name=u'label_nama_desa', parent=self,
pos=wx.Point(192, 16), size=wx.Size(77, 17), style=0)
self.label_kecamatan = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_KECAMATAN,
label=u'Kecamatan', name=u'label_kecamatan', parent=self,
pos=wx.Point(192, 48), size=wx.Size(73, 17), style=0)
self.lable_kabupaten = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABLE_KABUPATEN,
label=u'Kabupaten', name=u'lable_kabupaten', parent=self,
pos=wx.Point(192, 80), size=wx.Size(70, 17), style=0)
self.label_propinsi = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_PROPINSI,
label=u'Propinsi', name=u'label_propinsi', parent=self,
pos=wx.Point(192, 112), size=wx.Size(51, 17), style=0)
self.input_desa = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DESA,
name=u'input_desa', parent=self, pos=wx.Point(280, 16),
size=wx.Size(240, 24), style=0, value='')
self.input_kabupaten = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_KABUPATEN,
name=u'input_kabupaten', parent=self, pos=wx.Point(280, 80),
size=wx.Size(240, 24), style=0, value='')
self.input_propinsi = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_PROPINSI,
name=u'input_propinsi', parent=self, pos=wx.Point(280, 112),
size=wx.Size(240, 24), style=0, value='')
self.input_web = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_WEB,
name=u'input_web', parent=self, pos=wx.Point(656, 16),
size=wx.Size(240, 24), style=0, value='')
self.input_kode = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_KODE,
name=u'input_kode', parent=self, pos=wx.Point(656, 48),
size=wx.Size(240, 24), style=0, value='')
self.input_kades = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_KADES,
name=u'input_kades', parent=self, pos=wx.Point(656, 80),
size=wx.Size(240, 24), style=0, value='')
self.input_sekdes = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_SEKDES,
name=u'input_sekdes', parent=self, pos=wx.Point(656, 112),
size=wx.Size(240, 24), style=0, value='')
self.label_web = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_WEB,
label=u'Alamat Web', name=u'label_web', parent=self,
pos=wx.Point(544, 16), size=wx.Size(104, 17), style=0)
self.label_kode_desa = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_KODE_DESA,
label=u'No Kode Desa', name=u'label_kode_desa', parent=self,
pos=wx.Point(544, 48), size=wx.Size(91, 17), style=0)
self.label_kades = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_KADES,
label=u'Nama KADES', name=u'label_kades', parent=self,
pos=wx.Point(544, 80), size=wx.Size(88, 17), style=0)
self.label_sekdes = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_SEKDES,
label=u'Nama SEKDES', name=u'label_sekdes', parent=self,
pos=wx.Point(544, 112), size=wx.Size(96, 17), style=0)
self.label_alamat = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_ALAMAT,
label=u'Alamat', name=u'label_alamat', parent=self,
pos=wx.Point(192, 144), size=wx.Size(47, 17), style=0)
self.label_dukuh = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESALABEL_DUKUH,
label=u'Daftar Nama Dusun / Dukuh', name=u'label_dukuh',
parent=self, pos=wx.Point(24, 184), size=wx.Size(576, 17),
style=0)
self.staticText1 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT1,
label=u'1.', name='staticText1', parent=self, pos=wx.Point(16,
208), size=wx.Size(13, 17), style=0)
self.input_dukuh_1 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_1,
name=u'input_dukuh_1', parent=self, pos=wx.Point(40, 208),
size=wx.Size(256, 24), style=0, value='')
self.input_dukuh_2 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_2,
name=u'input_dukuh_2', parent=self, pos=wx.Point(40, 240),
size=wx.Size(256, 24), style=0, value='')
self.input_dukuh_3 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_3,
name=u'input_dukuh_3', parent=self, pos=wx.Point(336, 208),
size=wx.Size(256, 24), style=0, value='')
self.input_dukuh_4 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_4,
name=u'input_dukuh_4', parent=self, pos=wx.Point(336, 240),
size=wx.Size(256, 24), style=0, value='')
self.input_dukuh_5 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_5,
name=u'input_dukuh_5', parent=self, pos=wx.Point(640, 208),
size=wx.Size(256, 24), style=0, value='')
self.input_dukuh_6 = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_DUKUH_6,
name=u'input_dukuh_6', parent=self, pos=wx.Point(640, 240),
size=wx.Size(256, 24), style=0, value='')
self.input_alamat = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_ALAMAT,
name=u'input_alamat', parent=self, pos=wx.Point(280, 144),
size=wx.Size(616, 24), style=0, value='')
self.staticText3 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT3,
label=u'2.', name='staticText3', parent=self, pos=wx.Point(16,
240), size=wx.Size(13, 17), style=0)
self.staticText4 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT4,
label=u'3.', name='staticText4', parent=self, pos=wx.Point(312,
208), size=wx.Size(13, 17), style=0)
self.staticText5 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT5,
label=u'4.', name='staticText5', parent=self, pos=wx.Point(312,
240), size=wx.Size(13, 17), style=0)
self.staticText6 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT6,
label=u'5.', name='staticText6', parent=self, pos=wx.Point(616,
208), size=wx.Size(13, 17), style=0)
self.staticText7 = wx.StaticText(id=wxID_LAPORAN_PROFIL_DESASTATICTEXT7,
label=u'6.', name='staticText7', parent=self, pos=wx.Point(616,
240), size=wx.Size(13, 17), style=0)
self.input_kecamatan = wx.TextCtrl(id=wxID_LAPORAN_PROFIL_DESAINPUT_KECAMATAN,
name=u'input_kecamatan', parent=self, pos=wx.Point(280, 48),
size=wx.Size(240, 24), style=0, value='')
self.tombol_kembali = wx.Button(id=wxID_LAPORAN_PROFIL_DESATOMBOL_KEMBALI,
label=u'Kembali Ke Menu', name=u'tombol_kembali', parent=self,
pos=wx.Point(480, 280), size=wx.Size(184, 30), style=0)
self.tombol_kembali.Bind(wx.EVT_BUTTON, self.OnTombol_kembaliButton,
id=wxID_LAPORAN_PROFIL_DESATOMBOL_KEMBALI)
self.tombol_cetak_laporan = wx.Button(id=wxID_LAPORAN_PROFIL_DESATOMBOL_CETAK_LAPORAN,
label=u'Cetak Laporan', name=u'tombol_cetak_laporan', parent=self,
pos=wx.Point(248, 280), size=wx.Size(216, 30), style=0)
self.tombol_cetak_laporan.Bind(wx.EVT_BUTTON,
self.OnTombol_cetak_laporanButton,
id=wxID_LAPORAN_PROFIL_DESATOMBOL_CETAK_LAPORAN)
def __init__(self, parent):
self._init_ctrls(parent)
def OnTombol_kembaliButton(self, event):
self.Close()
def OnTombol_cetak_laporanButton(self, event):
event.Skip()
|
hustodemon/spacewalk
|
refs/heads/master
|
backend/server/action/kickstart_guest.py
|
2
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
from spacewalk.server.rhnLib import InvalidAction, ShadowAction
from spacewalk.server.action.utils import SubscribedChannel, \
ChannelPackage, \
PackageInstallScheduler, \
NoActionInfo, \
PackageNotFound
from spacewalk.server.rhnChannel import subscribe_to_tools_channel
__rhnexport__ = ['initiate', 'schedule_virt_guest_pkg_install', 'add_tools_channel']
_query_initiate_guest = rhnSQL.Statement("""
select ksd.label as profile_name, akg.kickstart_host, kvt.label as virt_type,
akg.mem_kb, akg.vcpus, akg.disk_path, akg.virt_bridge, akg.cobbler_system_name,
akg.disk_gb, akg.append_string,
akg.guest_name, akg.ks_session_id from rhnActionKickstartGuest akg,
rhnKSData ksd, rhnKickstartSession ksess,
rhnKickstartDefaults ksdef, rhnKickstartVirtualizationType kvt
where akg.action_id = :action_id
and ksess.kickstart_id = ksd.id
and ksess.id = akg.ks_session_id
and ksdef.kickstart_id = ksd.id
and ksdef.virtualization_type = kvt.id
""")
def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0):
"""
ShadowAction that schedules a package installation action for the
rhn-virtualization-guest package.
"""
log_debug(3)
virt_host_package_name = "rhn-virtualization-guest"
tools_channel = SubscribedChannel(server_id, "rhn-tools")
found_tools_channel = tools_channel.is_subscribed_to_channel()
if not found_tools_channel:
raise InvalidAction("System not subscribed to the RHN Tools channel.")
rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)
if not rhn_v12n_package.exists():
raise InvalidAction("Could not find the rhn-virtualization-guest package.")
try:
install_scheduler = PackageInstallScheduler(server_id, action_id, rhn_v12n_package)
if (not dry_run):
install_scheduler.schedule_package_install()
else:
log_debug(4, "dry run requested")
except NoActionInfo, nai:
raise InvalidAction(str(nai)), None, sys.exc_info()[2]
except PackageNotFound, pnf:
raise InvalidAction(str(pnf)), None, sys.exc_info()[2]
except Exception, e:
raise InvalidAction(str(e)), None, sys.exc_info()[2]
log_debug(3, "Completed scheduling install of rhn-virtualization-guest!")
raise ShadowAction("Scheduled installation of RHN Virtualization Guest packages.")
def initiate(server_id, action_id, dry_run=0):
log_debug(3)
h = rhnSQL.prepare(_query_initiate_guest)
h.execute(action_id=action_id)
row = h.fetchone_dict()
if not row:
raise InvalidAction("Kickstart action without an associated kickstart")
kickstart_host = row['kickstart_host']
virt_type = row['virt_type']
name = row['guest_name']
boot_image = "spacewalk-koan"
append_string = row['append_string']
vcpus = row['vcpus']
disk_gb = row['disk_gb']
mem_kb = row['mem_kb']
ks_session_id = row['ks_session_id']
virt_bridge = row['virt_bridge']
disk_path = row['disk_path']
cobbler_system_name = row['cobbler_system_name']
if not boot_image:
raise InvalidAction("Boot image missing")
return (kickstart_host, cobbler_system_name, virt_type, ks_session_id, name,
mem_kb, vcpus, disk_gb, virt_bridge, disk_path, append_string)
def add_tools_channel(server_id, action_id, dry_run=0):
log_debug(3)
if (not dry_run):
subscribe_to_tools_channel(server_id)
else:
log_debug(4, "dry run requested")
raise ShadowAction("Subscribed guest to tools channel.")
|
mlperf/inference_results_v0.7
|
refs/heads/master
|
closed/CentaurTechnology/code/python-code/python/dataset.py
|
1
|
"""
dataset related classes and methods
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import sys
import time
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dataset")
class Item():
def __init__(self, label, img, idx):
self.label = label
self.img = img
self.idx = idx
self.start = time.time()
def usleep(sec):
if sys.platform == 'win32':
        # on windows time.sleep() doesn't work too well
import ctypes
kernel32 = ctypes.windll.kernel32
timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True, ctypes.c_void_p())
delay = ctypes.c_longlong(int(-1 * (10 * 1000000 * sec)))
kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0, ctypes.c_void_p(), ctypes.c_void_p(), False)
kernel32.WaitForSingleObject(timer, 0xffffffff)
else:
time.sleep(sec)
class Dataset():
def __init__(self):
self.arrival = None
self.image_list = []
self.label_list = []
self.image_list_inmemory = {}
self.last_loaded = -1
self.data = []
self.np_data = None
self.np_data_idx = {}
def preprocess(self, use_cache=True):
raise NotImplementedError("Dataset:preprocess")
def get_item_count(self):
return len(self.image_list)
def get_list(self):
raise NotImplementedError("Dataset:get_list")
def load_query_samples(self, sample_list):
self.image_list_inmemory = {}
self.np_data_idx = {}
for sample in sample_list:
self.image_list_inmemory[sample], _ = self.get_item(sample)
self.np_data_idx[sample] = len(self.np_data_idx)
self.last_loaded = time.time()
self.np_data = np.array([self.image_list_inmemory[sample] for sample in sample_list])
def unload_query_samples(self, sample_list):
if sample_list:
for sample in sample_list:
if sample in self.image_list_inmemory :
del self.image_list_inmemory[sample]
else:
self.image_list_inmemory = {}
def get_samples(self, id_list):
data = self.np_data[self.np_data_idx[id_list[0]]:self.np_data_idx[id_list[-1]]+1]
return data, self.label_list[id_list]
def get_item_loc(self, id):
raise NotImplementedError("Dataset:get_item_loc")
#
# Post processing
#
class PostProcessCommon:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = []
n = len(results[0])
for idx in range(0, n):
result = results[0][idx] + self.offset
processed_results.append([result])
if result == expected[idx]:
self.good += 1
self.total += n
return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
class PostProcessCommonNcore:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = [r[0] + self.offset for r in results]
return processed_results
#processed_results = []
#n = len(results)
#for idx in range(0, n):
# result = results[idx][0] + self.offset
# processed_results.append([result])
# if result == expected[idx]:
# self.good += 1
#self.total += n
#return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
class PostProcessArgMax:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
results = np.argmax(results[0], axis=1).tolist()
for idx in range(0, len(results)):
results[idx] += self.offset
return results
#processed_results = []
#results = np.argmax(results[0], axis=1)
#n = results.shape[0]
#for idx in range(0, n):
# result = results[idx] + self.offset
# processed_results.append([result])
# if result == expected[idx]:
# self.good += 1
#self.total += n
##print("self.good = %d / %d" % (self.good, self.total))
#return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
class PostProcessArgMaxNcore:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
results = np.argmax(results, axis=1).tolist()
for idx in range(0, len(results)):
results[idx] += self.offset
return results
#processed_results = []
#results = np.argmax(results, axis=1)
#n = results.shape[0]
#for idx in range(0, n):
# result = results[idx] + self.offset
# processed_results.append([result])
# if result == expected[idx]:
# self.good += 1
#self.total += n
##print("self.good = %d / %d" % (self.good, self.total))
#return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
#
# pre-processing
#
def center_crop(img, out_height, out_width):
height, width, _ = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
return img
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
height, width, _ = img.shape
new_height = int(100. * out_height / scale)
new_width = int(100. * out_width / scale)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=inter_pol)
return img
def pre_process_vgg(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
cv2_interpol = cv2.INTER_AREA
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2_interpol)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
# normalize image
means = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img -= means
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_mobilenet(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
img /= 255.0
img -= 0.5
img *= 2
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_mobilenet_uint8(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='uint8')
#img /= 255.0
#img -= 0.5
#img *= 2
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def maybe_resize(img, dims):
img = np.array(img, dtype=np.float32)
if len(img.shape) < 3 or img.shape[2] != 3:
# some images might be grayscale
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if dims != None:
im_height, im_width, _ = dims
img = cv2.resize(img, (im_width, im_height), interpolation=cv2.INTER_LINEAR)
return img
def pre_process_coco_mobilenet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img = np.asarray(img, dtype=np.uint8)
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_pt_mobilenet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img -= 127.5
img /= 127.5
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
img = img / 255. - mean
img = img / std
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34_tf(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img = img - mean
if need_transpose:
img = img.transpose([2, 0, 1])
return img
|
prutseltje/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/webfaction/webfaction_db.py
|
51
|
#!/usr/bin/python
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Create a webfaction database using Ansible and the Webfaction API
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_db
short_description: Add or remove a database on Webfaction
description:
- Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the database
required: true
state:
description:
- Whether the database should exist
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of database to create.
required: true
choices: ['mysql', 'postgresql']
password:
description:
- The password for the new database user.
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
'''
EXAMPLES = '''
# This will also create a default DB user with the same
# name as the database, and the specified password.
- name: Create a database
webfaction_db:
name: "{{webfaction_user}}_db1"
password: mytestsql
type: mysql
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
machine: "{{webfaction_machine}}"
# Note that, for symmetry's sake, deleting a database using
# 'state: absent' will also delete the matching user.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
type=dict(required=True, choices=['mysql', 'postgresql']),
password=dict(required=False, default=None, no_log=True),
login_name=dict(required=True),
login_password=dict(required=True, no_log=True),
machine=dict(required=False, default=None),
),
supports_check_mode=True
)
db_name = module.params['name']
db_state = module.params['state']
db_type = module.params['type']
db_passwd = module.params['password']
if module.params['machine']:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password'],
module.params['machine']
)
else:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
db_list = webfaction.list_dbs(session_id)
db_map = dict([(i['name'], i) for i in db_list])
existing_db = db_map.get(db_name)
user_list = webfaction.list_db_users(session_id)
user_map = dict([(i['username'], i) for i in user_list])
existing_user = user_map.get(db_name)
result = {}
# Here's where the real stuff happens
if db_state == 'present':
# Does a database with this name already exist?
if existing_db:
# Yes, but of a different type - fail
if existing_db['db_type'] != db_type:
module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
# If it exists with the right type, we don't change anything.
module.exit_json(
changed=False,
)
if not module.check_mode:
# If this isn't a dry run, create the db
# and default user.
result.update(
webfaction.create_db(
session_id, db_name, db_type, db_passwd
)
)
elif db_state == 'absent':
# If this isn't a dry run...
if not module.check_mode:
if not (existing_db or existing_user):
module.exit_json(changed=False,)
if existing_db:
# Delete the db if it exists
result.update(
webfaction.delete_db(session_id, db_name, db_type)
)
if existing_user:
# Delete the default db user if it exists
result.update(
webfaction.delete_db_user(session_id, db_name, db_type)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(db_state))
module.exit_json(
changed=True,
result=result
)
if __name__ == '__main__':
main()
|
yuyangit/tornado
|
refs/heads/master
|
demos/chat/chatdemo.py
|
14
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import tornado.auth
import tornado.escape
import tornado.ioloop
import tornado.web
import os.path
import uuid
from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
class MessageBuffer(object):
def __init__(self):
self.waiters = set()
self.cache = []
self.cache_size = 200
def wait_for_messages(self, cursor=None):
# Construct a Future to return to our caller. This allows
# wait_for_messages to be yielded from a coroutine even though
# it is not a coroutine itself. We will set the result of the
# Future when results are available.
result_future = Future()
if cursor:
new_count = 0
for msg in reversed(self.cache):
if msg["id"] == cursor:
break
new_count += 1
if new_count:
result_future.set_result(self.cache[-new_count:])
return result_future
self.waiters.add(result_future)
return result_future
def cancel_wait(self, future):
self.waiters.remove(future)
# Set an empty result to unblock any coroutines waiting.
future.set_result([])
def new_messages(self, messages):
logging.info("Sending new message to %r listeners", len(self.waiters))
for future in self.waiters:
future.set_result(messages)
self.waiters = set()
self.cache.extend(messages)
if len(self.cache) > self.cache_size:
self.cache = self.cache[-self.cache_size:]
# Making this a non-singleton is left as an exercise for the reader.
global_message_buffer = MessageBuffer()
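# A minimal sketch (the function name is illustrative, not part of the original
# demo) of how any coroutine can consume wait_for_messages(): the returned
# Future is yielded directly and resolves once new_messages() fires, exactly as
# MessageUpdatesHandler does below.
@gen.coroutine
def example_drain_once(cursor=None):
    messages = yield global_message_buffer.wait_for_messages(cursor=cursor)
    raise gen.Return(messages)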
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
user_json = self.get_secure_cookie("chatdemo_user")
if not user_json: return None
return tornado.escape.json_decode(user_json)
class MainHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.render("index.html", messages=global_message_buffer.cache)
class MessageNewHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
message = {
"id": str(uuid.uuid4()),
"from": self.current_user["first_name"],
"body": self.get_argument("body"),
}
# to_basestring is necessary for Python 3's json encoder,
# which doesn't accept byte strings.
message["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=message))
if self.get_argument("next", None):
self.redirect(self.get_argument("next"))
else:
self.write(message)
global_message_buffer.new_messages([message])
class MessageUpdatesHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def post(self):
cursor = self.get_argument("cursor", None)
        # Save the future returned by wait_for_messages so we can cancel
        # it in on_connection_close.
self.future = global_message_buffer.wait_for_messages(cursor=cursor)
messages = yield self.future
if self.request.connection.stream.closed():
return
self.write(dict(messages=messages))
def on_connection_close(self):
global_message_buffer.cancel_wait(self.future)
class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
@gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
self.set_secure_cookie("chatdemo_user",
tornado.escape.json_encode(user))
self.redirect("/")
return
self.authenticate_redirect(ax_attrs=["name"])
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("chatdemo_user")
self.write("You are now logged out")
def main():
parse_command_line()
app = tornado.web.Application(
[
(r"/", MainHandler),
(r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
(r"/a/message/new", MessageNewHandler),
(r"/a/message/updates", MessageUpdatesHandler),
],
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
login_url="/auth/login",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
debug=options.debug,
)
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
marteinn/Skeppa
|
refs/heads/master
|
examples/2-ecr-registry/web/example/views.py
|
3
|
from django.http import HttpResponse
def index(request):
return HttpResponse("Index")
|
n0m4dz/odoo
|
refs/heads/8.0
|
doc/_extensions/odoo/__init__.py
|
119
|
# -*- coding: utf-8 -*-
from . import pygments_override
from . import switcher
import sphinx.environment
import sphinx.builders.html
from docutils import nodes
def setup(app):
if getattr(app.config, 'html_translator_class', None):
app.warn("Overriding the explicitly set html_translator_class setting",
location="odoo extension")
app.config.html_translator_class = 'odoo.translator.BootstrapTranslator'
switcher.setup(app)
app.add_config_value('odoo_cover_default', None, 'env')
app.add_config_value('odoo_cover_external', {}, 'env')
app.add_config_value('odoo_cover_default_external', lambda conf: conf.odoo_cover_default, 'env')
app.connect('html-page-context', update_meta)
def update_meta(app, pagename, templatename, context, doctree):
meta = context.setdefault('meta', {})
meta.setdefault('banner', app.config.odoo_cover_default)
def navbarify(node, navbar=None):
"""
:param node: toctree node to navbarify
:param navbar: Whether this toctree is a 'main' navbar, a 'side' navbar or
not a navbar at all
"""
if navbar == 'main':
# add classes to just toplevel
node['classes'].extend(['nav', 'navbar-nav', 'navbar-right'])
for list_item in node.children:
# bullet_list
# list_item
# compact_paragraph
# reference
# bullet_list
# list_item
# compact_paragraph
# reference
# no bullet_list.list_item -> don't dropdownify
if len(list_item.children) < 2 or not list_item.children[1].children:
continue
list_item['classes'].append('dropdown')
# list_item.compact_paragraph.reference
link = list_item.children[0].children[0]
link['classes'].append('dropdown-toggle')
link.attributes['data-toggle'] = 'dropdown'
# list_item.bullet_list
list_item.children[1]['classes'].append('dropdown-menu')
elif navbar is None:
for n in node.traverse(nodes.reference):
# list_item
# compact_paragraph
# reference <- starting point
# bullet_list
# list_item+
# if the current list item (GP of current node) has bullet list
# children, unref it
list_item = n.parent.parent
# only has a reference -> ignore
if len(list_item.children) < 2:
continue
# no subrefs -> ignore
if not list_item.children[1].children:
continue
# otherwise replace reference node by its own children
para = n.parent
para.remove(n)
para.extend(n.children)
def resolve_content_toctree(
environment, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Alternative toctree resolution for main content: don't resolve the
toctree, just handle it as a normal node, in the translator
"""
return toctree
class monkey(object):
def __init__(self, obj):
self.obj = obj
def __call__(self, fn):
name = fn.__name__
old = getattr(self.obj, name)
setattr(self.obj, name, lambda self_, *args, **kwargs: \
fn(old, self_, *args, **kwargs))
@monkey(sphinx.environment.BuildEnvironment)
def resolve_toctree(old_resolve, self, docname, *args, **kwargs):
""" If navbar, bootstrapify TOC to yield a navbar
"""
navbar = kwargs.pop('navbar', None)
if docname == self.config.master_doc and not navbar:
return resolve_content_toctree(self, docname, *args, **kwargs)
toc = old_resolve(self, docname, *args, **kwargs)
if toc is None:
return None
navbarify(toc[0], navbar=navbar)
return toc
@monkey(sphinx.builders.html.StandaloneHTMLBuilder)
def render_partial(old_partial, self, node):
if isinstance(node, nodes.bullet_list) and node.children:
# side nav?
# remove single top-level item
# bullet_list/0(list_item)/1(bullet_list)
level1 = node.children[0].children
if len(level1) > 1:
node = level1[1]
node['classes'].extend(['list-group', 'nav', 'text-left'])
for n in node.traverse():
if isinstance(n, nodes.list_item):
n['classes'].append('list-group-item')
elif isinstance(n, nodes.reference):
n['classes'].append('ripple')
else:
node.clear()
return old_partial(self, node)
|
poljeff/odoo
|
refs/heads/8.0
|
doc/_extensions/odoo/__init__.py
|
119
|
# -*- coding: utf-8 -*-
from . import pygments_override
from . import switcher
import sphinx.environment
import sphinx.builders.html
from docutils import nodes
def setup(app):
if getattr(app.config, 'html_translator_class', None):
app.warn("Overriding the explicitly set html_translator_class setting",
location="odoo extension")
app.config.html_translator_class = 'odoo.translator.BootstrapTranslator'
switcher.setup(app)
app.add_config_value('odoo_cover_default', None, 'env')
app.add_config_value('odoo_cover_external', {}, 'env')
app.add_config_value('odoo_cover_default_external', lambda conf: conf.odoo_cover_default, 'env')
app.connect('html-page-context', update_meta)
def update_meta(app, pagename, templatename, context, doctree):
meta = context.setdefault('meta', {})
meta.setdefault('banner', app.config.odoo_cover_default)
def navbarify(node, navbar=None):
"""
:param node: toctree node to navbarify
:param navbar: Whether this toctree is a 'main' navbar, a 'side' navbar or
not a navbar at all
"""
if navbar == 'main':
# add classes to just toplevel
node['classes'].extend(['nav', 'navbar-nav', 'navbar-right'])
for list_item in node.children:
# bullet_list
# list_item
# compact_paragraph
# reference
# bullet_list
# list_item
# compact_paragraph
# reference
# no bullet_list.list_item -> don't dropdownify
if len(list_item.children) < 2 or not list_item.children[1].children:
continue
list_item['classes'].append('dropdown')
# list_item.compact_paragraph.reference
link = list_item.children[0].children[0]
link['classes'].append('dropdown-toggle')
link.attributes['data-toggle'] = 'dropdown'
# list_item.bullet_list
list_item.children[1]['classes'].append('dropdown-menu')
elif navbar is None:
for n in node.traverse(nodes.reference):
# list_item
# compact_paragraph
# reference <- starting point
# bullet_list
# list_item+
# if the current list item (GP of current node) has bullet list
# children, unref it
list_item = n.parent.parent
# only has a reference -> ignore
if len(list_item.children) < 2:
continue
# no subrefs -> ignore
if not list_item.children[1].children:
continue
# otherwise replace reference node by its own children
para = n.parent
para.remove(n)
para.extend(n.children)
def resolve_content_toctree(
environment, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Alternative toctree resolution for main content: don't resolve the
toctree, just handle it as a normal node, in the translator
"""
return toctree
class monkey(object):
def __init__(self, obj):
self.obj = obj
def __call__(self, fn):
name = fn.__name__
old = getattr(self.obj, name)
setattr(self.obj, name, lambda self_, *args, **kwargs: \
fn(old, self_, *args, **kwargs))
@monkey(sphinx.environment.BuildEnvironment)
def resolve_toctree(old_resolve, self, docname, *args, **kwargs):
""" If navbar, bootstrapify TOC to yield a navbar
"""
navbar = kwargs.pop('navbar', None)
if docname == self.config.master_doc and not navbar:
return resolve_content_toctree(self, docname, *args, **kwargs)
toc = old_resolve(self, docname, *args, **kwargs)
if toc is None:
return None
navbarify(toc[0], navbar=navbar)
return toc
@monkey(sphinx.builders.html.StandaloneHTMLBuilder)
def render_partial(old_partial, self, node):
if isinstance(node, nodes.bullet_list) and node.children:
# side nav?
# remove single top-level item
# bullet_list/0(list_item)/1(bullet_list)
level1 = node.children[0].children
if len(level1) > 1:
node = level1[1]
node['classes'].extend(['list-group', 'nav', 'text-left'])
for n in node.traverse():
if isinstance(n, nodes.list_item):
n['classes'].append('list-group-item')
elif isinstance(n, nodes.reference):
n['classes'].append('ripple')
else:
node.clear()
return old_partial(self, node)
|
prawn-cake/vk-requests
|
refs/heads/master
|
vk_requests/settings.py
|
1
|
# -*- coding: utf-8 -*-
"""Test settings"""
import os
# Set your environment variables for testing or put values here
# user email or phone number
USER_LOGIN = os.getenv('VK_USER_LOGIN', '')
USER_PASSWORD = os.getenv('VK_USER_PASSWORD', '')
# aka API/Client ID
APP_ID = os.getenv('VK_APP_ID')
PHONE_NUMBER = os.getenv('VK_PHONE_NUMBER')
SERVICE_TOKEN = os.getenv('VK_SERVICE_TOKEN')
CLIENT_SECRET = os.getenv('VK_CLIENT_SECRET')
|
raphaelmerx/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/alter_fk/author_app/migrations/0002_alter_id.py
|
379
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('author_app', '0001_initial'),
('book_app', '0001_initial'), # Forces the book table to alter the FK
]
operations = [
migrations.AlterField(
model_name='author',
name='id',
field=models.CharField(max_length=10, primary_key=True),
),
]
|
bhavanaananda/DataStage
|
refs/heads/master
|
test/RDFDatabank/rdflib/compare.py
|
4
|
# -*- coding: utf-8 -*-
"""
A collection of utilities for canonicalizing and inspecting graphs.
Among other things, they solve the problem of deterministic bnode
comparisons.
Warning: the time to canonicalize bnodes may increase exponentially on larger
graphs. Use with care!
Example of comparing two graphs::
>>> g1 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/a>,
... [ :label "A" ] .
... ''')
>>> g2 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/b>,
... [ :label "B" ] .
... ''')
>>>
>>> iso1 = to_isomorphic(g1)
>>> iso2 = to_isomorphic(g2)
These are not isomorphic::
>>> iso1 == iso2
False
Diff the two graphs::
>>> in_both, in_first, in_second = graph_diff(iso1, iso2)
Present in both::
>>> def dump_nt_sorted(g):
... for l in sorted(g.serialize(format='nt').splitlines()):
... if l: print l
>>> dump_nt_sorted(in_both)
<http://example.org> <http://example.org/ns#rel> <http://example.org/same> .
<http://example.org> <http://example.org/ns#rel> _:cb1373e1895e37293a13204e8048bdcdc7 .
_:cb1373e1895e37293a13204e8048bdcdc7 <http://example.org/ns#label> "Same" .
Only in first::
>>> dump_nt_sorted(in_first)
<http://example.org> <http://example.org/ns#rel> <http://example.org/a> .
<http://example.org> <http://example.org/ns#rel> _:cb12f880a18a57364752aaeb157f2e66bb .
_:cb12f880a18a57364752aaeb157f2e66bb <http://example.org/ns#label> "A" .
Only in second::
>>> dump_nt_sorted(in_second)
<http://example.org> <http://example.org/ns#rel> <http://example.org/b> .
<http://example.org> <http://example.org/ns#rel> _:cb0a343fb77929ad37cf00a0317f06b801 .
_:cb0a343fb77929ad37cf00a0317f06b801 <http://example.org/ns#label> "B" .
"""
# TODO:
# - Doesn't handle quads.
# - Add warning and/or safety mechanism before working on large graphs?
# - use this in existing Graph.isomorphic?
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode
import hashlib
class IsomorphicGraph(ConjunctiveGraph):
"""
Ported from <http://www.w3.org/2001/sw/DataAccess/proto-tests/tools/rdfdiff.py>
(Sean B Palmer's RDF Graph Isomorphism Tester).
"""
def __init__(self, **kwargs):
super(IsomorphicGraph, self).__init__(**kwargs)
def __eq__(self, other):
"""Graph isomorphism testing."""
if not isinstance(other, IsomorphicGraph):
return False
elif len(self) != len(other):
return False
elif list(self) == list(other):
return True # TODO: really generally cheaper?
return self.internal_hash() == other.internal_hash()
def __ne__(self, other):
"""Negative graph isomorphism testing."""
return not self.__eq__(other)
def internal_hash(self):
"""
This is defined instead of __hash__ to avoid a circular recursion
scenario with the Memory store for rdflib which requires a hash lookup
in order to return a generator of triples.
"""
return _TripleCanonicalizer(self).to_hash()
class _TripleCanonicalizer(object):
def __init__(self, graph, hashfunc=hash):
self.graph = graph
self.hashfunc = hashfunc
def to_hash(self):
return self.hashfunc(tuple(sorted(
map(self.hashfunc, self.canonical_triples()) )))
def canonical_triples(self):
for triple in self.graph:
yield tuple(self._canonicalize_bnodes(triple))
def _canonicalize_bnodes(self, triple):
for term in triple:
if isinstance(term, BNode):
yield BNode(value="cb%s"%self._canonicalize(term))
else:
yield term
def _canonicalize(self, term, done=False):
return self.hashfunc(tuple(sorted(self._vhashtriples(term, done))))
def _vhashtriples(self, term, done):
for triple in self.graph:
if term in triple:
yield tuple(self._vhashtriple(triple, term, done))
def _vhashtriple(self, triple, target_term, done):
for i, term in enumerate(triple):
if not isinstance(term, BNode):
yield term
elif done or (term == target_term):
yield i
else:
yield self._canonicalize(term, done=True)
def to_isomorphic(graph):
if isinstance(graph, IsomorphicGraph):
return graph
return IsomorphicGraph(store=graph.store)
def isomorphic(graph1, graph2):
"""
Compare graph for equality. Uses an algorithm to compute unique hashes
which takes bnodes into account.
Examples::
>>> g1 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel <http://example.org/a> .
... <http://example.org> :rel <http://example.org/b> .
... <http://example.org> :rel [ :label "A bnode." ] .
... ''')
>>> g2 = Graph().parse(format='n3', data='''
... @prefix ns: <http://example.org/ns#> .
... <http://example.org> ns:rel [ ns:label "A bnode." ] .
... <http://example.org> ns:rel <http://example.org/b>,
... <http://example.org/a> .
... ''')
>>> isomorphic(g1, g2)
True
>>> g3 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel <http://example.org/a> .
... <http://example.org> :rel <http://example.org/b> .
... <http://example.org> :rel <http://example.org/c> .
... ''')
>>> isomorphic(g1, g3)
False
"""
return _TripleCanonicalizer(graph1).to_hash() == _TripleCanonicalizer(graph2).to_hash()
def to_canonical_graph(g1):
"""
    Creates a canonical, read-only graph where all bnode ids are based on
    deterministic MD5 checksums, correlated with the graph contents.
"""
graph = Graph()
graph += _TripleCanonicalizer(g1, _md5_hash).canonical_triples()
return ReadOnlyGraphAggregate([graph])
def graph_diff(g1, g2):
"""
Returns three sets of triples: "in both", "in first" and "in second".
"""
# bnodes have deterministic values in canonical graphs:
cg1 = to_canonical_graph(g1)
cg2 = to_canonical_graph(g2)
in_both = cg1*cg2
in_first = cg1-cg2
in_second = cg2-cg1
return (in_both, in_first, in_second)
def _md5_hash(t):
h = hashlib.md5()
for i in t:
if isinstance(i, tuple):
h.update(_md5_hash(i))
else:
h.update("%s" % i)
return h.hexdigest()
_MOCK_BNODE = BNode()
def similar(g1, g2):
"""
Checks if the two graphs are "similar", by comparing sorted triples where
all bnodes have been replaced by a singular mock bnode (the
``_MOCK_BNODE``).
This is a much cheaper, but less reliable, alternative to the comparison
algorithm in ``isomorphic``.
"""
return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))
def _squashed_graphs_triples(g1, g2):
for (t1, t2) in zip(sorted(_squash_graph(g1)), sorted(_squash_graph(g2))):
yield t1, t2
def _squash_graph(graph):
return (_squash_bnodes(triple) for triple in graph)
def _squash_bnodes(triple):
return tuple((isinstance(t, BNode) and _MOCK_BNODE) or t for t in triple)
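# Illustrative helper (not part of the original module): shows how the public
# helpers above are typically combined. The graph data passed in is assumed to
# be N3, as in the doctests above.
def _example_compare(data1, data2):
    """Return (isomorphic?, triples only in first, triples only in second)."""
    g1 = Graph().parse(format='n3', data=data1)
    g2 = Graph().parse(format='n3', data=data2)
    in_both, in_first, in_second = graph_diff(to_isomorphic(g1),
                                              to_isomorphic(g2))
    return isomorphic(g1, g2), in_first, in_second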
|
shakamunyi/tensorflow
|
refs/heads/master
|
tensorflow/python/training/adam.py
|
15
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdamOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
v_0 <- 0 (Initialize initial 2nd moment vector)
t <- 0 (Initialize timestep)
```
The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 2 of the paper:
```
t <- t + 1
lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
v_t <- beta2 * v_{t-1} + (1 - beta2) * g * g
variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
```
The default value of 1e-8 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
# Variables to accumulate the powers of the beta parameters.
# Created in _create_slots when we know the variables to optimize.
self._beta1_power = None
self._beta2_power = None
# Created in SparseApply if needed.
self._updated_lr = None
def _get_beta_accumulators(self):
return self._beta1_power, self._beta2_power
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
create_new = self._beta1_power is None
if not create_new and context.in_graph_mode():
create_new = (self._beta1_power.graph is not first_var.graph)
if create_new:
with ops.colocate_with(first_var):
self._beta1_power = variable_scope.variable(self._beta1,
name="beta1_power",
trainable=False)
self._beta2_power = variable_scope.variable(self._beta2,
name="beta2_power",
trainable=False)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.apply_adam(
var, m, v,
math_ops.cast(self._beta1_power, var.dtype.base_dtype),
math_ops.cast(self._beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad, use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.resource_apply_adam(
var.handle, m.handle, v.handle,
math_ops.cast(self._beta1_power, grad.dtype.base_dtype),
math_ops.cast(self._beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t,
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(var,
lr * m_t / (v_sqrt + epsilon_t),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values, var, grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(
x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(
grad, var, indices, self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
with ops.colocate_with(self._beta1_power):
update_beta1 = self._beta1_power.assign(
self._beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = self._beta2_power.assign(
self._beta2_power * self._beta2_t,
use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
name=name_scope)
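# Illustrative scalar sketch (not part of TensorFlow): mirrors the update rule
# documented in AdamOptimizer.__init__ for a single parameter. The function
# name and its arguments are assumptions made for this example only.
def _adam_update_sketch(var, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999,
                        epsilon=1e-8):
  """Return (var, m, v) after one Adam step on scalar values."""
  m = beta1 * m + (1 - beta1) * g
  v = beta2 * v + (1 - beta2) * g * g
  lr_t = lr * (1 - beta2 ** t) ** 0.5 / (1 - beta1 ** t)
  return var - lr_t * m / (v ** 0.5 + epsilon), m, v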
|
kuiwei/kuiwei
|
refs/heads/master
|
lms/djangoapps/courseware/features/events.py
|
16
|
# pylint: disable=C0111
from lettuce import step
from lettuce import world
from lettuce import before
from pymongo import MongoClient
from nose.tools import assert_equals
from nose.tools import assert_in
REQUIRED_EVENT_FIELDS = [
'agent',
'event',
'event_source',
'event_type',
'host',
'ip',
'page',
'time',
'username'
]
@before.all
def connect_to_mongodb():
world.mongo_client = MongoClient()
world.event_collection = world.mongo_client['track']['events']
@before.each_scenario
def reset_captured_events(_scenario):
world.event_collection.drop()
@before.outline
def reset_between_outline_scenarios(_scenario, order, outline, reasons_to_fail):
world.event_collection.drop()
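# Illustrative phrases matched by the step regex below (comment only):
#   An "problem_check" server event is emitted
#   2 "seq_goto" browser events is emitted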
@step(r'([aA]n?|\d+) "(.*)" (server|browser) events? is emitted$')
def n_events_are_emitted(_step, count, event_type, event_source):
# Ensure all events are written out to mongo before querying.
world.mongo_client.fsync()
# Note that splinter makes 2 requests when you call browser.visit('/foo')
# the first just checks to see if the server responds with a status
# code of 200, the next actually uses the browser to submit the request.
# We filter out events associated with the status code checks by ignoring
# events that come directly from splinter.
criteria = {
'event_type': event_type,
'event_source': event_source,
'agent': {
'$ne': 'python/splinter'
}
}
cursor = world.event_collection.find(criteria)
try:
number_events = int(count)
except ValueError:
number_events = 1
assert_equals(cursor.count(), number_events)
event = cursor.next()
expected_field_values = {
"username": world.scenario_dict['USER'].username,
"event_type": event_type,
}
for key, value in expected_field_values.iteritems():
assert_equals(event[key], value)
for field in REQUIRED_EVENT_FIELDS:
assert_in(field, event)
|
alexbredo/honeypot-pop3
|
refs/heads/master
|
pop3.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time, uuid
import os, os.path
from datetime import datetime
from twisted.internet import protocol, reactor, ssl
from twisted.protocols.basic import LineReceiver
from twisted.conch.telnet import TelnetProtocol
from base.applog import *
from base.appconfig import Configuration
from handler.manager import HandlerManager
class Pop3Config(Configuration):
def setup(self, *args, **kwargs): # Defaults:
self.__version = '0.1.0'
self.__appname = 'honeypot_pop3'
self.port=110
self.hostname='mx2.example.com'
self.domain='example.com'
self.maildir='static/'
self.sslport=995
self.sslcertprivate='keys/smtp.private.key'
self.sslcertpublic='keys/smtp.public.key'
self.enabled_handlers = {
'elasticsearch': True,
'screen': True,
'file': True
}
self.elasticsearch = {
'host': '127.0.0.1',
'port': 9200,
'index': 'honeypot'
}
self.filename = 'honeypot_output.txt'
config = Pop3Config()
handler = HandlerManager(config)
class SimplePop3Session(LineReceiver, TelnetProtocol):
def __init__(self):
self.delimiter = '\n'
self.__mailcount()
self.session = str(uuid.uuid1()) # Dirty. todo.
self.myownhost = None
def connectionMade(self):
self.__logInfo('connected', '', True)
self.transport.write('+OK QPOP (version 2.2) at 127.0.0.1 starting.\r\n')
self.state = 'AUTHUSER'
def connectionLost(self, reason):
self.__logInfo('disconnected', '', True)
def lineReceived(self, line):
        line = line.replace(b"\r", b"") # Remove unnecessary chars
command = line.strip().lower()
if (command in ['quit', 'exit']):
self.transport.write('+OK Pop server at %s signing off.\r\n' % config.hostname)
self.transport.loseConnection()
elif (command.startswith('capa')):
self.__logInfo('CAPABILITIES', command, True)
self.transport.write('+OK Capability list follows\r\n')
self.transport.write('TOP\r\n')
self.transport.write('APOP\r\n')
self.transport.write('USER\r\n')
self.transport.write('PASS\r\n')
self.transport.write('STAT\r\n')
self.transport.write('LIST\r\n')
self.transport.write('RETR\r\n')
self.transport.write('DELE\r\n')
self.transport.write('RSET\r\n')
self.transport.write('.\r\n')
else:
getattr(self, 'pop3_' + self.state)(command)
def pop3_AUTHUSER(self, command):
if (command.startswith('user')):
self.__logInfo('AUTHUSER', command, True)
self.transport.write('+OK Password required for %s.\r\n' % command[4:].strip())
self.state = 'AUTHPASS'
elif (command.startswith('apop') and len(command) > 15):
self.__logInfo('AUTHUSER', command, True)
self.transport.write('+OK')
self.state = 'META'
else:
self.__logInfo('ERR', command, False)
self.transport.write('-ERR Authentication required.\r\n')
def pop3_AUTHPASS(self, command):
if (command.startswith('pass')):
self.__logInfo('AUTHPASS', command, True)
mailcount = self.__mailcount()
self.transport.write('+OK User has %s messages (%s octets).\r\n' % (str(mailcount), mailcount*1123))
self.state = 'META'
else:
self.__logInfo('ERR', command, False)
self.transport.write('-ERR Password required.\r\n')
def pop3_META(self, command):
if (command.startswith('retr') or command.startswith('top')):
self.__logInfo('RETR', command, True)
file = config.maildir + command[4:].strip() + '.mail'
if (self.__existsmail(file)):
self.transport.write('+OK %s octets\r\n' % self.__mailsize(file))
fo = open(file)
for line in fo.readlines():
self.transport.write(line.strip() + '\r\n')
fo.close()
self.transport.write('.\r\n')
else:
self.transport.write('-ERR Requested mail does not exist.\r\n')
elif (command.startswith('stat')):
if (len(command) == 4):
self.transport.write('+OK %s 1532\r\n' % self.__mailcount())
else:
self.__logInfo('STAT', command, True)
file = config.maildir + command[4:].strip() + '.mail'
if (self.__existsmail(file)):
self.transport.write('+OK %s octets\r\n' % self.__mailsize(file))
else:
self.transport.write('-ERR Requested mail does not exist.\r\n')
elif (command.startswith('list')):
self.__logInfo('LIST', command, True)
self.transport.write('+OK %s messages:\r\n' % str(self.__mailcount()))
for name in os.listdir(config.maildir):
if os.path.isfile(config.maildir + name):
self.transport.write('%s %s\r\n' % (name.split('.')[0], str(self.__mailsize(config.maildir + name))))
self.transport.write('.\r\n')
elif (command.startswith('dele')):
self.__logInfo('DELETE', command, False)
self.transport.write('+OK Message deleted\r\n')
elif (command.startswith('rset')):
self.__logInfo('RESET', command, True)
self.transport.write('+OK Reset state\r\n')
self.state = 'AUTHPASS'
else:
self.__logInfo('ERR', command, False)
self.transport.write('-ERR Invalid command specified.\r\n')
def __mailcount(self):
return len([name for name in os.listdir(config.maildir) if os.path.isfile(config.maildir + name)])
def __mailsize(self, file):
try:
return os.path.getsize(file)
except:
return 371
def __existsmail(self, file):
return os.path.exists(file)
def __logInfo(self, type, command, successful):
        try: # Hack: on connection close the socket is unavailable, so remember the old IP.
self.myownhost = self.transport.getHost()
except AttributeError:
pass # nothing
data = {
'module': 'POP3',
'@timestamp': int(time.time() * 1000), # in milliseconds
'sourceIPv4Address': str(self.transport.getPeer().host),
'sourceTransportPort': self.transport.getPeer().port,
'type': type,
'command': command,
'success': successful,
'session': self.session
}
if self.myownhost:
data['destinationIPv4Address'] = str(self.myownhost.host)
data['destinationTransportPort'] = self.myownhost.port
handler.handle(data)
class Pop3Factory(protocol.Factory):
def buildProtocol(self, addr):
return SimplePop3Session()
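# Illustrative session (comment only), as produced by the handlers above with
# the default config; client commands on the left, server replies on the right:
#   (connect)    -> +OK QPOP (version 2.2) at 127.0.0.1 starting.
#   USER alice   -> +OK Password required for alice.
#   PASS secret  -> +OK User has <n> messages (<octets> octets).
#   LIST         -> +OK <n> messages: ... one line per mail, terminated by "."
#   QUIT         -> +OK Pop server at mx2.example.com signing off.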
try:
reactor.listenTCP(
config.port,
Pop3Factory()
)
reactor.listenSSL(
config.sslport,
Pop3Factory(),
ssl.DefaultOpenSSLContextFactory(
config.sslcertprivate,
config.sslcertpublic
))
log.info('Server listening on Port %s (Plain) and on %s (SSL).' % (config.port, config.sslport))
reactor.run()
except Exception, e:
    log.error(str(e))
exit(-1)
log.info('Server shutdown.')
|
scripnichenko/nova
|
refs/heads/master
|
nova/objects/block_device.py
|
11
|
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import block_device
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
_BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance']
BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
def _expected_cols(expected_attrs):
return [attr for attr in expected_attrs
if attr in _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD]
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add instance_uuid to get_by_volume_id method
# Version 1.2: Instance version 1.14
# Version 1.3: Instance version 1.15
# Version 1.4: Instance version 1.16
# Version 1.5: Instance version 1.17
# Version 1.6: Instance version 1.18
# Version 1.7: Add update_or_create method
# Version 1.8: Instance version 1.19
# Version 1.9: Instance version 1.20
# Version 1.10: Changed source_type field to BlockDeviceSourceTypeField.
# Version 1.11: Changed destination_type field to
# BlockDeviceDestinationTypeField.
# Version 1.12: Changed device_type field to BlockDeviceTypeField.
# Version 1.13: Instance version 1.21
# Version 1.14: Instance version 1.22
# Version 1.15: Instance version 1.23
VERSION = '1.15'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'instance': fields.ObjectField('Instance', nullable=True),
'source_type': fields.BlockDeviceSourceTypeField(nullable=True),
'destination_type': fields.BlockDeviceDestinationTypeField(
nullable=True),
'guest_format': fields.StringField(nullable=True),
'device_type': fields.BlockDeviceTypeField(nullable=True),
'disk_bus': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'device_name': fields.StringField(nullable=True),
'delete_on_termination': fields.BooleanField(default=False),
'snapshot_id': fields.StringField(nullable=True),
'volume_id': fields.StringField(nullable=True),
'volume_size': fields.IntegerField(nullable=True),
'image_id': fields.StringField(nullable=True),
'no_device': fields.BooleanField(default=False),
'connection_info': fields.StringField(nullable=True),
}
obj_relationships = {
'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'),
('1.4', '1.16'), ('1.5', '1.17'), ('1.6', '1.18'),
('1.8', '1.19'), ('1.9', '1.20'), ('1.13', '1.21'),
('1.14', '1.22'), ('1.15', '1.23')],
}
@staticmethod
def _from_db_object(context, block_device_obj,
db_block_device, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for key in block_device_obj.fields:
if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
continue
block_device_obj[key] = db_block_device[key]
if 'instance' in expected_attrs:
my_inst = objects.Instance(context)
my_inst._from_db_object(context, my_inst,
db_block_device['instance'])
block_device_obj.instance = my_inst
block_device_obj._context = context
block_device_obj.obj_reset_changes()
return block_device_obj
def _create(self, context, update_or_create=False):
"""Create the block device record in the database.
        Raises an ObjectActionError if the id field is already set on the
        object or if the instance field is set. Resets all the changes on
        the object and returns None.
:param context: security context used for database calls
:param update_or_create: consider existing block devices for the
instance based on the device name and swap, and only update
the ones that match. Normally only used when creating the
instance for the first time.
"""
cell_type = cells_opts.get_cell_type()
if cell_type == 'api':
raise exception.ObjectActionError(
action='create',
reason='BlockDeviceMapping cannot be '
'created in the API cell.')
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='create',
reason='instance assigned')
cells_create = update_or_create or None
if update_or_create:
db_bdm = db.block_device_mapping_update_or_create(
context, updates, legacy=False)
else:
db_bdm = db.block_device_mapping_create(
context, updates, legacy=False)
self._from_db_object(context, self, db_bdm)
# NOTE(alaski): bdms are looked up by instance uuid and device_name
# so if we sync up with no device_name an entry will be created that
# will not be found on a later update_or_create call and a second bdm
# create will occur.
if cell_type == 'compute' and db_bdm.get('device_name') is not None:
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(
context, self, create=cells_create)
@base.remotable
def create(self):
self._create(self._context)
@base.remotable
def update_or_create(self):
self._create(self._context, update_or_create=True)
@base.remotable
def destroy(self):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
db.block_device_mapping_destroy(self._context, self.id)
delattr(self, base.get_attrname('id'))
cell_type = cells_opts.get_cell_type()
if cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_destroy_at_top(self._context, self.instance_uuid,
device_name=self.device_name,
volume_id=self.volume_id)
@base.remotable
def save(self):
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='save',
reason='instance changed')
updates.pop('id', None)
updated = db.block_device_mapping_update(self._context, self.id,
updates, legacy=False)
if not updated:
raise exception.BDMNotFound(id=self.id)
self._from_db_object(self._context, self, updated)
cell_type = cells_opts.get_cell_type()
if cell_type == 'compute':
create = False
# NOTE(alaski): If the device name has just been set this bdm
# likely does not exist in the parent cell and we should create it.
# If this is a modification of the device name we should update
# rather than create which is why None is used here instead of True
if 'device_name' in updates:
create = None
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(self._context, self,
create=create)
@base.remotable_classmethod
def get_by_volume_id(cls, context, volume_id,
instance_uuid=None, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
db_bdm = db.block_device_mapping_get_by_volume_id(
context, volume_id, _expected_cols(expected_attrs))
if not db_bdm:
raise exception.VolumeBDMNotFound(volume_id=volume_id)
# NOTE (ndipanov): Move this to the db layer into a
# get_by_instance_and_volume_id method
if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
raise exception.InvalidVolume(
reason=_("Volume does not belong to the "
"requested instance."))
return cls._from_db_object(context, cls(), db_bdm,
expected_attrs=expected_attrs)
@property
def is_root(self):
return self.boot_index == 0
@property
def is_volume(self):
return (self.destination_type ==
fields.BlockDeviceDestinationType.VOLUME)
@property
def is_image(self):
return self.source_type == fields.BlockDeviceSourceType.IMAGE
def get_image_mapping(self):
return block_device.BlockDeviceDict(self).get_image_mapping()
def obj_load_attr(self, attrname):
if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
self.instance = objects.Instance.get_by_uuid(self._context,
self.instance_uuid)
self.obj_reset_changes(fields=['instance'])
@base.NovaObjectRegistry.register
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: BlockDeviceMapping <= version 1.1
# Version 1.2: Added use_slave to get_by_instance_uuid
# Version 1.3: BlockDeviceMapping <= version 1.2
# Version 1.4: BlockDeviceMapping <= version 1.3
# Version 1.5: BlockDeviceMapping <= version 1.4
# Version 1.6: BlockDeviceMapping <= version 1.5
# Version 1.7: BlockDeviceMapping <= version 1.6
# Version 1.8: BlockDeviceMapping <= version 1.7
# Version 1.9: BlockDeviceMapping <= version 1.8
# Version 1.10: BlockDeviceMapping <= version 1.9
# Version 1.11: BlockDeviceMapping <= version 1.10
# Version 1.12: BlockDeviceMapping <= version 1.11
# Version 1.13: BlockDeviceMapping <= version 1.12
# Version 1.14: BlockDeviceMapping <= version 1.13
# Version 1.15: BlockDeviceMapping <= version 1.14
# Version 1.16: BlockDeviceMapping <= version 1.15
VERSION = '1.16'
fields = {
'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
}
obj_relationships = {
'objects': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.1'),
('1.3', '1.2'), ('1.4', '1.3'), ('1.5', '1.4'),
('1.6', '1.5'), ('1.7', '1.6'), ('1.8', '1.7'),
('1.9', '1.8'), ('1.10', '1.9'), ('1.11', '1.10'),
('1.12', '1.11'), ('1.13', '1.12'), ('1.14', '1.13'),
('1.15', '1.14'), ('1.16', '1.15')],
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
db_bdms = db.block_device_mapping_get_all_by_instance(
context, instance_uuid, use_slave=use_slave)
return base.obj_make_list(
context, cls(), objects.BlockDeviceMapping, db_bdms or [])
def root_bdm(self):
try:
return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
except StopIteration:
return
def block_device_make_list(context, db_list, **extra_args):
return base.obj_make_list(context,
objects.BlockDeviceMappingList(context),
objects.BlockDeviceMapping, db_list,
**extra_args)
def block_device_make_list_from_dicts(context, bdm_dicts_list):
bdm_objects = [objects.BlockDeviceMapping(context=context, **bdm)
for bdm in bdm_dicts_list]
return BlockDeviceMappingList(objects=bdm_objects)
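# Illustrative sketch (not part of Nova): building a BlockDeviceMappingList
# from plain dicts with the helper above. The field values are assumptions
# chosen only to exercise the documented fields.
def _example_bdm_list(context):
    bdm_dicts = [{'source_type': 'image', 'destination_type': 'local',
                  'boot_index': 0, 'delete_on_termination': True}]
    return block_device_make_list_from_dicts(context, bdm_dicts)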
|
stvstnfrd/edx-platform
|
refs/heads/master
|
import_shims/studio/contentstore/management/commands/edit_course_tabs.py
|
2
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.management.commands.edit_course_tabs', 'cms.djangoapps.contentstore.management.commands.edit_course_tabs')
from cms.djangoapps.contentstore.management.commands.edit_course_tabs import *
|
wangmiao1981/spark
|
refs/heads/master
|
examples/src/main/python/avro_inputformat.py
|
27
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read data file users.avro in local Spark distro:
$ cd $SPARK_HOME
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \
> ./examples/src/main/python/avro_inputformat.py \
> examples/src/main/resources/users.avro
{u'favorite_color': None, u'name': u'Alyssa', u'favorite_numbers': [3, 9, 15, 20]}
{u'favorite_color': u'red', u'name': u'Ben', u'favorite_numbers': []}
To read name and favorite_color fields only, specify the following reader schema:
$ cat examples/src/main/resources/user.avsc
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \
> ./examples/src/main/python/avro_inputformat.py \
> examples/src/main/resources/users.avro examples/src/main/resources/user.avsc
{u'favorite_color': None, u'name': u'Alyssa'}
{u'favorite_color': u'red', u'name': u'Ben'}
"""
import sys
from functools import reduce
from pyspark.sql import SparkSession
if __name__ == "__main__":
if len(sys.argv) != 2 and len(sys.argv) != 3:
print("""
Usage: avro_inputformat <data_file> [reader_schema_file]
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/avro_inputformat.py <data_file> [reader_schema_file]
Assumes you have Avro data stored in <data_file>. Reader schema can be optionally specified
in [reader_schema_file].
""", file=sys.stderr)
sys.exit(-1)
path = sys.argv[1]
spark = SparkSession\
.builder\
.appName("AvroKeyInputFormat")\
.getOrCreate()
sc = spark.sparkContext
conf = None
if len(sys.argv) == 3:
schema_rdd = sc.textFile(sys.argv[2], 1).collect()
conf = {"avro.schema.input.key": reduce(lambda x, y: x + y, schema_rdd)}
avro_rdd = sc.newAPIHadoopFile(
path,
"org.apache.avro.mapreduce.AvroKeyInputFormat",
"org.apache.avro.mapred.AvroKey",
"org.apache.hadoop.io.NullWritable",
keyConverter="org.apache.spark.examples.pythonconverters.AvroWrapperToJavaConverter",
conf=conf)
output = avro_rdd.map(lambda x: x[0]).collect()
for k in output:
print(k)
spark.stop()
|
frossigneux/blazar
|
refs/heads/master
|
climate/openstack/common/policy.py
|
2
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import re
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from climate.openstack.common import fileutils
from climate.openstack.common.gettextutils import _, _LE
from climate.openstack.common import jsonutils
from climate.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if force_reload:
self.use_conf = force_reload
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(
self.policy_path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError((self.policy_file,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
        LOG.debug("Rule %s will now be enforced" % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
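# Illustrative sketch (not part of this module): a minimal in-memory
# enforcement call. use_conf=False keeps load_rules() from touching any
# policy.json; the "@" check always allows, as documented above.
def _example_enforce():
    enforcer = Enforcer(use_conf=False)
    enforcer.set_rules(Rules.load_json('{"compute:get": "@"}'))
    return enforcer.enforce("compute:get", target={}, creds={})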
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
    def __call__(self, target, cred, enforcer):
        """Called when an instance of the class is invoked.
        Performs the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
    def __init__(self, kind, match):
        """Initializes a Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
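# Illustrative tokenization (comment only): for the docstring rule
#   "role:admin or (project_id:%(project_id)s and role:projectadmin)"
# _parse_tokenize yields, in order: check, 'or', '(', check, 'and', check, ')'.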
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %r") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
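# Minimal usage sketch (hypothetical check, not part of this module):
#
#     @register('is_admin')
#     class IsAdminCheck(Check):
#         def __call__(self, target, creds, enforcer):
#             return creds.get('is_admin', False)
#
# Once registered, policy rules can reference the new kind as "is_admin:".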
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
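# Illustrative note (not part of the original module): a rule such as
# "http://authz.example.com/%(project_id)s" makes HttpCheck POST the
# JSON-encoded target and credentials to that URL; access is granted only
# when the response body is exactly "True". (The hostname here is
# hypothetical.)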
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
# TODO(termie): do dict inspection via dot syntax
try:
match = self.match % target
except KeyError:
# If the key is not present in the target,
# fail closed and return False
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
leftval = creds[self.kind]
except KeyError:
return False
return match == six.text_type(leftval)
|
mathLab/RBniCS
|
refs/heads/master
|
rbnics/backends/__init__.py
|
1
|
# Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import importlib
import sys
# Initialize __all__ variable
__all__ = []
# Process configuration files first
from rbnics.utils.config import config
# Helper function to load required backends
def load_backends(required_backends):
# Clean up backends cache
from rbnics.utils.decorators.backend_for import _cache as backends_cache
for class_or_function_name in backends_cache.__all__:
delattr(backends_cache, class_or_function_name)
if hasattr(sys.modules[__name__], class_or_function_name):
delattr(sys.modules[__name__], class_or_function_name)
assert class_or_function_name in sys.modules[__name__].__all__
sys.modules[__name__].__all__.remove(class_or_function_name)
backends_cache.__all__ = set()
# Make sure to import all available backends, so that they are added to the backends cache
importlib.import_module(__name__ + ".abstract")
importlib.import_module(__name__ + ".common")
for backend in required_backends:
importlib.import_module(__name__ + "." + backend)
importlib.import_module(__name__ + "." + backend + ".wrapping")
importlib.import_module(__name__ + ".online")
# Copy imported backends from backends cache to this module
for class_or_function_name in backends_cache.__all__:
assert not hasattr(sys.modules[__name__], class_or_function_name)
setattr(sys.modules[__name__], class_or_function_name, getattr(backends_cache, class_or_function_name))
sys.modules[__name__].__all__.append(class_or_function_name)
# Next, extend modules with __overridden__ variables in backends wrapping. In order to account for
# multiple overrides, sort the list of required backends according to the dependencies declared in __depends_on__
depends_on_backends = dict()
at_least_one_dependent_backend = False
for backend in required_backends:
if hasattr(sys.modules[__name__ + "." + backend + ".wrapping"], "__depends_on__"):
depends_on_backends[backend] = set(sys.modules[__name__ + "." + backend + ".wrapping"].__depends_on__)
at_least_one_dependent_backend = True
else:
depends_on_backends[backend] = set()
if at_least_one_dependent_backend:
from toposort import toposort_flatten
required_backends = toposort_flatten(depends_on_backends)
# Apply possible overrides defined in backends wrapping
for backend in required_backends:
if hasattr(sys.modules[__name__ + "." + backend + ".wrapping"], "__overridden__"):
wrapping_overridden = sys.modules[__name__ + "." + backend + ".wrapping"].__overridden__
assert isinstance(wrapping_overridden, dict)
for (module_name, classes_or_functions) in wrapping_overridden.items():
assert isinstance(classes_or_functions, (list, dict))
if isinstance(classes_or_functions, list):
classes_or_functions = dict((class_or_function, class_or_function)
for class_or_function in classes_or_functions)
for (class_or_function_name, class_or_function_impl) in classes_or_functions.items():
setattr(sys.modules[module_name], class_or_function_name,
getattr(sys.modules[__name__ + "." + backend + ".wrapping"], class_or_function_impl))
if hasattr(sys.modules[module_name], "__all__"):
if class_or_function_name not in sys.modules[module_name].__all__:
sys.modules[module_name].__all__.append(class_or_function_name)
# Make sure that backends do not contain dispatcher functions (but rather, actual functions and classes)
import inspect
for backend in required_backends:
for class_or_function_name in sys.modules[__name__ + "." + backend].__all__:
class_or_function = getattr(sys.modules[__name__ + "." + backend], class_or_function_name)
assert inspect.isclass(class_or_function) or inspect.isfunction(class_or_function)
# In contrast, make sure that this module only contains dispatcher objects
from rbnics.utils.decorators.dispatch import Dispatcher
for dispatcher_name in sys.modules[__name__].__all__:
dispatcher = getattr(sys.modules[__name__], dispatcher_name)
# if there was at least a concrete implementation by @BackendFor or @backend_for
if isinstance(getattr(backends_cache, dispatcher_name), Dispatcher):
assert isinstance(dispatcher, Dispatcher)
# Store some additional classes, defined in the abstract module, which are base classes but not backends,
# and thus have not been processed by @BackendFor and @backend_for decorators
for extra_class in ("LinearProblemWrapper", "NonlinearProblemWrapper", "TimeDependentProblemWrapper"):
assert not hasattr(sys.modules[__name__], extra_class)
setattr(sys.modules[__name__], extra_class, getattr(sys.modules[__name__ + ".abstract"], extra_class))
sys.modules[__name__].__all__.append(extra_class)
# Load required backends
load_backends(config.get("backends", "required backends"))
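# Illustrative note (not part of the original file): each backend package
# registers its classes and functions through the @BackendFor/@backend_for
# decorators; load_backends() then copies the resulting dispatcher objects
# from the backends cache into this module, so that
# "from rbnics.backends import <name>" resolves to the dispatcher.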
|
pygeek/django
|
refs/heads/master
|
tests/regressiontests/null_fk/tests.py
|
118
|
from __future__ import absolute_import, unicode_literals
from django.db.models import Q
from django.test import TestCase
from .models import (SystemDetails, Item, PropertyValue, SystemInfo, Forum,
Post, Comment)
class NullFkTests(TestCase):
def test_null_fk(self):
d = SystemDetails.objects.create(details='First details')
s = SystemInfo.objects.create(system_name='First forum', system_details=d)
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
c1 = Comment.objects.create(post=p, comment_text='My first comment')
c2 = Comment.objects.create(comment_text='My second comment')
# Starting from comment, make sure that a .select_related(...) with a specified
# set of fields will properly LEFT JOIN multiple levels of NULLs (and the things
# that come after the NULLs, or else data that should exist won't). Regression
# test for #7369.
c = Comment.objects.select_related().get(id=c1.id)
self.assertEqual(c.post, p)
self.assertEqual(Comment.objects.select_related().get(id=c2.id).post, None)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info').all(),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform = lambda c: (c.id, c.comment_text, repr(c.post))
)
# Regression test for #7530, #7716.
self.assertTrue(Comment.objects.select_related('post').filter(post__isnull=True)[0].post is None)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info__system_details'),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform = lambda c: (c.id, c.comment_text, repr(c.post))
)
def test_combine_isnull(self):
item = Item.objects.create(title='Some Item')
pv = PropertyValue.objects.create(label='Some Value')
item.props.create(key='a', value=pv)
item.props.create(key='b') # value=NULL
q1 = Q(props__key='a', props__value=pv)
q2 = Q(props__key='b', props__value__isnull=True)
# Each of these individually should return the item.
self.assertEqual(Item.objects.get(q1), item)
self.assertEqual(Item.objects.get(q2), item)
# Logically, qs1 and qs2, and qs3 and qs4 should be the same.
qs1 = Item.objects.filter(q1) & Item.objects.filter(q2)
qs2 = Item.objects.filter(q2) & Item.objects.filter(q1)
qs3 = Item.objects.filter(q1) | Item.objects.filter(q2)
qs4 = Item.objects.filter(q2) | Item.objects.filter(q1)
# Regression test for #15823.
self.assertEqual(list(qs1), list(qs2))
self.assertEqual(list(qs3), list(qs4))
|
nvoron23/paragraph2vec
|
refs/heads/master
|
test_word2vec.py
|
2
|
import gensim
s = []
with open("../text8") as f:
for l in f:
s.append(l.strip().split())
w = gensim.models.Word2Vec(s,workers=24)
print w.similarity("man","woman")
|
DougFirErickson/qgisSpaceSyntaxToolkit
|
refs/heads/master
|
esstoolkit/external/networkx/algorithms/operators/product.py
|
33
|
"""
Graph products.
"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from itertools import product
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
__all__ = ['tensor_product','cartesian_product',
'lexicographic_product', 'strong_product']
def _dict_product(d1,d2):
return dict((k,(d1.get(k),d2.get(k))) for k in set(d1)|set(d2))
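# For example (illustrative only): _dict_product({'a': 1}, {'b': 2}) returns
# {'a': (1, None), 'b': (None, 2)}; missing attributes become None, which is
# how the product generators below combine node and edge attribute dicts.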
# Generators for producing graph products
def _node_product(G,H):
for u,v in product(G, H):
yield ((u,v), _dict_product(G.node[u], H.node[v]))
def _directed_edges_cross_edges(G,H):
if not G.is_multigraph() and not H.is_multigraph():
for u,v,c in G.edges_iter(data=True):
for x,y,d in H.edges_iter(data=True):
yield (u,x),(v,y),_dict_product(c,d)
if not G.is_multigraph() and H.is_multigraph():
for u,v,c in G.edges_iter(data=True):
for x,y,k,d in H.edges_iter(data=True,keys=True):
yield (u,x),(v,y),k,_dict_product(c,d)
if G.is_multigraph() and not H.is_multigraph():
for u,v,k,c in G.edges_iter(data=True,keys=True):
for x,y,d in H.edges_iter(data=True):
yield (u,x),(v,y),k,_dict_product(c,d)
if G.is_multigraph() and H.is_multigraph():
for u,v,j,c in G.edges_iter(data=True,keys=True):
for x,y,k,d in H.edges_iter(data=True,keys=True):
yield (u,x),(v,y),(j,k),_dict_product(c,d)
def _undirected_edges_cross_edges(G,H):
if not G.is_multigraph() and not H.is_multigraph():
for u,v,c in G.edges_iter(data=True):
for x,y,d in H.edges_iter(data=True):
yield (v,x),(u,y),_dict_product(c,d)
if not G.is_multigraph() and H.is_multigraph():
for u,v,c in G.edges_iter(data=True):
for x,y,k,d in H.edges_iter(data=True,keys=True):
yield (v,x),(u,y),k,_dict_product(c,d)
if G.is_multigraph() and not H.is_multigraph():
for u,v,k,c in G.edges_iter(data=True,keys=True):
for x,y,d in H.edges_iter(data=True):
yield (v,x),(u,y),k,_dict_product(c,d)
if G.is_multigraph() and H.is_multigraph():
for u,v,j,c in G.edges_iter(data=True,keys=True):
for x,y,k,d in H.edges_iter(data=True,keys=True):
yield (v,x),(u,y),(j,k),_dict_product(c,d)
def _edges_cross_nodes(G,H):
if G.is_multigraph():
for u,v,k,d in G.edges_iter(data=True,keys=True):
for x in H:
yield (u,x),(v,x),k,d
else:
for u,v,d in G.edges_iter(data=True):
for x in H:
if H.is_multigraph():
yield (u,x),(v,x),None,d
else:
yield (u,x),(v,x),d
def _nodes_cross_edges(G,H):
if H.is_multigraph():
for x in G:
for u,v,k,d in H.edges_iter(data=True,keys=True):
yield (x,u),(x,v),k,d
else:
for x in G:
for u,v,d in H.edges_iter(data=True):
if G.is_multigraph():
yield (x,u),(x,v),None,d
else:
yield (x,u),(x,v),d
def _edges_cross_nodes_and_nodes(G,H):
if G.is_multigraph():
for u,v,k,d in G.edges_iter(data=True,keys=True):
for x in H:
for y in H:
yield (u,x),(v,y),k,d
else:
for u,v,d in G.edges_iter(data=True):
for x in H:
for y in H:
if H.is_multigraph():
yield (u,x),(v,y),None,d
else:
yield (u,x),(v,y),d
def _init_product_graph(G,H):
if not G.is_directed() == H.is_directed():
raise nx.NetworkXError("G and H must be both directed or both undirected")
if G.is_multigraph() or H.is_multigraph():
GH = nx.MultiGraph()
else:
GH = nx.Graph()
if G.is_directed():
GH = GH.to_directed()
return GH
def tensor_product(G,H):
r"""Return the tensor product of G and H.
The tensor product P of the graphs G and H has a node set that
is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
P has an edge ((u,v),(x,y)) if and only if (u,v) is an edge in G
and (x,y) is an edge in H.
Sometimes referred to as the categorical product.
Parameters
----------
G, H: graphs
Networkx graphs.
Returns
-------
P: NetworkX graph
The tensor product of G and H. P will be a multi-graph if either G
or H is a multi-graph. Will be directed if G and H are directed,
and undirected if G and H are undirected.
Raises
------
NetworkXError
If G and H are not both directed or both undirected.
Notes
-----
Node attributes in P are two-tuple of the G and H node attributes.
Missing attributes are assigned None.
For example
>>> G = nx.Graph()
>>> H = nx.Graph()
>>> G.add_node(0,a1=True)
>>> H.add_node('a',a2='Spam')
>>> P = nx.tensor_product(G,H)
>>> P.nodes()
[(0, 'a')]
Edge attributes and edge keys (for multigraphs) are also copied to the
new product graph
"""
GH = _init_product_graph(G,H)
GH.add_nodes_from(_node_product(G,H))
GH.add_edges_from(_directed_edges_cross_edges(G,H))
if not GH.is_directed():
GH.add_edges_from(_undirected_edges_cross_edges(G,H))
GH.name = "Tensor product("+G.name+","+H.name+")"
return GH
def cartesian_product(G,H):
"""Return the Cartesian product of G and H.
The tensor product P of the graphs G and H has a node set that
is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
P has an edge ((u,v),(x,y)) if and only if (u,v) is an edge in G
and x==y or and (x,y) is an edge in H and u==v.
and (x,y) is an edge in H.
Parameters
----------
G, H: graphs
Networkx graphs.
Returns
-------
P: NetworkX graph
The Cartesian product of G and H. P will be a multi-graph if either G
or H is a multi-graph. Will be directed if G and H are directed,
and undirected if G and H are undirected.
Raises
------
NetworkXError
If G and H are not both directed or both undirected.
Notes
-----
Node attributes in P are two-tuple of the G and H node attributes.
Missing attributes are assigned None.
For example
>>> G = nx.Graph()
>>> H = nx.Graph()
>>> G.add_node(0,a1=True)
>>> H.add_node('a',a2='Spam')
>>> P = nx.cartesian_product(G,H)
>>> P.nodes()
[(0, 'a')]
Edge attributes and edge keys (for multigraphs) are also copied to the
new product graph
"""
if not G.is_directed() == H.is_directed():
raise nx.NetworkXError("G and H must be both directed or both undirected")
GH = _init_product_graph(G,H)
GH.add_nodes_from(_node_product(G,H))
GH.add_edges_from(_edges_cross_nodes(G,H))
GH.add_edges_from(_nodes_cross_edges(G,H))
GH.name = "Cartesian product("+G.name+","+H.name+")"
return GH
def lexicographic_product(G,H):
"""Return the lexicographic product of G and H.
The lexicographical product P of the graphs G and H has a node set that
is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
P has an edge ((u,v),(x,y)) if and only if (u,v) is an edge in G
or u==v and (x,y) is an edge in H.
Parameters
----------
G, H: graphs
Networkx graphs.
Returns
-------
P: NetworkX graph
The lexicographic product of G and H. P will be a multi-graph if either G
or H is a multi-graph. Will be directed if G and H are directed,
and undirected if G and H are undirected.
Raises
------
NetworkXError
If G and H are not both directed or both undirected.
Notes
-----
Node attributes in P are two-tuple of the G and H node attributes.
Missing attributes are assigned None.
For example
>>> G = nx.Graph()
>>> H = nx.Graph()
>>> G.add_node(0,a1=True)
>>> H.add_node('a',a2='Spam')
>>> P = nx.lexicographic_product(G,H)
>>> P.nodes()
[(0, 'a')]
Edge attributes and edge keys (for multigraphs) are also copied to the
new product graph
"""
GH = _init_product_graph(G,H)
GH.add_nodes_from(_node_product(G,H))
# Edges in G regardless of H designation
GH.add_edges_from(_edges_cross_nodes_and_nodes(G,H))
# For each x in G, only if there is an edge in H
GH.add_edges_from(_nodes_cross_edges(G,H))
GH.name = "Lexicographic product("+G.name+","+H.name+")"
return GH
def strong_product(G,H):
"""Return the strong product of G and H.
The strong product P of the graphs G and H has a node set that
is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
P has an edge ((u,v),(x,y)) if and only if
u==v and (x,y) is an edge in H, or
x==y and (u,v) is an edge in G, or
(u,v) is an edge in G and (x,y) is an edge in H.
Parameters
----------
G, H: graphs
Networkx graphs.
Returns
-------
P: NetworkX graph
The strong product of G and H. P will be a multi-graph if either G
or H is a multi-graph. Will be directed if G and H are directed,
and undirected if G and H are undirected.
Raises
------
NetworkXError
If G and H are not both directed or both undirected.
Notes
-----
Node attributes in P are two-tuple of the G and H node attributes.
Missing attributes are assigned None.
For example
>>> G = nx.Graph()
>>> H = nx.Graph()
>>> G.add_node(0,a1=True)
>>> H.add_node('a',a2='Spam')
>>> P = nx.strong_product(G,H)
>>> P.nodes()
[(0, 'a')]
Edge attributes and edge keys (for multigraphs) are also copied to the
new product graph
"""
GH = _init_product_graph(G,H)
GH.add_nodes_from(_node_product(G,H))
GH.add_edges_from(_nodes_cross_edges(G,H))
GH.add_edges_from(_edges_cross_nodes(G,H))
GH.add_edges_from(_directed_edges_cross_edges(G,H))
if not GH.is_directed():
GH.add_edges_from(_undirected_edges_cross_edges(G,H))
GH.name = "Strong product("+G.name+","+H.name+")"
return GH
|
eramirem/astroML
|
refs/heads/master
|
book_figures/chapter10/fig_fft_example.py
|
3
|
"""
Fast Fourier Transform Example
------------------------------
Figure 10.5
The discrete Fourier transform (bottom panel) for two noisy data sets shown in
the top panel. For 512 evenly sampled times t (dt = 0.977), points are drawn
from h(t) = a + sin(t)G(t), where G(t) is a Gaussian N(mu = 0,sigma = 10).
Gaussian noise with sigma = 0.05 (top data set) and 0.005 (bottom data set)
is added to signal h(t). The value of the offset a is 0.15 and 0, respectively.
The discrete Fourier transform is computed as described in Section 10.2.3.
For both noise realizations, the correct frequency f = (2pi)^-1 ~ 0.159 is
easily discernible in the bottom panel. Note that the height of peaks is the
same for both noise realizations. The large value of abs(H(f = 0)) for data
with larger noise is due to the vertical offset.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.fftpack import fft
from scipy.stats import norm
from astroML.fourier import PSD_continuous
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Draw the data
np.random.seed(1)
tj = np.linspace(-25, 25, 512)
hj = np.sin(tj)
hj *= norm(0, 10).pdf(tj)
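# Illustrative check (not in the original script): the carrier is sin(t),
# whose angular frequency is 1 rad per unit time, so the spectral peak is
# expected at f = 1 / (2 * np.pi) ~= 0.159, i.e. the value marked by the
# dotted vertical line in the lower panel.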
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
offsets = (0, 0.15)
colors = ('black', 'gray')
linewidths = (1, 2)
errors = (0.005, 0.05)
for (offset, color, error, linewidth) in zip(offsets, colors,
errors, linewidths):
# compute the PSD
err = np.random.normal(0, error, size=hj.shape)
hj_N = hj + err + offset
fk, PSD = PSD_continuous(tj, hj_N)
# plot the data and PSD
ax1.scatter(tj, hj_N, s=4, c=color, lw=0)
ax1.plot(tj, 0 * tj + offset, '-', c=color, lw=1)
ax2.plot(fk, PSD, '-', c=color, lw=linewidth)
# vertical line marking the expected peak location
ax2.plot([0.5 / np.pi, 0.5 / np.pi], [-0.1, 1], ':k', lw=1)
ax1.set_xlim(-25, 25)
ax1.set_ylim(-0.1, 0.3001)
ax1.set_xlabel('$t$')
ax1.set_ylabel('$h(t)$')
ax1.yaxis.set_major_locator(plt.MultipleLocator(0.1))
ax2.set_xlim(0, 0.8)
ax2.set_ylim(-0.101, 0.801)
ax2.set_xlabel('$f$')
ax2.set_ylabel('$PSD(f)$')
plt.show()
|
leonardocsantoss/ehventos
|
refs/heads/master
|
lib/reportlab/graphics/charts/axes.py
|
10
|
#Copyright ReportLab Europe Ltd. 2000-2010
#see license.txt for license details
__version__=''' $Id: axes.py 3748 2010-07-27 09:36:33Z rgbecker $ '''
__doc__="""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below the X-axis or to the left of the Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a
numeric quantity; each value axis has a real number quantity
associated with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axes tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString, EitherOr, Validator, _SequenceTypes, NoneOr, isInstanceOf, \
isNormalDate
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, PolyLine, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.charts.utils import nextRoundNumber
import copy
# Helpers.
def _findMinMaxValue(V, x, default, func, special=None):
if isinstance(V[0][0],_SequenceTypes):
if special:
f=lambda T,x=x,special=special,func=func: special(T,x,func)
else:
f=lambda T,x=x: T[x]
V=map(lambda e,f=f: map(f,e),V)
V = filter(len,map(lambda x: filter(lambda x: x is not None,x),V))
if len(V)==0: return default
return func(map(func,V))
def _findMin(V, x, default,special=None):
'''find minimum over V[i][x]'''
return _findMinMaxValue(V,x,default,min,special=special)
def _findMax(V, x, default,special=None):
'''find maximum over V[i][x]'''
return _findMinMaxValue(V,x,default,max,special=special)
def _allInt(values):
'''true if all values are int'''
for v in values:
try:
if int(v)!=v: return 0
except:
return 0
return 1
class AxisLineAnnotation:
'''Create a grid like line using the given user value to draw the line
kwds may contain
startOffset offset from the default grid start position
endOffset offset from the default grid end position
scaleValue True/not given --> scale the value
otherwise use the absolute value
lo lowest coordinate to draw default 0
hi highest coordinate to draw at default = length
drawAtLimit True draw line at appropriate limit if its coordinate exceeds the lo, hi range
False ignore if it's outside the range
all Line keywords are acceptable
'''
def __init__(self,v,**kwds):
self._v = v
self._kwds = kwds
def __call__(self,axis):
kwds = self._kwds.copy()
scaleValue = kwds.pop('scaleValue',True)
if axis.isYAxis:
offs = axis._x
else:
offs = axis._y
s = kwds.pop('start',None)
e = kwds.pop('end',None)
if s is None or e is None:
dim = getattr(getattr(axis,'joinAxis',None),'getGridDims',None)
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
hi = kwds.pop('hi',axis._length)
lo = kwds.pop('lo',0)
lo,hi=min(lo,hi),max(lo,hi)
drawAtLimit = kwds.pop('drawAtLimit',False)
if not scaleValue:
oaglp = axis._get_line_pos
axis._get_line_pos = lambda x: x
try:
v = self._v
func = axis._getLineFunc(s-offs,e-offs,kwds.pop('parent',None))
if not hasattr(axis,'_tickValues'):
axis._pseudo_configure()
d = axis._get_line_pos(v)
if d<lo or d>hi:
if not drawAtLimit: return None
if d<lo:
d = lo
else:
d = hi
axis._get_line_pos = lambda x: d
L = func(v)
for k,v in kwds.iteritems():
setattr(L,k,v)
finally:
if not scaleValue:
axis._get_line_pos = oaglp
return L
class TickLU:
'''lookup special cases for tick values'''
def __init__(self,*T,**kwds):
self.accuracy = kwds.pop('accuracy',1e-8)
self.T = T
def __contains__(self,t):
accuracy = self.accuracy
for x,v in self.T:
if abs(x-t)<accuracy:
return True
return False
def __getitem__(self,t):
accuracy = self.accuracy
for x,v in self.T:
if abs(x-t)<self.accuracy:
return v
raise IndexError('cannot locate index %r' % t)
class _AxisG(Widget):
def _get_line_pos(self,v):
v = self.scale(v)
try:
v = v[0]
except:
pass
return v
def _cxLine(self,x,start,end):
x = self._get_line_pos(x)
return Line(x, self._y + start, x, self._y + end)
def _cyLine(self,y,start,end):
y = self._get_line_pos(y)
return Line(self._x + start, y, self._x + end, y)
def _cxLine3d(self,x,start,end,_3d_dx,_3d_dy):
x = self._get_line_pos(x)
y0 = self._y + start
y1 = self._y + end
y0, y1 = min(y0,y1),max(y0,y1)
x1 = x + _3d_dx
return PolyLine([x,y0,x1,y0+_3d_dy,x1,y1+_3d_dy],strokeLineJoin=1)
def _cyLine3d(self,y,start,end,_3d_dx,_3d_dy):
y = self._get_line_pos(y)
x0 = self._x + start
x1 = self._x + end
x0, x1 = min(x0,x1),max(x0,x1)
y1 = y + _3d_dy
return PolyLine([x0,y,x0+_3d_dx,y1,x1+_3d_dx,y1],strokeLineJoin=1)
def _getLineFunc(self, start, end, parent=None):
_3d_dx = getattr(parent,'_3d_dx',None)
if _3d_dx is not None:
_3d_dy = getattr(parent,'_3d_dy',None)
f = self.isYAxis and self._cyLine3d or self._cxLine3d
return lambda v, s=start, e=end, f=f,_3d_dx=_3d_dx,_3d_dy=_3d_dy: f(v,s,e,_3d_dx=_3d_dx,_3d_dy=_3d_dy)
else:
f = self.isYAxis and self._cyLine or self._cxLine
return lambda v, s=start, e=end, f=f: f(v,s,e)
def _makeLines(self,g,start,end,strokeColor,strokeWidth,strokeDashArray,strokeLineJoin,strokeLineCap,strokeMiterLimit,parent=None,exclude=[],specials={}):
func = self._getLineFunc(start,end,parent)
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
if exclude:
exf = self.isYAxis and (lambda l: l.y1 in exclude) or (lambda l: l.x1 in exclude)
else:
exf = None
for t in self._tickValues:
L = func(t)
if exf and exf(L): continue
L.strokeColor = strokeColor
L.strokeWidth = strokeWidth
L.strokeDashArray = strokeDashArray
L.strokeLineJoin = strokeLineJoin
L.strokeLineCap = strokeLineCap
L.strokeMiterLimit = strokeMiterLimit
if t in specials:
for a,v in specials[t].iteritems():
setattr(L,a,v)
g.add(L)
def makeGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
c = self.gridStrokeColor
w = self.gridStrokeWidth or 0
if w and c and self.visibleGrid:
s = self.gridStart
e = self.gridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
self._makeLines(g,s-offs,e-offs,c,w,self.gridStrokeDashArray,self.gridStrokeLineJoin,self.gridStrokeLineCap,self.gridStrokeMiterLimit,parent=parent,exclude=exclude,specials=getattr(self,'_gridSpecials',{}))
self._makeSubGrid(g,dim,parent,exclude=[])
def _makeSubGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
if not (getattr(self,'visibleSubGrid',0) and self.subTickNum>0): return
c = self.subGridStrokeColor
w = self.subGridStrokeWidth or 0
if not(w and c): return
s = self.subGridStart
e = self.subGridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
otv = self._calcSubTicks()
try:
self._makeLines(g,s-offs,e-offs,c,w,self.subGridStrokeDashArray,self.subGridStrokeLineJoin,self.subGridStrokeLineCap,self.subGridStrokeMiterLimit,parent=parent,exclude=exclude)
finally:
self._tickValues = otv
def getGridDims(self,start=None,end=None):
if start is None: start = (self._x,self._y)[self.isYAxis]
if end is None: end = start+self._length
return start,end
def isYAxis(self):
if getattr(self,'_dataIndex',None)==1: return True
acn = self.__class__.__name__
return acn[0]=='Y' or acn[:4]=='AdjY'
isYAxis = property(isYAxis)
def isXAxis(self):
if getattr(self,'_dataIndex',None)==0: return True
acn = self.__class__.__name__
return acn[0]=='X' or acn[:11]=='NormalDateX'
isXAxis = property(isXAxis)
def addAnnotations(self,g,A=None):
if A is None: A = getattr(self,'annotations',[])
for x in A:
g.add(x(self))
def _splitAnnotations(self):
A = getattr(self,'annotations',[])[:]
D = {}
for v in ('early','beforeAxis','afterAxis','beforeTicks',
'afterTicks','beforeTickLabels',
'afterTickLabels','late'):
R = [].append
P = [].append
for a in A:
if getattr(a,v,0):
R(a)
else:
P(a)
D[v] = R.__self__
A[:] = P.__self__
D['late'] += A
return D
def draw(self):
g = Group()
A = self._splitAnnotations()
self.addAnnotations(g,A['early'])
if self.visible:
self.addAnnotations(g,A['beforeAxis'])
g.add(self.makeAxis())
self.addAnnotations(g,A['afterAxis'])
self.addAnnotations(g,A['beforeTicks'])
g.add(self.makeTicks())
self.addAnnotations(g,A['afterTicks'])
self.addAnnotations(g,A['beforeTickLabels'])
g.add(self.makeTickLabels())
self.addAnnotations(g,A['afterTickLabels'])
self.addAnnotations(g,A['late'])
return g
class CALabel(Label):
_attrMap = AttrMap(BASE=Label,
labelPosFrac = AttrMapValue(isNumber, desc='where in the category range [0,1] the labels should be anchored'),
)
def __init__(self,**kw):
Label.__init__(self,**kw)
self._setKeywords(
labelPosFrac = 0.5,
)
# Category axes.
class CategoryAxis(_AxisG):
"Abstract category axis, unusable in itself."
_nodoc = 1
_attrMap = AttrMap(
visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
labels = AttrMapValue(None, desc='Handle of the axis labels.'),
categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
style = AttrMapValue(OneOf('parallel','stacked','parallel_3d'),"How common category bars are plotted"),
labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
tickShift = AttrMapValue(isBoolean, desc='Tick shift typically'),
loPad = AttrMapValue(isNumber, desc='extra inner space before start of the axis'),
hiPad = AttrMapValue(isNumber, desc='extra inner space after end of the axis'),
annotations = AttrMapValue(None,desc='list of annotations'),
loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
)
def __init__(self):
assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"
# private properties set by methods. The initial values
# here are to make demos easy; they would always be
# overridden in real life.
self._x = 50
self._y = 50
self._length = 100
self._catCount = 0
# public properties
self.visible = 1
self.visibleAxis = 1
self.visibleTicks = 1
self.visibleLabels = 1
self.visibleGrid = 0
self.drawGridLast = False
self.strokeWidth = 1
self.strokeColor = STATE_DEFAULTS['strokeColor']
self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStrokeLineJoin = self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.gridStrokeLineCap = self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.gridStrokeMiterLimit = self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.gridStrokeWidth = 0.25
self.gridStrokeColor = STATE_DEFAULTS['strokeColor']
self.gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStart = self.gridEnd = None
self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.labels = TypedPropertyCollection(CALabel)
# if None, they don't get labels. If provided,
# you need one name per data point and they are
# used for label text.
self.categoryNames = None
self.joinAxis = None
self.joinAxisPos = None
self.joinAxisMode = None
self.labelAxisMode = 'axis'
self.reverseDirection = 0
self.style = 'parallel'
#various private things which need to be initialized
self._labelTextFormat = None
self.tickShift = 0
self.loPad = 0
self.hiPad = 0
self.loLLen = 0
self.hiLLen = 0
def setPosition(self, x, y, length):
# ensure floating point
self._x = float(x)
self._y = float(y)
self._length = float(length)
def configure(self, multiSeries,barWidth=None):
self._catCount = max(map(len,multiSeries))
self._barWidth = barWidth or ((self._length-self.loPad-self.hiPad)/float(self._catCount or 1))
self._calcTickmarkPositions()
def _calcTickmarkPositions(self):
n = self._catCount
if self.tickShift:
self._tickValues = [t+0.5 for t in xrange(n)]
else:
if self.reverseDirection:
self._tickValues = range(-1,n)
else:
self._tickValues = range(n+1)
def _scale(self,idx):
if self.reverseDirection: idx = self._catCount-idx-1
return idx
def _assertYAxis(axis):
assert axis.isYAxis, "Cannot connect to other axes (%s), but Y- ones." % axis.__class__.__name__
def _assertXAxis(axis):
assert axis.isXAxis, "Cannot connect to other axes (%s), but X- ones." % axis.__class__.__name__
class _XTicks:
_tickTweaks = 0 #try 0.25-0.5
def _drawTicksInner(self,tU,tD,g):
if tU or tD:
sW = self.strokeWidth
tW = self._tickTweaks
if tW:
if tU and not tD:
tD = tW*sW
elif tD and not tU:
tU = tW*sW
self._makeLines(g,tU,-tD,self.strokeColor,sW,self.strokeDashArray,self.strokeLineJoin,self.strokeLineCap,self.strokeMiterLimit)
def _drawTicks(self,tU,tD,g=None):
g = g or Group()
if self.visibleTicks:
self._drawTicksInner(tU,tD,g)
return g
def _calcSubTicks(self):
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
otv = self._tickValues
if not hasattr(self,'_subTickValues'):
acn = self.__class__.__name__
if acn[:11]=='NormalDateX':
iFuzz = 0
dCnv = int
else:
iFuzz = 1e-8
dCnv = lambda x:x
OTV = [tv for tv in otv if getattr(tv,'_doSubTicks',1)]
T = [].append
nst = int(self.subTickNum)
i = len(OTV)
if i<2:
self._subTickValues = []
else:
if i==2:
dst = OTV[1]-OTV[0]
elif i==3:
dst = max(OTV[1]-OTV[0],OTV[2]-OTV[1])
else:
i >>= 1
dst = OTV[i+1] - OTV[i]
fuzz = dst*iFuzz
vn = self._valueMin+fuzz
vx = self._valueMax-fuzz
if OTV[0]>vn: OTV.insert(0,OTV[0]-dst)
if OTV[-1]<vx: OTV.append(OTV[-1]+dst)
dst /= float(nst+1)
for i,x in enumerate(OTV[:-1]):
for j in xrange(nst):
t = x+dCnv((j+1)*dst)
if t<=vn or t>=vx: continue
T(t)
self._subTickValues = T.__self__
self._tickValues = self._subTickValues
return otv
def _drawSubTicks(self,tU,tD,g):
if getattr(self,'visibleSubTicks',0) and self.subTickNum>0:
otv = self._calcSubTicks()
try:
self._drawTicksInner(tU,tD,g)
finally:
self._tickValues = otv
def makeTicks(self):
yold=self._y
try:
self._y = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
g = self._drawTicks(self.tickUp,self.tickDown)
self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
return g
finally:
self._y = yold
def _labelAxisPos(self,mode=None):
axis = self.joinAxis
if axis:
mode = mode or self.labelAxisMode
if mode == 'low':
return axis._y
elif mode == 'high':
return axis._y + axis._length
return self._y
class _YTicks(_XTicks):
def _labelAxisPos(self,mode=None):
axis = self.joinAxis
if axis:
mode = mode or self.labelAxisMode
if mode == 'low':
return axis._x
elif mode == 'high':
return axis._x + axis._length
return self._x
def makeTicks(self):
xold=self._x
try:
self._x = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
g = self._drawTicks(self.tickRight,self.tickLeft)
self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
return g
finally:
self._x = xold
class XCategoryAxis(_XTicks,CategoryAxis):
"X/category axis"
_attrMap = AttrMap(BASE=CategoryAxis,
tickUp = AttrMapValue(isNumber,
desc='Tick length up the axis.'),
tickDown = AttrMapValue(isNumber,
desc='Tick length down the axis.'),
joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
)
_dataIndex = 0
def __init__(self):
CategoryAxis.__init__(self)
self.labels.boxAnchor = 'n' #north - top edge
self.labels.dy = -5
# ultra-simple tick marks for now go between categories
# and have same line style as axis - need more
self.tickUp = 0 # how far into chart does tick go?
self.tickDown = 5 # how far below axis does tick go?
def demo(self):
self.setPosition(30, 70, 140)
self.configure([(10,20,30,40,50)])
self.categoryNames = ['One','Two','Three','Four','Five']
# all labels top-centre aligned apart from the last
self.labels.boxAnchor = 'n'
self.labels[4].boxAnchor = 'e'
self.labels[4].angle = 90
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, yAxis, mode='bottom', pos=None):
"Join with y-axis using some mode."
_assertYAxis(yAxis)
if mode == 'bottom':
self._x = yAxis._x
self._y = yAxis._y
elif mode == 'top':
self._x = yAxis._x
self._y = yAxis._y + yAxis._length
elif mode == 'value':
self._x = yAxis._x
self._y = yAxis.scale(pos)
elif mode == 'points':
self._x = yAxis._x
self._y = pos
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
jap = self.joinAxisPos
jta = self.joinToAxis
if jam in ('bottom', 'top'):
jta(ja, mode=jam)
elif jam in ('value', 'points'):
jta(ja, mode=jam, pos=jap)
def scale(self, idx):
"""returns the x position and width in drawing units of the slice"""
return (self._x + self.loPad + self._scale(idx)*self._barWidth, self._barWidth)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
categoryNames = self.categoryNames
if categoryNames is not None:
catCount = self._catCount
n = len(categoryNames)
reverseDirection = self.reverseDirection
barWidth = self._barWidth
_y = self._labelAxisPos()
_x = self._x
for i in xrange(catCount):
if reverseDirection: ic = catCount-i-1
else: ic = i
if ic>=n: continue
label=i-catCount
if label in self.labels:
label = self.labels[label]
else:
label = self.labels[i]
lpf = label.labelPosFrac
x = _x + (i+lpf) * barWidth
label.setOrigin(x, _y)
label.setText(categoryNames[ic] or '')
g.add(label)
return g
class YCategoryAxis(_YTicks,CategoryAxis):
"Y/category axis"
_attrMap = AttrMap(BASE=CategoryAxis,
tickLeft = AttrMapValue(isNumber,
desc='Tick length left of the axis.'),
tickRight = AttrMapValue(isNumber,
desc='Tick length right of the axis.'),
joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
)
_dataIndex = 1
def __init__(self):
CategoryAxis.__init__(self)
self.labels.boxAnchor = 'e' #east - right edge
self.labels.dx = -5
# ultra-simple tick marks for now go between categories
# and have same line style as axis - need more
self.tickLeft = 5 # how far left of axis does tick go?
self.tickRight = 0 # how far right of axis does tick go?
def demo(self):
self.setPosition(50, 10, 80)
self.configure([(10,20,30)])
self.categoryNames = ['One','Two','Three']
# all labels top-centre aligned apart from the last
self.labels.boxAnchor = 'e'
self.labels[2].boxAnchor = 's'
self.labels[2].angle = 90
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, xAxis, mode='left', pos=None):
"Join with x-axis using some mode."
_assertXAxis(xAxis)
if mode == 'left':
self._x = xAxis._x * 1.0
self._y = xAxis._y * 1.0
elif mode == 'right':
self._x = (xAxis._x + xAxis._length) * 1.0
self._y = xAxis._y * 1.0
elif mode == 'value':
self._x = xAxis.scale(pos) * 1.0
self._y = xAxis._y * 1.0
elif mode == 'points':
self._x = pos * 1.0
self._y = xAxis._y * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
jap = self.joinAxisPos
jta = self.joinToAxis
if jam in ('left', 'right'):
jta(ja, mode=jam)
elif jam in ('value', 'points'):
jta(ja, mode=jam, pos=jap)
def scale(self, idx):
"Returns the y position and width in drawing units of the slice."
return (self._y + self._scale(idx)*self._barWidth, self._barWidth)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
categoryNames = self.categoryNames
if categoryNames is not None:
catCount = self._catCount
n = len(categoryNames)
reverseDirection = self.reverseDirection
barWidth = self._barWidth
labels = self.labels
_x = self._labelAxisPos()
_y = self._y
for i in xrange(catCount):
if reverseDirection: ic = catCount-i-1
else: ic = i
if ic>=n: continue
label=i-catCount
if label in self.labels:
label = self.labels[label]
else:
label = self.labels[i]
lpf = label.labelPosFrac
y = _y + (i+lpf) * barWidth
label.setOrigin(_x, y)
label.setText(categoryNames[ic] or '')
g.add(label)
return g
class TickLabeller:
'''Abstract base class which may be used to indicate a change
in the call signature for callable label formats
'''
def __call__(self,axis,value):
return 'Abstract class instance called'
# Value axes.
class ValueAxis(_AxisG):
"Abstract value axis, unusable in itself."
_attrMap = AttrMap(
forceZero = AttrMapValue(EitherOr((isBoolean,OneOf('near'))), desc='Ensure zero in range if true.'),
visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
minimumTickSpacing = AttrMapValue(isNumber, desc='Minimum value for distance between ticks.'),
maximumTicks = AttrMapValue(isNumber, desc='Maximum number of ticks.'),
labels = AttrMapValue(None, desc='Handle of the axis labels.'),
labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
labelTextFormat = AttrMapValue(None, desc='Formatting string or function used for axis labels.'),
labelTextPostFormat = AttrMapValue(None, desc='Extra Formatting string.'),
labelTextScale = AttrMapValue(isNumberOrNone, desc='Scaling for label tick values.'),
valueMin = AttrMapValue(isNumberOrNone, desc='Minimum value on axis.'),
valueMax = AttrMapValue(isNumberOrNone, desc='Maximum value on axis.'),
valueStep = AttrMapValue(isNumberOrNone, desc='Step size used between ticks.'),
valueSteps = AttrMapValue(isListOfNumbersOrNone, desc='List of step sizes used between ticks.'),
avoidBoundFrac = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Fraction of interval to allow above and below.'),
avoidBoundSpace = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Space to allow above and below.'),
abf_ignore_zero = AttrMapValue(EitherOr((NoneOr(isBoolean),SequenceOf(isBoolean,emptyOK=0,lo=2,hi=2))), desc='Set to True to make the avoidBoundFrac calculations treat zero as non-special'),
rangeRound=AttrMapValue(OneOf('none','both','ceiling','floor'),'How to round the axis limits'),
zrangePref = AttrMapValue(isNumberOrNone, desc='Zero range axis limit preference.'),
style = AttrMapValue(OneOf('normal','stacked','parallel_3d'),"How values are plotted!"),
skipEndL = AttrMapValue(OneOf('none','start','end','both'), desc='Skip high/low tick labels'),
origShiftIPC = AttrMapValue(isNumberOrNone, desc='Lowest label shift interval ratio.'),
origShiftMin = AttrMapValue(isNumberOrNone, desc='Minimum amount to shift.'),
origShiftSpecialValue = AttrMapValue(isNumberOrNone, desc='special value for shift'),
tickAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the ticks"),
reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
annotations = AttrMapValue(None,desc='list of annotations'),
loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
subTickNum = AttrMapValue(isNumber, desc='Number of axis sub ticks, if >0'),
subTickLo = AttrMapValue(isNumber, desc='sub tick down or left'),
subTickHi = AttrMapValue(isNumber, desc='sub tick up or right'),
visibleSubTicks = AttrMapValue(isBoolean, desc='Display axis sub ticks, if true.'),
visibleSubGrid = AttrMapValue(isBoolean, desc='Display axis sub grid, if true.'),
subGridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
subGridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
subGridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
subGridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
subGridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
subGridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
subGridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
subGridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
keepTickLabelsInside = AttrMapValue(isBoolean, desc='Ensure tick labels do not project beyond bounds of axis if true'),
)
def __init__(self,**kw):
assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
self._setKeywords(**kw)
self._setKeywords(
_configured = 0,
# private properties set by methods. The initial values
# here are to make demos easy; they would always be
# overridden in real life.
_x = 50,
_y = 50,
_length = 100,
# public properties
visible = 1,
visibleAxis = 1,
visibleLabels = 1,
visibleTicks = 1,
visibleGrid = 0,
forceZero = 0,
strokeWidth = 1,
strokeColor = STATE_DEFAULTS['strokeColor'],
strokeDashArray = STATE_DEFAULTS['strokeDashArray'],
strokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
strokeLineCap = STATE_DEFAULTS['strokeLineCap'],
strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
gridStrokeWidth = 0.25,
gridStrokeColor = STATE_DEFAULTS['strokeColor'],
gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
gridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
gridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
gridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
gridStart = None,
gridEnd = None,
drawGridLast = False,
visibleSubGrid = 0,
visibleSubTicks = 0,
subTickNum = 0,
subTickLo = 0,
subTickHi = 0,
subGridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
subGridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
subGridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
subGridStrokeWidth = 0.25,
subGridStrokeColor = STATE_DEFAULTS['strokeColor'],
subGridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
subGridStart = None,
subGridEnd = None,
labels = TypedPropertyCollection(Label),
keepTickLabelsInside = 0,
# how close can the ticks be?
minimumTickSpacing = 10,
maximumTicks = 7,
# a format string like '%0.2f'
# or a function which takes the value as an argument and returns a string
_labelTextFormat = None,
labelAxisMode = 'axis',
labelTextFormat = None,
labelTextPostFormat = None,
labelTextScale = None,
# if set to None, these will be worked out for you.
# if you override any or all of them, your values
# will be used.
valueMin = None,
valueMax = None,
valueStep = None,
avoidBoundFrac = None,
avoidBoundSpace = None,
abf_ignore_zero = False,
rangeRound = 'none',
zrangePref = 0,
style = 'normal',
skipEndL='none',
origShiftIPC = None,
origShiftMin = None,
origShiftSpecialValue = None,
tickAxisMode = 'axis',
reverseDirection=0,
loLLen=0,
hiLLen=0,
)
self.labels.angle = 0
def setPosition(self, x, y, length):
# ensure floating point
self._x = float(x)
self._y = float(y)
self._length = float(length)
def configure(self, dataSeries):
"""Let the axis configure its scale and range based on the data.
Called after setPosition. Let it look at a list of lists of
numbers determine the tick mark intervals. If valueMin,
valueMax and valueStep are configured then it
will use them; if any of them are set to None it
will look at the data and make some sensible decision.
You may override this to build custom axes with
irregular intervals. It creates an internal
variable self._values, which is a list of numbers
to use in plotting.
"""
self._setRange(dataSeries)
self._configure_end()
def _configure_end(self):
self._calcTickmarkPositions()
self._calcScaleFactor()
self._configured = 1
def _getValueStepAndTicks(self, valueMin, valueMax,cache={}):
try:
K = (valueMin,valueMax)
r = cache[K]
except:
self._valueMin = valueMin
self._valueMax = valueMax
valueStep,T = self._calcStepAndTickPositions()
r = cache[K] = valueStep, T, valueStep*1e-8
return r
def _setRange(self, dataSeries):
"""Set minimum and maximum axis values.
The dataSeries argument is assumed to be a list of data
vectors. Each vector is itself a list or tuple of numbers.
        Sets self._valueMin and self._valueMax.
"""
oMin = valueMin = self.valueMin
oMax = valueMax = self.valueMax
rangeRound = self.rangeRound
if valueMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0)
if valueMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0)
if valueMin == valueMax:
if valueMax==0:
if oMin is None and oMax is None:
zrp = getattr(self,'zrangePref',0)
if zrp>0:
valueMax = zrp
valueMin = 0
elif zrp<0:
valueMax = 0
valueMin = zrp
else:
valueMax = 0.01
valueMin = -0.01
elif self.valueMin is None:
valueMin = -0.01
else:
valueMax = 0.01
else:
if valueMax>0:
valueMax = 1.2*valueMax
valueMin = 0.0
else:
valueMax = 0.0
valueMin = 1.2*valueMin
if getattr(self,'_bubblePlot',None):
bubbleMax = float(_findMax(dataSeries,2,0))
frac=.25
bubbleV=frac*(valueMax-valueMin)
self._bubbleV = bubbleV
self._bubbleMax = bubbleMax
self._bubbleRadius = frac*self._length
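            # For bubble plots each point's effective extent is widened by a
            # radius proportional to sqrt(value/bubbleMax), so the helper below
            # adjusts the min/max search accordingly.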
def special(T,x,func,bubbleV=bubbleV,bubbleMax=bubbleMax):
try:
v = T[2]
except IndexError:
                        v = bubbleMax*0.1
bubbleV *= (v/bubbleMax)**0.5
return func(T[x]+bubbleV,T[x]-bubbleV)
if oMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0,special=special)
if oMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0,special=special)
cMin = valueMin
cMax = valueMax
forceZero = self.forceZero
if forceZero:
if forceZero=='near':
forceZero = min(abs(valueMin),abs(valueMax)) <= 5*(valueMax-valueMin)
if forceZero:
if valueMax<0: valueMax=0
elif valueMin>0: valueMin = 0
abf = self.avoidBoundFrac
do_rr = not getattr(self,'valueSteps',None)
do_abf = abf and do_rr
if not isinstance(abf,_SequenceTypes):
abf = abf, abf
abfiz = getattr(self,'abf_ignore_zero', False)
if not isinstance(abfiz,_SequenceTypes):
abfiz = abfiz, abfiz
        do_rr = rangeRound != 'none' and do_rr
if do_rr:
rrn = rangeRound in ['both','floor']
rrx = rangeRound in ['both','ceiling']
else:
rrn = rrx = 0
abS = self.avoidBoundSpace
do_abs = abS
if do_abs:
if not isinstance(abS,_SequenceTypes):
abS = abS, abS
aL = float(self._length)
go = do_rr or do_abf or do_abs
cache = {}
iter = 0
while go and iter<=10:
iter += 1
go = 0
if do_abf or do_abs:
valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
if do_abf:
i0 = valueStep*abf[0]
i1 = valueStep*abf[1]
else:
i0 = i1 = 0
if do_abs:
sf = (valueMax-valueMin)/aL
i0 = max(i0,abS[0]*sf)
i1 = max(i1,abS[1]*sf)
if rrn: v = T[0]
else: v = valueMin
u = cMin-i0
if (abfiz[0] or abs(v)>fuzz) and v>=u+fuzz:
valueMin = u
go = 1
if rrx: v = T[-1]
else: v = valueMax
u = cMax+i1
if (abfiz[1] or abs(v)>fuzz) and v<=u-fuzz:
valueMax = u
go = 1
if do_rr:
valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
if rrn:
if valueMin<T[0]-fuzz:
valueMin = T[0]-valueStep
go = 1
else:
go = valueMin>=T[0]+fuzz
valueMin = T[0]
if rrx:
if valueMax>T[-1]+fuzz:
valueMax = T[-1]+valueStep
go = 1
else:
go = valueMax<=T[-1]-fuzz
valueMax = T[-1]
if iter and not go:
self._computedValueStep = valueStep
else:
self._computedValueStep = None
self._valueMin = valueMin
self._valueMax = valueMax
origShiftIPC = self.origShiftIPC
origShiftMin = self.origShiftMin
if origShiftMin is not None or origShiftIPC is not None:
origShiftSpecialValue = self.origShiftSpecialValue
self._calcValueStep()
valueMax, valueMin = self._valueMax, self._valueMin
if origShiftSpecialValue is None or abs(origShiftSpecialValue-valueMin)<1e-6:
if origShiftIPC:
m = origShiftIPC*self._valueStep
else:
m = 0
if origShiftMin:
m = max(m,(valueMax-valueMin)*origShiftMin/self._length)
self._valueMin -= m
self._rangeAdjust()
def _pseudo_configure(self):
self._valueMin = self.valueMin
self._valueMax = self.valueMax
self._configure_end()
def _rangeAdjust(self):
"""Override this if you want to alter the calculated range.
        E.g. if you want a minimum range of 30% or don't want 100%
as the first point.
"""
pass
def _adjustAxisTicks(self):
'''Override if you want to put slack at the ends of the axis
eg if you don't want the last tick to be at the bottom etc
'''
pass
def _calcScaleFactor(self):
"""Calculate the axis' scale factor.
This should be called only *after* the axis' range is set.
Returns a number.
"""
self._scaleFactor = self._length / float(self._valueMax - self._valueMin)
return self._scaleFactor
def _calcStepAndTickPositions(self):
valueStep = getattr(self,'_computedValueStep',None)
if valueStep:
del self._computedValueStep
self._valueStep = valueStep
else:
self._calcValueStep()
valueStep = self._valueStep
valueMin = self._valueMin
valueMax = self._valueMax
fuzz = 1e-8*valueStep
rangeRound = self.rangeRound
i0 = int(float(valueMin)/valueStep)
v = i0*valueStep
if rangeRound in ('both','floor'):
if v>valueMin+fuzz: i0 -= 1
elif v<valueMin-fuzz: i0 += 1
i1 = int(float(valueMax)/valueStep)
v = i1*valueStep
if rangeRound in ('both','ceiling'):
if v<valueMax-fuzz: i1 += 1
elif v>valueMax+fuzz: i1 -= 1
return valueStep,[i*valueStep for i in xrange(i0,i1+1)]
def _calcTickPositions(self):
return self._calcStepAndTickPositions()[1]
def _calcTickmarkPositions(self):
"""Calculate a list of tick positions on the axis. Returns a list of numbers."""
self._tickValues = getattr(self,'valueSteps',None)
if self._tickValues: return self._tickValues
self._tickValues = self._calcTickPositions()
self._adjustAxisTicks()
return self._tickValues
def _calcValueStep(self):
'''Calculate _valueStep for the axis or get from valueStep.'''
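        # The raw interval is the data range divided by the number of intervals
        # permitted by maximumTicks and minimumTickSpacing; nextRoundNumber then
        # rounds it up to the next 'nice' value.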
if self.valueStep is None:
rawRange = self._valueMax - self._valueMin
rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing))
self._valueStep = nextRoundNumber(rawInterval)
else:
self._valueStep = self.valueStep
def _allIntTicks(self):
return _allInt(self._tickValues)
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
f = self._labelTextFormat # perhaps someone already set it
if f is None:
f = self.labelTextFormat or (self._allIntTicks() and '%.0f' or str)
elif f is str and self._allIntTicks(): f = '%.0f'
elif hasattr(f,'calcPlaces'):
f.calcPlaces(self._tickValues)
post = self.labelTextPostFormat
scl = self.labelTextScale
pos = [self._x, self._y]
d = self._dataIndex
pos[1-d] = self._labelAxisPos()
labels = self.labels
if self.skipEndL!='none':
if self.isXAxis:
sk = self._x
else:
sk = self._y
if self.skipEndL=='start':
sk = [sk]
else:
sk = [sk,sk+self._length]
if self.skipEndL=='end':
del sk[0]
else:
sk = []
nticks = len(self._tickValues)
nticks1 = nticks - 1
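        # Tick labels may be configured by position from the start (labels[i]) or
        # from the end (labels[i-nticks], a negative key); an explicitly set
        # negative entry takes precedence below.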
for i,tick in enumerate(self._tickValues):
label = i-nticks
if label in labels:
label = labels[label]
else:
label = labels[i]
if f and label.visible:
v = self.scale(tick)
if sk:
for skv in sk:
if abs(skv-v)<1e-6:
v = None
break
if v is not None:
if scl is not None:
t = tick*scl
else:
t = tick
if type(f) is str: txt = f % t
elif isinstance(f,_SequenceTypes):
#it's a list, use as many items as we get
if i < len(f):
txt = f[i]
else:
txt = ''
elif hasattr(f,'__call__'):
if isinstance(f,TickLabeller):
txt = f(self,t)
else:
txt = f(t)
else:
                        raise ValueError('Invalid labelTextFormat %s' % f)
if post: txt = post % txt
pos[d] = v
label.setOrigin(*pos)
label.setText(txt)
#special property to ensure a label doesn't project beyond the bounds of an x-axis
if self.keepTickLabelsInside:
if isinstance(self, XValueAxis): #not done yet for y axes
a_x = self._x
if not i: #first one
x0, y0, x1, y1 = label.getBounds()
if x0 < a_x:
label = label.clone(dx=label.dx + a_x - x0)
if i==nticks1: #final one
a_x1 = a_x +self._length
x0, y0, x1, y1 = label.getBounds()
if x1 > a_x1:
label=label.clone(dx=label.dx-x1+a_x1)
g.add(label)
return g
def scale(self, value):
"""Converts a numeric value to a plotarea position.
        The chart first configures the axis, then asks it to
        map each data value to a drawing coordinate.
"""
assert self._configured, "Axis cannot scale numbers before it is configured"
if value is None: value = 0
#this could be made more efficient by moving the definition of org and sf into the configuration
org = (self._x, self._y)[self._dataIndex]
sf = self._scaleFactor
if self.reverseDirection:
sf = -sf
org += self._length
return org + sf*(value - self._valueMin)
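# Typical use of a value axis (illustrative sketch; runnable variants appear in
# the sample functions at the end of this module):
#     axis = XValueAxis()
#     axis.setPosition(50, 50, 300)        # x, y, length in points
#     axis.configure([(10, 20, 30, 42)])   # list of data vectors
#     pos = axis.scale(30)                 # data value -> drawing coordinate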
class XValueAxis(_XTicks,ValueAxis):
"X/value axis"
_attrMap = AttrMap(BASE=ValueAxis,
tickUp = AttrMapValue(isNumber,
desc='Tick length up the axis.'),
tickDown = AttrMapValue(isNumber,
desc='Tick length down the axis.'),
joinAxis = AttrMapValue(None,
desc='Join both axes if true.'),
joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
joinAxisPos = AttrMapValue(isNumberOrNone,
desc='Position at which to join with other axis.'),
)
# Indicate the dimension of the data we're interested in.
_dataIndex = 0
def __init__(self,**kw):
ValueAxis.__init__(self,**kw)
self.labels.boxAnchor = 'n'
self.labels.dx = 0
self.labels.dy = -5
self.tickUp = 0
self.tickDown = 5
self.joinAxis = None
self.joinAxisMode = None
self.joinAxisPos = None
def demo(self):
self.setPosition(20, 50, 150)
self.configure([(10,20,30,40,50)])
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, yAxis, mode='bottom', pos=None):
"Join with y-axis using some mode."
_assertYAxis(yAxis)
if mode == 'bottom':
self._x = yAxis._x * 1.0
self._y = yAxis._y * 1.0
elif mode == 'top':
self._x = yAxis._x * 1.0
self._y = (yAxis._y + yAxis._length) * 1.0
elif mode == 'value':
self._x = yAxis._x * 1.0
self._y = yAxis.scale(pos) * 1.0
elif mode == 'points':
self._x = yAxis._x * 1.0
self._y = pos * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
jap = self.joinAxisPos
jta = self.joinToAxis
if jam in ('bottom', 'top'):
jta(ja, mode=jam)
elif jam in ('value', 'points'):
jta(ja, mode=jam, pos=jap)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
#additional utilities to help specify calendar dates on which tick marks
#are to be plotted. After some thought, when the magic algorithm fails,
#we can let them specify a number of days-of-the-year to tick in any given
#year.
#################################################################################
#
# Preliminary support objects/functions for the axis used in time series charts
#
#################################################################################
_months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
_maxDays = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def parseDayAndMonth(dmstr):
"""This accepts and validates strings like "31-Dec" i.e. dates
of no particular year. 29 Feb is allowed. These can be used
for recurring dates. It returns a (dd, mm) pair where mm is the
month integer. If the text is not valid it raises an error.
"""
dstr, mstr = dmstr.split('-')
dd = int(dstr)
mstr = mstr.lower()
mm = _months.index(mstr) + 1
assert dd <= _maxDays[mm-1]
return (dd, mm)
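# For example parseDayAndMonth('31-Dec') -> (31, 12) and parseDayAndMonth('29-feb') -> (29, 2).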
class _isListOfDaysAndMonths(Validator):
"""This accepts and validates lists of strings like "31-Dec" i.e. dates
of no particular year. 29 Feb is allowed. These can be used
for recurring dates.
"""
def test(self,x):
if isinstance(x,_SequenceTypes):
answer = True
for element in x:
try:
dd, mm = parseDayAndMonth(element)
except:
answer = False
return answer
else:
return False
def normalize(self,x):
#we store them as presented, it's the most presentable way
return x
isListOfDaysAndMonths = _isListOfDaysAndMonths()
class NormalDateXValueAxis(XValueAxis):
"""An X axis applying additional rules.
Depending on the data and some built-in rules, the axis
displays normalDate values as nicely formatted dates.
The client chart should have NormalDate X values.
"""
_attrMap = AttrMap(BASE = XValueAxis,
bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
forceDatesEachYear = AttrMapValue(isListOfDaysAndMonths, desc='List of dates in format "31-Dec",' +
'"1-Jan". If present they will always be used for tick marks in the current year, rather ' +
'than the dates chosen by the automatic algorithm. Hyphen compulsory, case of month optional.'),
xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
specifiedTickDates = AttrMapValue(NoneOr(SequenceOf(isNormalDate)), desc='Actual tick values to use; no calculations done'),
specialTickClear = AttrMapValue(isBoolean, desc='clear rather than delete close ticks when forced first/end dates'),
)
_valueClass = normalDate.ND
def __init__(self,**kw):
XValueAxis.__init__(self,**kw)
# some global variables still used...
self.bottomAxisLabelSlack = 0.1
self.niceMonth = 1
self.forceEndDate = 0
self.forceFirstDate = 0
self.forceDatesEachYear = []
self.dailyFreq = 0
self.xLabelFormat = "{mm}/{yy}"
self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
self.specialTickClear = 0
self.valueSteps = self.specifiedTickDates = None
def _scalar2ND(self, x):
"Convert a scalar to a NormalDate value."
d = self._valueClass()
d.normalize(x)
return d
def _dateFormatter(self, v):
"Create a formatted label for some value."
if not isinstance(v,normalDate.NormalDate):
v = self._scalar2ND(v)
d, m = normalDate._dayOfWeekName, normalDate._monthName
try:
normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
return v.formatMS(self.xLabelFormat)
finally:
normalDate._dayOfWeekName, normalDate._monthName = d, m
def _xAxisTicker(self, xVals):
"""Complex stuff...
Needs explanation...
Yes please says Andy :-(. Modified on 19 June 2006 to attempt to allow
a mode where one can specify recurring days and months.
"""
axisLength = self._length
formatter = self._dateFormatter
if isinstance(formatter,TickLabeller):
def formatter(tick):
return self._dateFormatter(self,tick)
firstDate = xVals[0]
endDate = xVals[-1]
labels = self.labels
fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
RBL = _textBoxLimits(formatter(firstDate).split('\n'),fontName,
fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
xLabelW = RBL[1]-RBL[0]
xLabelH = RBL[3]-RBL[2]
w = max(xLabelW,labels.width,self.minimumTickSpacing)
W = w+w*self.bottomAxisLabelSlack
n = len(xVals)
ticks = []
labels = []
maximumTicks = self.maximumTicks
if self.specifiedTickDates:
VC = self._valueClass
ticks = [VC(x) for x in self.specifiedTickDates]
labels = [formatter(d) for d in ticks]
if self.forceFirstDate and firstDate==ticks[0] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate==ticks[-1] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
return ticks, labels
def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
ticks.insert(0,xVals[i])
labels.insert(0,formatter(xVals[i]))
#AR 20060619 - first we try the approach where the user has explicitly
#specified the days of year to be ticked. Other explicit routes may
#be added.
if self.forceDatesEachYear:
forcedPartialDates = map(parseDayAndMonth, self.forceDatesEachYear)
#generate the list of dates in the range.
#print 'dates range from %s to %s' % (firstDate, endDate)
firstYear = firstDate.year()
lastYear = endDate.year()
ticks = []
labels = []
yyyy = firstYear
#generate all forced dates between the year it starts and the year it
#ends, adding them if within range.
while yyyy <= lastYear:
for (dd, mm) in forcedPartialDates:
theDate = normalDate.ND((yyyy, mm, dd))
if theDate >= firstDate and theDate <= endDate:
ticks.append(theDate)
labels.append(formatter(theDate))
yyyy += 1
#first and last may still be forced in.
if self.forceFirstDate and firstDate!=ticks[0]:
ticks.insert(0, firstDate)
labels.insert(0,formatter(firstDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate!=ticks[-1]:
ticks.append(endDate)
labels.append(formatter(endDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
#print 'xVals found on forced dates =', ticks
return ticks, labels
#otherwise, we apply the 'magic algorithm...' which looks for nice spacing
#based on the size and separation of the labels.
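        # d is the stride, in data points, between ticks; if the underlying data
        # is monthly the candidates correspond roughly to monthly, bi-monthly,
        # quarterly, half-yearly, yearly, 2-, 5- and 10-yearly spacing.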
for d in (1,2,3,6,12,24,60,120):
k = n/d
if k<=maximumTicks and k*W <= axisLength:
i = n-1
if self.niceMonth:
j = endDate.month() % (d<=12 and d or 12)
if j:
if self.forceEndDate:
addTick(i)
ticks[0]._doSubTicks=0
i -= j
#weird first date ie not at end of month
try:
wfd = firstDate.month() == xVals[1].month()
except:
wfd = 0
while i>=wfd:
addTick(i)
i -= d
if self.forceFirstDate and ticks[0]!=firstDate:
addTick(0)
ticks[0]._doSubTicks=0
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and self.niceMonth and j:
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
try:
if labels[0] and labels[0]==labels[1]:
del ticks[1], labels[1]
except IndexError:
pass
return ticks, labels
def _convertXV(self,data):
'''Convert all XValues to a standard normalDate type'''
VC = self._valueClass
for D in data:
for i in xrange(len(D)):
x, y = D[i]
if not isinstance(x,VC):
D[i] = (VC(x),y)
def _getStepsAndLabels(self,xVals):
if self.dailyFreq:
xEOM = []
pm = 0
px = xVals[0]
for x in xVals:
m = x.month()
if pm!=m:
if pm: xEOM.append(px)
pm = m
px = x
px = xVals[-1]
if xEOM[-1]!=x: xEOM.append(px)
steps, labels = self._xAxisTicker(xEOM)
else:
steps, labels = self._xAxisTicker(xVals)
return steps, labels
def configure(self, data):
self._convertXV(data)
from reportlab.lib.set_ops import union
xVals = reduce(union,map(lambda x: map(lambda dv: dv[0],x),data),[])
xVals.sort()
steps,labels = self._getStepsAndLabels(xVals)
valueMin, valueMax = self.valueMin, self.valueMax
if valueMin is None: valueMin = xVals[0]
if valueMax is None: valueMax = xVals[-1]
self._valueMin, self._valueMax = valueMin, valueMax
self._tickValues = steps
self._labelTextFormat = labels
self._scaleFactor = self._length / float(valueMax - valueMin)
self._tickValues = steps
self._configured = 1
class YValueAxis(_YTicks,ValueAxis):
"Y/value axis"
_attrMap = AttrMap(BASE=ValueAxis,
tickLeft = AttrMapValue(isNumber,
desc='Tick length left of the axis.'),
tickRight = AttrMapValue(isNumber,
desc='Tick length right of the axis.'),
joinAxis = AttrMapValue(None,
desc='Join both axes if true.'),
joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
joinAxisPos = AttrMapValue(isNumberOrNone,
desc='Position at which to join with other axis.'),
)
# Indicate the dimension of the data we're interested in.
_dataIndex = 1
    def __init__(self,**kw):
        ValueAxis.__init__(self,**kw)
self.labels.boxAnchor = 'e'
self.labels.dx = -5
self.labels.dy = 0
self.tickRight = 0
self.tickLeft = 5
self.joinAxis = None
self.joinAxisMode = None
self.joinAxisPos = None
def demo(self):
data = [(10, 20, 30, 42)]
self.setPosition(100, 10, 80)
self.configure(data)
drawing = Drawing(200, 100)
drawing.add(self)
return drawing
def joinToAxis(self, xAxis, mode='left', pos=None):
"Join with x-axis using some mode."
_assertXAxis(xAxis)
if mode == 'left':
self._x = xAxis._x * 1.0
self._y = xAxis._y * 1.0
elif mode == 'right':
self._x = (xAxis._x + xAxis._length) * 1.0
self._y = xAxis._y * 1.0
elif mode == 'value':
self._x = xAxis.scale(pos) * 1.0
self._y = xAxis._y * 1.0
elif mode == 'points':
self._x = pos * 1.0
self._y = xAxis._y * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
jap = self.joinAxisPos
jta = self.joinToAxis
if jam in ('left', 'right'):
jta(ja, mode=jam)
elif jam in ('value', 'points'):
jta(ja, mode=jam, pos=jap)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
class AdjYValueAxis(YValueAxis):
"""A Y-axis applying additional rules.
Depending on the data and some built-in rules, the axis
may choose to adjust its range and origin.
"""
_attrMap = AttrMap(BASE = YValueAxis,
requiredRange = AttrMapValue(isNumberOrNone, desc='Minimum required value range.'),
leftAxisPercent = AttrMapValue(isBoolean, desc='When true add percent sign to label values.'),
leftAxisOrigShiftIPC = AttrMapValue(isNumber, desc='Lowest label shift interval ratio.'),
leftAxisOrigShiftMin = AttrMapValue(isNumber, desc='Minimum amount to shift.'),
leftAxisSkipLL0 = AttrMapValue(EitherOr((isBoolean,isListOfNumbers)), desc='Skip/Keep lowest tick label when true/false.\nOr skiplist'),
labelVOffset = AttrMapValue(isNumber, desc='add this to the labels'),
)
def __init__(self,**kw):
YValueAxis.__init__(self,**kw)
self.requiredRange = 30
self.leftAxisPercent = 1
self.leftAxisOrigShiftIPC = 0.15
self.leftAxisOrigShiftMin = 12
self.leftAxisSkipLL0 = self.labelVOffset = 0
self.valueSteps = None
def _rangeAdjust(self):
"Adjusts the value range of the axis."
from reportlab.graphics.charts.utils import find_good_grid, ticks
y_min, y_max = self._valueMin, self._valueMax
m = self.maximumTicks
n = filter(lambda x,m=m: x<=m,[4,5,6,7,8,9])
if not n: n = [m]
valueStep, requiredRange = self.valueStep, self.requiredRange
if requiredRange and y_max - y_min < requiredRange:
y1, y2 = find_good_grid(y_min, y_max,n=n,grid=valueStep)[:2]
if y2 - y1 < requiredRange:
ym = (y1+y2)*0.5
y1 = min(ym-requiredRange*0.5,y_min)
y2 = max(ym+requiredRange*0.5,y_max)
if y_min>=100 and y1<100:
y2 = y2 + 100 - y1
y1 = 100
elif y_min>=0 and y1<0:
y2 = y2 - y1
y1 = 0
self._valueMin, self._valueMax = y1, y2
T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
abf = self.avoidBoundFrac
if abf:
i1 = (T[1]-T[0])
if not isinstance(abf,_SequenceTypes):
i0 = i1 = i1*abf
else:
i0 = i1*abf[0]
i1 = i1*abf[1]
_n = getattr(self,'_cValueMin',T[0])
_x = getattr(self,'_cValueMax',T[-1])
if _n - T[0] < i0: self._valueMin = self._valueMin - i0
if T[-1]-_x < i1: self._valueMax = self._valueMax + i1
T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
self._valueMin = T[0]
self._valueMax = T[-1]
self._tickValues = T
if self.labelTextFormat is None:
self._labelTextFormat = L
else:
self._labelTextFormat = self.labelTextFormat
if abs(self._valueMin-100)<1e-6:
self._calcValueStep()
vMax, vMin = self._valueMax, self._valueMin
m = max(self.leftAxisOrigShiftIPC*self._valueStep,
(vMax-vMin)*self.leftAxisOrigShiftMin/self._length)
self._valueMin = self._valueMin - m
if self.leftAxisSkipLL0:
if isinstance(self.leftAxisSkipLL0,_SequenceTypes):
for x in self.leftAxisSkipLL0:
try:
L[x] = ''
except IndexError:
pass
L[0] = ''
# Sample functions.
def sample0a():
"Sample drawing with one xcat axis and two buckets."
drawing = Drawing(400, 200)
data = [(10, 20)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Ying', 'Yang']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
return drawing
def sample0b():
"Sample drawing with one xcat axis and one bucket only."
drawing = Drawing(400, 200)
data = [(10,)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Ying']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
return drawing
def sample1():
"Sample drawing containing two unconnected axes."
from reportlab.graphics.shapes import _baseGFontNameB
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Beer','Wine','Meat','Cannelloni']
xAxis.labels.boxAnchor = 'n'
xAxis.labels[3].dy = -15
xAxis.labels[3].angle = 30
xAxis.labels[3].fontName = _baseGFontNameB
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4a():
"Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'points'
xAxis.joinAxisPos = 100
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4b():
"Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'value'
xAxis.joinAxisPos = 35
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4c():
"Sample drawing, xvalue/yvalue axes, y connected to bottom of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4c1():
"xvalue/yvalue axes, without drawing axis lines/ticks."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
yAxis.visibleAxis = 0
yAxis.visibleTicks = 0
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.configure(data)
xAxis.visibleAxis = 0
xAxis.visibleTicks = 0
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4d():
"Sample drawing, xvalue/yvalue axes, y connected to top of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'top'
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5a():
"Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'points'
yAxis.joinAxisPos = 100
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5b():
"Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'value'
yAxis.joinAxisPos = 35
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5c():
"Sample drawing, xvalue/yvalue axes, y connected at right of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'right'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5d():
"Sample drawing, xvalue/yvalue axes, y connected at left of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'left'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6a():
"Sample drawing, xcat/yvalue axes, x connected at top of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'top'
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6b():
"Sample drawing, xcat/yvalue axes, x connected at bottom of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6c():
"Sample drawing, xcat/yvalue axes, x connected at 100 pts to y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'points'
xAxis.joinAxisPos = 100
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6d():
"Sample drawing, xcat/yvalue axes, x connected at value 20 of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'value'
xAxis.joinAxisPos = 20
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7a():
"Sample drawing, xvalue/ycat axes, y connected at right of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'right'
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7b():
"Sample drawing, xvalue/ycat axes, y connected at left of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'left'
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7c():
"Sample drawing, xvalue/ycat axes, y connected at value 30 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'value'
yAxis.joinAxisPos = 30
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7d():
"Sample drawing, xvalue/ycat axes, y connected at 200 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'points'
yAxis.joinAxisPos = 200
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
|
pataquets/namecoin-core
|
refs/heads/master
|
test/functional/test_framework/test_node.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for namecoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .messages import MY_SUBVERSION
from .util import (
MAX_NODES,
append_config,
config_file,
delete_cookie_file,
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until_helper,
p2p_port,
EncodeDecimal,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a namecoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.bitcoinconf = os.path.join(self.datadir, config_file)
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.chain = chain
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
self.descriptors = descriptors
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
if use_valgrind:
default_suppressions_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "contrib", "valgrind.supp")
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
default_suppressions_file)
self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
if self.version_is_at_least(190000):
self.args.append("-logthreadnames")
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
self.timeout_factor = timeout_factor
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Set the value of -minrelaytxfee and -mintxfee to the defaults used
        # in upstream Bitcoin (rather than the ones from Namecoin) unless an
# explicit value is given. This makes sure that tx fees hardcoded in
# some tests are adequate and do not need changes for Namecoin.
explicit_fees = set ()
fee_args = ["-minrelaytxfee", "-mintxfee"]
for arg in extra_args:
for fee_arg in fee_args:
if arg.startswith (fee_arg):
explicit_fees.add (fee_arg)
for fee_arg in fee_args:
if fee_arg in explicit_fees:
continue
# There is some extra handling of -wallet arguments at the end
# of extra_args, so we add ours at the beginning to not mess with
# the way upstream works.
extra_args = ["%s=0.00001" % fee_arg] + extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir, self.chain)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("namecoind started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'namecoind exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(
rpc_url(self.datadir, self.index, self.chain, self.rpchost),
self.index,
timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT
coveragedir=self.coverage_dir,
)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
if self.version_is_at_least(190000):
# getmempoolinfo.loaded is available since commit
# bb8ae2c (version 0.19.0)
wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
# Wait for the node to finish reindex, block import, and
# loading the mempool. Usually importing happens fast or
# even "immediate" when the node is started. However, there
# is no guarantee and sometimes ThreadImport might finish
# later. This is going to cause intermittent test failures,
# because generally the tests assume the node is fully
# ready after being started.
#
# For example, the node will reject block messages from p2p
# when it is still importing with the error "Unexpected
# block message received"
#
# The wait is done here to make tests as robust as possible
# and prevent racy tests and intermittent failures as much
# as possible. Some tests might not need this, but the
# overhead is trivial, and the added guarantees are worth
# the minimal performance cost.
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ConnectionResetError:
# This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
# succeeds. Try again to properly raise the FailedToStartError
pass
except OSError as e:
if e.errno == errno.ETIMEDOUT:
pass # Treat identical to ConnectionResetError
elif e.errno == errno.ECONNREFUSED:
pass # Port not yet open?
else:
raise # unknown OS error
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to namecoind after {}s".format(self.rpc_timeout))
def wait_for_cookie_credentials(self):
"""Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
self.log.debug("Waiting for cookie credentials")
# Poll at a rate of four times per second.
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
try:
get_auth_cookie(self.datadir, self.chain)
self.log.debug("Cookie credentials successfully retrieved")
return
except ValueError: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
pass # so we continue polling until RPC credentials are retrieved
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
def version_is_at_least(self, ver):
return self.version is None or self.version >= ver
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
# Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
if self.version_is_at_least(180000):
self.stop(wait=wait)
else:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
time_end = time.time() + timeout * self.timeout_factor
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
yield
while True:
found = True
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for unexpected_msg in unexpected_msgs:
if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
found = False
if found:
return
if time.time() >= time_end:
break
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
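    # Illustrative usage of the context manager above (sketch only):
    #     with node.profile_with_perf("msg-handling"):
    #         node.generate(10)
    #     # perf writes its data file under the node's datadir; _stop_perf below
    #     # logs the 'perf report' command to inspect it.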
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only available on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into bitcoind")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('namecoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "namecoind should have exited with an error"
else:
assert_msg = "namecoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
# At this point we have sent our version message and received the version and verack, however the full node
# has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
# established (fSuccessfullyConnected).
#
# This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
# message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
# transaction that will be added to the mempool as soon as we return here.
#
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
# in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
p2p_conn.sync_with_ping()
return p2p_conn
def num_test_p2p_connections(self):
"""Return number of test framework p2p connections to the node."""
return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION])
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif arg is None:
return 'null'
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg, default=EncodeDecimal)
else:
return str(arg)
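# For example: arg_to_cli(True) -> 'true', arg_to_cli(None) -> 'null',
# arg_to_cli({'a': 1}) -> '{"a": 1}', arg_to_cli(0.1) -> '0.1'.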
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.namecoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same namecoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running namecoin-cli {}".format(p_args[2:]))
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
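# Illustrative sketch (not part of the framework): deferred requests built via
# TestNodeCLIAttr.get_request() can be executed together through batch(). The
# RPC names below are examples only.
def _example_cli_batch(cli):
    requests = [cli.getblockcount.get_request(),
                cli.getbestblockhash.get_request()]
    # Each entry of the returned list holds either a 'result' or an 'error' key.
    return cli.batch(requests)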
class RPCOverloadWrapper():
def __init__(self, rpc, cli=False, descriptors=False):
self.rpc = rpc
self.is_cli = cli
self.descriptors = descriptors
def __getattr__(self, name):
return getattr(self.rpc, name)
def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
if descriptors is None:
descriptors = self.descriptors
return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)
def importprivkey(self, privkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importprivkey')(privkey, label, rescan)
desc = descsum_create('combo(' + privkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
cms = self.createmultisig(nrequired, keys, address_type)
req = [{
'desc': cms['descriptor'],
'timestamp': 0,
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
return cms
def importpubkey(self, pubkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importpubkey')(pubkey, label, rescan)
desc = descsum_create('combo(' + pubkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def importaddress(self, address, label=None, rescan=None, p2sh=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importaddress')(address, label, rescan, p2sh)
is_hex = False
try:
int(address, 16)
is_hex = True
desc = descsum_create('raw(' + address + ')')
except:
desc = descsum_create('addr(' + address + ')')
reqs = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
if is_hex and p2sh:
reqs.append({
'desc': descsum_create('p2sh(raw(' + address + '))'),
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
})
import_res = self.importdescriptors(reqs)
for res in import_res:
if not res['success']:
raise JSONRPCException(res['error'])
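# Illustrative note (assumption derived from the overrides above): on a descriptor
# wallet, importaddress("00" * 20) takes the hex branch and imports the descriptor
# descsum_create('raw(' + "00" * 20 + ')'), i.e. "raw(00...)#<checksum>", while a
# base58/bech32 address string falls back to an "addr(...)#<checksum>" descriptor.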
|
CeltonMcGrath/TACTIC
|
refs/heads/master
|
src/tactic/ui/cgapp/__init__.py
|
7
|
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from introspect import *
from app_init_wdg import *
from load_options_wdg import *
from sobject_load_wdg import *
from loader_wdg import *
from loader_element_wdg import *
from loader_button_wdg import *
from publish_element_wdg import *
from app_panel_wdg import *
from checkin_wdg import *
from version_wdg import *
from session_wdg import *
|
ScalaInc/exp-python2-sdk
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
setup(
name='exp-sdk',
packages= ['exp_sdk'],
version='1.0.7',
description='EXP Python SDK',
author='Scala',
author_email='james.dalessio@scala.com',
url='https://github.com/scalainc/exp-python2-sdk',
download_url='https://github.com/scalainc/exp-python2-sdk/tarball/1.0.7',
install_requires=["requests", "socketIO_client"],
license='MIT',
keywords=['scala', 'exp', 'sdk', 'signage'],
classifiers=[
'Programming Language :: Python :: 2'
]
)
|
riadnassiffe/Simulator
|
refs/heads/master
|
src/tools/ecos/cvxpy/examples/extensions/mixed_integer/sparse_var.py
|
1
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from noncvx_variable import NonCvxVariable
import cvxpy.interface.matrix_utilities as intf
from itertools import product
class SparseVar(NonCvxVariable):
""" A variable with constrained cardinality. """
# k - the maximum cardinality of the variable.
def __init__(self, rows=1, cols=1, nonzeros=None, *args, **kwargs):
self.k = nonzeros
super(SparseVar, self).__init__(rows, cols, *args, **kwargs)
# All values except k-largest (by magnitude) set to zero.
def _round(self, matrix):
indices = product(xrange(self.size[0]), xrange(self.size[1]))
v_ind = sorted(indices, key=lambda ind: -abs(matrix[ind]))
for ind in v_ind[self.k:]:
matrix[ind] = 0
return matrix
# Constrain all entries to be zero that correspond to
# zeros in the matrix.
def _fix(self, matrix):
constraints = []
rows,cols = intf.size(matrix)
for i in range(rows):
for j in range(cols):
if matrix[i, j] == 0:
constraints.append(self[i, j] == 0)
return constraints
|
razvanphp/arangodb
|
refs/heads/devel
|
3rdParty/V8-3.31.74.1/third_party/python_26/Lib/site-packages/win32comext/shell/demos/dump_link.py
|
17
|
# dump_link.py - dumps information about shell shortcuts
#
import sys, os
from win32com.shell import shell, shellcon
import pythoncom
import glob
from win32com.storagecon import *
def DumpLink(fname):
shellLink = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
persistFile = shellLink.QueryInterface(pythoncom.IID_IPersistFile)
persistFile.Load(fname,STGM_READ)
shellLink.Resolve(0, shell.SLR_ANY_MATCH | shell.SLR_NO_UI)
fname, findData = shellLink.GetPath(0)
print "Filename", fname, ", UNC=", shellLink.GetPath(shell.SLGP_UNCPRIORITY)[0]
def FavDumper(nothing, path, names):
# called by os.path.walk
for name in names:
print name,
try:
DumpLink(name)
except pythoncom.com_error:
print " - not a link"
def DumpFavorites():
favfold = str(shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_FAVORITES))
print "Your favourites are at", favfold
os.path.walk(favfold, FavDumper, None)
if __name__=='__main__':
if len(sys.argv)>1:
for fspec in sys.argv[1:]:
files = glob.glob(fspec)
if files:
for file in files:
print file
DumpLink(file)
print
else:
print "Can not find", fspec
else:
print "Dumping your favorites folder!"
DumpFavorites()
|
TanguyPatte/phantomjs-packaging
|
refs/heads/master
|
src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/utils.py
|
515
|
# Utils (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
import string, sys, os, glob
# current output directory
#
output_dir = None
# This function is used to sort the index. It is a simple lexicographical
# sort, except that it places capital letters before lowercase ones.
#
def index_sort( s1, s2 ):
if not s1:
return -1
if not s2:
return 1
l1 = len( s1 )
l2 = len( s2 )
m1 = string.lower( s1 )
m2 = string.lower( s2 )
for i in range( l1 ):
if i >= l2 or m1[i] > m2[i]:
return 1
if m1[i] < m2[i]:
return -1
if s1[i] < s2[i]:
return -1
if s1[i] > s2[i]:
return 1
if l2 > l1:
return -1
return 0
# Sort input_list, placing the elements of order_list in front.
#
def sort_order_list( input_list, order_list ):
new_list = order_list[:]
for id in input_list:
if not id in order_list:
new_list.append( id )
return new_list
# Open the standard output to a given project documentation file. Use
# "output_dir" to determine the filename location if necessary and save the
# old stdout in a tuple that is returned by this function.
#
def open_output( filename ):
global output_dir
if output_dir and output_dir != "":
filename = output_dir + os.sep + filename
old_stdout = sys.stdout
new_file = open( filename, "w" )
sys.stdout = new_file
return ( new_file, old_stdout )
# Close the output that was returned by "open_output".
#
def close_output( output ):
output[0].close()
sys.stdout = output[1]
# Check output directory.
#
def check_output():
global output_dir
if output_dir:
if output_dir != "":
if not os.path.isdir( output_dir ):
sys.stderr.write( "argument" + " '" + output_dir + "' " + \
"is not a valid directory" )
sys.exit( 2 )
else:
output_dir = None
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result
def make_file_list( args = None ):
"""builds a list of input files from command-line arguments"""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1 :]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort() # sort files -- this is important because
# of the order of files
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
# eof
|
shinose/kodi-qplay
|
refs/heads/master
|
lib/gtest/test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
argentumproject/electrum-arg
|
refs/heads/master
|
gui/kivy/nfc_scanner/scanner_dummy.py
|
1
|
''' Dummy NFC Provider to be used on desktops in case no other provider is found
'''
from electrum_arg_gui.kivy.nfc_scanner import NFCBase
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.app import App  # used by on_new_intent below
class ScannerDummy(NFCBase):
'''This is the dummy interface that gets selected when no other
hardware interface to NFC is available.
'''
_initialised = False
name = 'NFCDummy'
def nfc_init(self):
# print 'nfc_init()'
Logger.debug('NFC: configure nfc')
self._initialised = True
self.nfc_enable()
return True
def on_new_intent(self, dt):
tag_info = {'type': 'dummy',
'message': 'dummy',
'extra details': None}
# let Main app know that a tag has been detected
app = App.get_running_app()
app.tag_discovered(tag_info)
app.show_info('New tag detected.', duration=2)
Logger.debug('NFC: got new dummy tag')
def nfc_enable(self):
Logger.debug('NFC: enable')
if self._initialised:
Clock.schedule_interval(self.on_new_intent, 22)
def nfc_disable(self):
# print 'nfc_disable()'
Clock.unschedule(self.on_new_intent)
def nfc_enable_exchange(self, data):
''' Start sending data
'''
Logger.debug('NFC: sending data {}'.format(data))
def nfc_disable_exchange(self):
''' Disable/Stop ndef exchange
'''
Logger.debug('NFC: disable nfc exchange')
|
yongtang/tensorflow
|
refs/heads/master
|
tensorflow/lite/testing/op_tests/zeros_like.py
|
16
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for zeros_like."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
# This maximum node is so that toco can perform the constants-propagation
# through the above zeros_like, which it can't do if the output of the
# zeros_like as an output of the whole graphs (graph outputs can't be
# constants). If toco does not perform such constants-propagation then
# the resulting tflite graph retains the zeros_like as a Fill op, which
# is unsupported by TFLite, even as a custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
okere-prince/googlemock
|
refs/heads/master
|
scripts/generator/cpp/utils.py
|
1158
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
"""Returns the contents of a file."""
try:
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
except IOError:
if print_error:
print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
return None
|
tectronics/mpmath
|
refs/heads/master
|
doc/source/plots/coulombg.py
|
30
|
# Irregular Coulomb wave functions -- equivalent to figure 14.5 in A&S
from mpmath import coulombg, plot  # imports needed to run this snippet standalone
F1 = lambda x: coulombg(0,0,x)
F2 = lambda x: coulombg(0,1,x)
F3 = lambda x: coulombg(0,5,x)
F4 = lambda x: coulombg(0,10,x)
F5 = lambda x: coulombg(0,x/2,x)
plot([F1,F2,F3,F4,F5], [0,30], [-2,2])
|
Dapid/numpy
|
refs/heads/master
|
numpy/lib/_version.py
|
156
|
"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
from __future__ import division, absolute_import, print_function
import re
from numpy.compat import basestring
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance. Note that all development versions of the same
(pre-)release compare equal.
.. versionadded:: 1.9.0
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Examples
--------
>>> from numpy.lib import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (basestring, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, basestring):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
|
JanikNex/adventure16
|
refs/heads/master
|
src/utilclasses/cdialoguehandler.py
|
1
|
from src.utilclasses.cjsonhandler import *
class DialogueHandler(object):
def __init__(self, game):
"""
Creates a new DialogueHandler
:param game: Game
:type game: Game
"""
self.game = game
self.jsonparser = JSONHandler()
self.path = None
self.inDialogue = False
self.step = 0
self.usePrestige = 0
self.buttonArray = []
self.textOutput = ''
def startDialogue(self, path):
"""
Starts a new dialogue between the player and the given JSON file
:param path: file name of the JSON file
:type path: str
"""
if not self.inDialogue:
self.path = path
self.jsonparser.openNewFile(self.path)
self.usePrestige = self.game.getPlayer().getPrestige()
self.inDialogue = True
self.game.getPlayer().startInteractWith(self)
self.nextStep()
def endDialogue(self):
"""
Ends the current dialogue if one is active
"""
if self.inDialogue:
self.inDialogue = False
self.path = None
self.step = 0
self.game.getPlayer().endInteraction()
def getButtonArray(self, mode='all'):
"""
Returns the contents of the answer buttons.
:param mode: default is 'all'; 'text' returns only the button texts and 'goto' only the goTo numbers
:rtype: list
"""
if len(self.buttonArray) == 0 or (
len(self.buttonArray[0]) == 0 and len(self.buttonArray[1]) == 0 and len(
self.buttonArray[2]) == 0):
return []
else:
if mode == 'all':
return self.buttonArray
elif mode == 'text':
return [self.buttonArray[0][0], self.buttonArray[1][0], self.buttonArray[2][0]]
elif mode == 'goto':
return [self.buttonArray[0][1], self.buttonArray[1][1], self.buttonArray[2][1]]
def getTextOutput(self):
"""
Returns the current text output.
:return: ['Text', 'Operator-char']
:rtype: list
"""
return self.textOutput
def nextStep(self, select=None):
"""
Advances the dialogue to the next step and updates all parameters
:param select: button that was selected as an answer option
"""
if self.step == -1:
self.endDialogue()
return
if select is None:
thisStep = self.jsonparser.getData()[str(self.usePrestige)][str(self.step)]
self.textOutput = [thisStep['text'], thisStep['operator']]
if thisStep['operator'] == 'P':
self.step = int(thisStep['goTo'])
self.game.getPlayer().increasePrestige(int(thisStep['prestigeChange']))
elif thisStep['operator'] == 'C':
if 'buttons' in thisStep:
self.buttonArray = thisStep['buttons']
else:
self.step = int(thisStep['skipTo'])
else:
self.step = self.getButtonArray('goto')[select]
thisStep = self.jsonparser.getData()[str(self.usePrestige)][str(self.step)]
self.textOutput = [thisStep['text'], thisStep['operator']]
if thisStep['operator'] == 'P':
self.step = int(thisStep['goTo'])
self.game.getPlayer().increasePrestige(
int(thisStep['prestigeChange']))
elif thisStep['operator'] == 'C':
if 'buttons' in thisStep:
self.buttonArray = thisStep['buttons']
if not self.getButtonArray('goto')[select]:
self.step = int(thisStep['skipTo'])
else:
self.step = self.getButtonArray('goto')[select]
def isInDialogue(self):
"""
Returns whether a dialogue is currently active
:rtype: bool
"""
return self.inDialogue
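# Hypothetical shape of the dialogue JSON consumed above (an assumption derived
# from the keys read in nextStep, not a file shipped with the game):
#
# {
#   "0": {                                    # prestige level
#     "0": {"text": "Hi", "operator": "C",
#           "buttons": [["Yes", 1], ["No", 2], ["", 0]], "skipTo": 3},
#     "1": {"text": "Bye", "operator": "P", "goTo": -1, "prestigeChange": 1}
#   }
# }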
|
SKA-ScienceDataProcessor/FastImaging-Python
|
refs/heads/master
|
tests/test_utils.py
|
1
|
import numpy as np
from fastimgproto.utils import nonzero_bounding_slice_2d
def test_bounding_slice_2d():
a = np.zeros((5, 5), dtype=np.float_)
# Should return None if no non-zero elements:
assert nonzero_bounding_slice_2d(a) == None
val1 = 42
val2 = 3.14
a[1, 2] = val1
bs = nonzero_bounding_slice_2d(a)
y_slice, x_slice = bs
assert y_slice == slice(1, 2)
assert x_slice == slice(2, 3)
assert len(a[bs].ravel()) == 1
a[2, 3] = val2
bs = nonzero_bounding_slice_2d(a)
bb_subarray = a[bs]
expected_subarray = np.array([[val1, 0],
[0, val2], ])
assert (bb_subarray == expected_subarray).all()
bb_nz_vals = bb_subarray[np.nonzero(bb_subarray)].ravel()
assert len(bb_nz_vals) == 2
assert (bb_nz_vals == np.array([val1, val2])).all()
# Add a non-connected value and check the bounding box encompasses the lot
a[4, 4] = val1
bs = nonzero_bounding_slice_2d(a)
y_slice, x_slice = bs
assert y_slice == slice(1, 5)
assert x_slice == slice(2, 5)
|
dbckz/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
|
27
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "Michael Schultz (github.com/mjschultz)"
- "Fernando Jose Pando (@nand0p)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
required: false
default: null
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
AWSRetry,
connect_to_aws,
ec2_argument_spec,
get_aws_connection_info,
)
try:
import boto.ec2.elb
from boto.ec2.tag import Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbInformation(object):
"""Handles ELB information."""
def __init__(self,
module,
names,
region,
**aws_connect_params):
self.module = module
self.names = names
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_elb_connection()
def _get_tags(self, elbname):
params = {'LoadBalancerNames.member.1': elbname}
elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_connection(self):
return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
def _get_elb_listeners(self, listeners):
listener_list = []
for listener in listeners:
listener_dict = {
'load_balancer_port': listener[0],
'instance_port': listener[1],
'protocol': listener[2],
}
try:
ssl_certificate_id = listener[4]
except IndexError:
pass
else:
if ssl_certificate_id:
listener_dict['ssl_certificate_id'] = ssl_certificate_id
listener_list.append(listener_dict)
return listener_list
def _get_health_check(self, health_check):
protocol, port_path = health_check.target.split(':')
try:
port, path = port_path.split('/', 1)
path = '/{0}'.format(path)
except ValueError:
port = port_path
path = None
health_check_dict = {
'ping_protocol': protocol.lower(),
'ping_port': int(port),
'response_timeout': health_check.timeout,
'interval': health_check.interval,
'unhealthy_threshold': health_check.unhealthy_threshold,
'healthy_threshold': health_check.healthy_threshold,
}
if path:
health_check_dict['ping_path'] = path
return health_check_dict
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_info(self, elb):
elb_info = {
'name': elb.name,
'zones': elb.availability_zones,
'dns_name': elb.dns_name,
'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
'hosted_zone_name': elb.canonical_hosted_zone_name,
'hosted_zone_id': elb.canonical_hosted_zone_name_id,
'instances': [instance.id for instance in elb.instances],
'listeners': self._get_elb_listeners(elb.listeners),
'scheme': elb.scheme,
'security_groups': elb.security_groups,
'health_check': self._get_health_check(elb.health_check),
'subnets': elb.subnets,
'instances_inservice': [],
'instances_inservice_count': 0,
'instances_outofservice': [],
'instances_outofservice_count': 0,
'instances_inservice_percent': 0.0,
'tags': self._get_tags(elb.name)
}
if elb.vpc_id:
elb_info['vpc_id'] = elb.vpc_id
if elb.instances:
instance_health = self.connection.describe_instance_health(elb.name)
elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
try:
elb_info['instances_inservice_percent'] = (
float(elb_info['instances_inservice_count']) /
float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
) * 100.
except ZeroDivisionError:
elb_info['instances_inservice_percent'] = 0.
return elb_info
def list_elbs(self):
elb_array, token = [], None
get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
while True:
all_elbs = get_elb_with_backoff(marker=token)
token = all_elbs.next_token
if all_elbs:
if self.names:
for existing_lb in all_elbs:
if existing_lb.name in self.names:
elb_array.append(existing_lb)
else:
elb_array.extend(all_elbs)
else:
break
if token is None:
break
return list(map(self._get_elb_info, elb_array))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
names={'default': [], 'type': 'list'}
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
names = module.params['names']
elb_information = ElbInformation(
module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False,
elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
exception=traceback.format_exc())
module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
|
iizukak/nupic-nlp-experiment
|
refs/heads/master
|
src/encoder/vector_test.py
|
1
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for VectorEncoder."""
CL_VERBOSITY = 0
import unittest2 as unittest
from vector import VectorEncoder, VectorEncoderOPF, SimpleVectorEncoder
from nupic.encoders.scalar import ScalarEncoder
class VectorEncoderTest(unittest.TestCase):
"""Unit tests for VectorEncoder class."""
def setUp(self):
self._tmp = None # to pass around values
def testInitialization(self):
e = VectorEncoder(3, ScalarEncoder(21, 0, 10, n=200), name="vec")
self.assertIsInstance(e, VectorEncoder)
def testEncoding(self):
s = ScalarEncoder(1,1,3,n=3, name='idx', forced=True)
v = VectorEncoder(3, s, typeCastFn=float)
data=[1,2,3]
print "data=", data
# encode
enc = v.encode(data)
print "encoded=", enc
correct = [1,0,0,0,1,0,0,0,1]
self.assertTrue((enc==correct).all(), "Did not encode correctly")
def testDecoding(self):
s = ScalarEncoder(1,1,3,n=3, name='idx', forced=True)
v = VectorEncoder(3, s, typeCastFn=float)
data=[1,2,3]
enc = v.encode(data)
#decode
dec = v.decode(enc)
print "decoded=", dec
res= v.getData(dec)
self.assertEqual(data, res, "Decoded data not equal to original")
def testVectorEncoderOPFInstance(self):
"""calling VectorEncoder from OPF"""
opfVect = VectorEncoderOPF(3, 1, 3, n=211, w=21, dataType="int")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertEqual(data, data2, "VectorEncoderOPF did not encode/decode correctly.")
def testVectorEncoderOPFTypeCast(self):
"""for calling from OPF, use this to cast data type"""
opfVect = VectorEncoderOPF(3, 1, 3, n=300, w=21, dataType="str")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertIsInstance(data2[0], str, "VectorEncoderOPF did not cast output to str(ing)")
opfVect = VectorEncoderOPF(3, 1, 3, n=300, w=21, dataType="int")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertIsInstance(data2[0], int, "VectorEncoderOPF did not cast output to int")
def testSimpleVectorEncoderInstance(self):
""" simple demo version"""
simpleVect = SimpleVectorEncoder()
data=[1.0, 2.0, 3.0, 4.0, 5.0]
enc=simpleVect.encode(data)
dec=simpleVect.decode(enc)
data2=simpleVect.getData(dec)
self.assertEqual(data, data2, "Simple vector did not encode/decode correctly")
if __name__ == '__main__':
unittest.main()
|
jkocherhans/maillib
|
refs/heads/master
|
maillib/tests/attachments.py
|
1
|
from maillib import Message
import unittest
RAW_MESSAGE = """\
Return-path: <jkocherhans@gmail.com>
Received: from smtpin132-bge351000 ([10.150.68.132])
by ms041.mac.com (Sun Java(tm) System Messaging Server 6.3-7.04 (built Sep 26
2008; 64bit)) with ESMTP id <0KRV00HDRSA264C0@ms041.mac.com> for
jkocherhans@mac.com; Wed, 21 Oct 2009 13:15:38 -0700 (PDT)
Original-recipient: rfc822;jkocherhans@mac.com
Received: from mail-yx0-f185.google.com ([unknown] [209.85.210.185])
by smtpin132.mac.com
(Sun Java(tm) System Messaging Server 7u2-7.04 32bit (built Jul 2 2009))
with ESMTP id <0KRV00IJ2SA1IC00@smtpin132.mac.com> for jkocherhans@mac.com
(ORCPT jkocherhans@mac.com); Wed, 21 Oct 2009 13:15:38 -0700 (PDT)
X-Proofpoint-Virus-Version: vendor=fsecure
engine=1.12.8161:2.4.5,1.2.40,4.0.166
definitions=2009-10-21_11:2009-09-29,2009-10-21,2009-10-21 signatures=0
X-Proofpoint-Spam-Details: rule=notspam policy=default score=0 spamscore=0
ipscore=0 phishscore=0 bulkscore=0 adultscore=0 classifier=spam adjust=0
reason=mlx engine=5.0.0-0908210000 definitions=main-0910210183
Received: by yxe15 with SMTP id 15so8537359yxe.9 for <jkocherhans@mac.com>;
Wed, 21 Oct 2009 13:15:37 -0700 (PDT)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com;
s=gamma; h=domainkey-signature:mime-version:received:date:message-id:subject
:from:to:content-type; bh=YrtdrLl8RzaFKTuh2w7XAQpOVTR+X/x5bmVCE6Prmzo=;
b=xJFEqZbmCHhfZet9QYywAg0hs7v8t6Kl1VuHoiZaKUT5E7+Z50vaZDCL9gExAuECAW
Njeq3HraZHR/mv7bE4Z8o/Xe4TvPIAkUjHZWenqnOpB7FrL9Fesp3Trx/tAesS1boVUg
pEnEzK3izsZsnxamzdkeVSJuwrfu9O7VRc+8c=
DomainKey-Signature: a=rsa-sha1; c=nofws; d=gmail.com; s=gamma;
h=mime-version:date:message-id:subject:from:to:content-type;
b=gjzGdmdiH9cUeHr7Hmgm2s1SRINtXKmSYORcObaY2Mv/L8ugVOmj1EBfMfZb6ZbX76
lHg4f0YbA7BAw9diroSv9fXoUXw0mLpdatmTgmYWyFilvGFach4N7U8Z7PD7Iz4GGS9h
kzntvWSGHmtL44UZidVxUZ4PZS45zsBLJ8NXw=
MIME-version: 1.0
Received: by 10.101.213.34 with SMTP id p34mr1544254anq.151.1256156136938; Wed,
21 Oct 2009 13:15:36 -0700 (PDT)
Date: Wed, 21 Oct 2009 15:15:36 -0500
Message-id: <c90890f0910211315o3bc3c8a6m6e08248e867fd1e@mail.gmail.com>
Subject: Attachments
From: Joseph Kocherhans <jkocherhans@gmail.com>
To: jkocherhans@mac.com
Content-type: multipart/mixed; boundary=001636c92b78531c06047677a40b
--001636c92b78531c06047677a40b
Content-Type: multipart/alternative; boundary=001636c92b78531bfd047677a409
--001636c92b78531bfd047677a409
Content-Type: text/plain; charset=ISO-8859-1
This is a *test*.
--001636c92b78531bfd047677a409
Content-Type: text/html; charset=ISO-8859-1
This is a <i>test</i>.
--001636c92b78531bfd047677a409--
--001636c92b78531c06047677a40b
Content-Type: text/plain; charset=US-ASCII; name="text_attachment.txt"
Content-Disposition: attachment; filename="text_attachment.txt"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_g12iw4060
c2ltcGxlIHRleHQgYXR0YWNobWVudAo=
--001636c92b78531c06047677a40b
Content-Type: image/gif; name="apache_pb.gif"
Content-Disposition: attachment; filename="apache_pb.gif"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_g12iwbbo1
R0lGODlhAwEgAPcAAP///87OzqWlpYSEhHNzc2tra1paWiEYGP9CGP8xAL1SEN6thO+cQv+EAP/O
GP/OAM7Oxr29xjEA/2MA/5wA/+8A/+dK79573talzv8AnO9KjP8AY+eEnO9SY/8AGAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAAEALAAAAAADASAA
Rwj/AAMIHEiwoMGDCBMqXMiwocOHECNKnEixokWGEQ4cGDCwwIABAgJsMLAhwwYPKFEmWNnAQYSB
B/79MxDzXwSZB2j+OxAgpgEDMgn4FLAzwoCZPWUGJTAzAlACAXDq/FcQ6EyrBKzetHmxq9evYMMW
fEl2g9kKGypQoDBhwj8JSuO+lUB3Atu1FTJQyDAhg0m/BVD+O5kSAQIBLl+KXcy4sePHYiNEaDAA
QYOVCVIKTvnPgwEPZv2G9ivaZF69pdNmGPCgdesGDxrM/CeAo8ALdSdIaNuW7u7duiVcMHhh7drh
Ei+QXs68eQYNBjV7WNhBM4aCGDCvZLhAe4KDDBqI/28A+auA8+fLL4yQniD7kOrjy5/v8OV5uW/n
tl2bIW/fvRlMlZIBCChQU1ECHcVTVLQJJMCBUMW0oEBMKRWSU3JVFddGDsqkGH0ghjhfBAgYsFJn
oJVk2nIUVGDXWm5ZhZ9ccP32G1sVqDXAXhsAmMAALbnUUAS6tRWABb9N8OFBxRlHwUJOIofdcsNZ
sNx1CUm3kAbWYecdd18aFN54IpZp5plopqnmmmy26aZBDy450HkFKOaAAA3g2VBNAljFkVEGDGDf
Ty8ZAJVTBpBFgKAUGiCAU/AJQAB8AkVAQFYyafjPAI4G0Geinhr65qhlGlUTUCDt91+PKkqnUk0f
bv8FVYWSKfUTTgEcRRt6SU0I1AHoPSpTolZp6umwAcgoJ6nMqvfoTwQUEABfwdE4lwESGHCXi6hN
MBJhCaDIkniGIbCsfFtR2uy6IhqVklmhqdWfi2zBNWN+vvW2H714oTbSXyaB1sC5E+U7gQUNNenk
wgwbhyVCGjT3cEIYuGqxqxwYlB1mHYCpHXjjhSzyyAs8hqeJmg2GkoqkscyccWrdNcFPbtk7I1AN
GBDba605IKTGRU5QEJJFTkyQwg0nvZbRypEm5W3LZXyQlhVt7N3VWGMG8sgilzyqZAFIZlQDGyFw
AAIpHuDBABsMANrb8JqF0gFxG+XAAHeqe2S+Rh//hEFwEiA8ENJPN8Rc334fPhAHFzeOUsdefrxQ
d5IXNCbXmIsHAbucd+7556CHLvropJfOOcFhg03RgciKWCxBr5suO0Sm6i0po2hrloBlAyNU07H/
TCqXsJsSZdOvySoFbFJW8XTgAS9VGJemym9lAPOzZ2/QABoJCkABBUCFWgUEmDRSSiztfhjrwBo/
q0yS4iQh/BVqlCmfz+5kP7AycRS7QK/zyVZ8or0CHkRGt6qLk/yTms94QCkeMMzAakJBAMJPV9ez
nqSct5M5DetTSckQ7PATEuNdz4AoDFsBgGIAj9hMKb6RAIyM46+9EABcHliJAiyDGNUlhD0MeVRX
/1iYQhSKjT0v6VZvXgjDfMnQLn3pVtw048AcNqBceoPMBXJSxC5WKmy565Ff8jLDttxrLjd6Ihnb
ohrS3BBcmSkM6ihSl8IlRGl4vADiCnIBC/hRcA2BAAcY57iUcABxGOiAIjvgNYVQbiWNJAgDJknJ
SlqykpG0yGTc9i6XnaZFMGIifmpko/3gSC0xo0AB5FYYBLDNXBLJzW8AmRCkUeACuMylLgmXEOf4
ZY8Dkc4gh0lMDmigOinZHEGs5gHIOTJMlgvZAqZJzWpakzF3u0wcVxaatKjmNDlCpV0KABcZnVEm
wOENb9omntjA5mcIIRJwBkK03aDOlrvMZ5Q0RvOlwV0pS5pZCAYOicisGfRqWxPPJSeZSfWMzUQr
2QxKSMLNbi7nNP3xy17I2KK+xMwAM7tVXH5iAPHAUyB/S5I6eZMkxNkSj8bRIx99SVOpFYRqCeGA
Ig+pMe8UkwPWfKTWxCTNoC5AmWmSTG1c6Z0cuqpHFWWlAf5CGJKoqADthI1rfPYh3ABHjxgIawDC
StYLLLFwvIxI0/wCVrK6NawcWA50CCLMnw7zmJpBKkqhmRChfoeo46GkUa/pRdrpco7uOexAMKBL
YPpNlxAZ6DAnJlm7WlavA4FANR0rEM1W8yCDDS01C0va0pr2tKg9CEACBAQAOw==
--001636c92b78531c06047677a40b--
"""
class AttachmentsTestCase(unittest.TestCase):
def setUp(self):
self.msg = Message.from_string(RAW_MESSAGE)
def test_attachments(self):
attachments = list(self.msg.attachments())
self.assertEqual(len(attachments), 2)
text_filename, text_content = attachments[0]
self.assertEqual(text_filename, u'text_attachment.txt')
self.assertEqual(text_content, u'simple text attachment')
if __name__ == '__main__':
unittest.main()
|
kreatorkodi/repository.torrentbr
|
refs/heads/master
|
plugin.video.quasar/resources/site-packages/bjsonrpc/main.py
|
11
|
"""
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 require special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sck.bind((host, port))
sck.listen(3)
return bjsonrpc.server.Server(sck, handler_factory=handler_factory)
def connect(host="127.0.0.1", port=10123, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.connect((host, port))
return bjsonrpc.connection.Connection(sck, handler_factory=handler_factory)
|
physycom/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsserver_wms.py
|
4
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WMS.
From build dir, run: ctest -R PyQgsServerWMS -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '25/05/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import os
import json
# Needed on Qt 5 so that the serialization of XML is consistent among all executions
os.environ['QT_HASH_SEED'] = '1'
import re
import urllib.request
import urllib.parse
import urllib.error
from qgis.testing import unittest
import osgeo.gdal # NOQA
from owslib.wms import WebMapService
from test_qgsserver import QgsServerTestBase
from qgis.core import QgsProject
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = b'MAP=[^"]+|Content-Length: \\d+'
RE_STRIP_EXTENTS = b'<(north|east|south|west)Bound(Lat|Long)itude>.*</(north|east|south|west)Bound(Lat|Long)itude>|<BoundingBox .*/>'
RE_ATTRIBUTES = b'[^>\\s]+=[^>\\s]+'
class TestQgsServerWMSTestBase(QgsServerTestBase):
"""QGIS Server WMS Tests"""
# Set to True to re-generate reference files for this class
regenerate_reference = False
def wms_request(self, request, extra=None, project='test_project.qgs', version='1.3.0'):
if not os.path.exists(project):
project = os.path.join(self.testdata_path, project)
assert os.path.exists(project), "Project file not found: " + project
query_string = 'https://www.qgis.org/?MAP=%s&SERVICE=WMS&VERSION=%s&REQUEST=%s' % (urllib.parse.quote(project), version, request)
if extra is not None:
query_string += extra
header, body = self._execute_request(query_string)
return (header, body, query_string)
def wms_request_compare(self, request, extra=None, reference_file=None, project='test_project.qgs', version='1.3.0', ignoreExtent=False, normalizeJson=False):
response_header, response_body, query_string = self.wms_request(request, extra, project, version)
response = response_header + response_body
reference_path = os.path.join(self.testdata_path, (request.lower() if not reference_file else reference_file) + '.txt')
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
def _n(r):
lines = r.split(b'\n')
b = lines[2:]
h = lines[:2]
try:
return b'\n'.join(h) + json.dumps(json.loads(b'\n'.join(b))).encode('utf8')
except:
return r
response = _n(response)
expected = _n(expected)
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'*****', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'*****', expected)
if ignoreExtent:
response = re.sub(RE_STRIP_EXTENTS, b'*****', response)
expected = re.sub(RE_STRIP_EXTENTS, b'*****', expected)
msg = "request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (query_string, request, reference_path, response.decode('utf-8'))
self.assertXMLEqual(response, expected, msg=msg)
class TestQgsServerWMS(TestQgsServerWMSTestBase):
"""QGIS Server WMS Tests"""
def test_getcapabilities(self):
self.wms_request_compare('GetCapabilities')
def test_getcapabilities_case_insensitive(self):
self.wms_request_compare('getcapabilities')
self.wms_request_compare('GETCAPABILITIES')
def test_getprojectsettings(self):
self.wms_request_compare('GetProjectSettings')
def test_getcontext(self):
self.wms_request_compare('GetContext')
def test_operation_not_supported(self):
qs = '?MAP=%s&SERVICE=WMS&VERSION=1.3.0&REQUEST=NotAValidRequest' % urllib.parse.quote(self.projectPath)
self._assert_status_code(501, qs)
def test_describelayer(self):
# Test DescribeLayer
self.wms_request_compare('DescribeLayer',
'&layers=testlayer%20%C3%A8%C3%A9&' +
'SLD_VERSION=1.1.0',
'describelayer')
def test_getstyles(self):
# Test GetStyles
self.wms_request_compare('GetStyles',
'&layers=testlayer%20%C3%A8%C3%A9&',
'getstyles')
# Test GetStyles with labeling
self.wms_request_compare('GetStyles',
'&layers=pointlabel',
'getstyles_pointlabel',
project=self.projectPath)
def test_wms_getschemaextension(self):
self.wms_request_compare('GetSchemaExtension',
'',
'getschemaextension')
def wms_request_compare_project(self, request, extra=None, reference_file=None, project_name="test_project.qgs"):
projectPath = self.testdata_path + project_name
assert os.path.exists(projectPath), "Project file not found: " + projectPath
project = QgsProject()
project.read(projectPath)
query_string = 'https://www.qgis.org/?SERVICE=WMS&VERSION=1.3.0&REQUEST=%s' % (request)
if extra is not None:
query_string += extra
header, body = self._execute_request_project(query_string, project)
response = header + body
reference_path = self.testdata_path + (request.lower() if not reference_file else reference_file) + '.txt'
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'*****', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'*****', expected)
self.assertXMLEqual(response, expected, msg="request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (query_string, request, reference_path, response.decode('utf-8')))
def test_wms_getcapabilities_project(self):
"""WMS GetCapabilities without map parameter"""
self.wms_request_compare_project('GetCapabilities')
# reference_file='getcapabilities_without_map_param' could be the right response
def test_wms_getcapabilities_project_empty_layer(self):
"""WMS GetCapabilities with empty layer different CRS: wrong bbox - Regression GH 30264"""
self.wms_request_compare_project('GetCapabilities', reference_file='wms_getcapabilities_empty_layer', project_name='bug_gh30264_empty_layer_wrong_bbox.qgs')
def wms_inspire_request_compare(self, request):
"""WMS INSPIRE tests"""
project = self.testdata_path + "test_project_inspire.qgs"
assert os.path.exists(project), "Project file not found: " + project
query_string = '?MAP=%s&SERVICE=WMS&VERSION=1.3.0&REQUEST=%s' % (urllib.parse.quote(project), request)
header, body = self._execute_request(query_string)
response = header + body
reference_path = self.testdata_path + request.lower() + '_inspire.txt'
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'', expected)
self.assertXMLEqual(response, expected, msg="request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (query_string, request, reference_path, response.decode('utf-8')))
def test_project_wms_inspire(self):
"""Test some WMS request"""
for request in ('GetCapabilities',):
self.wms_inspire_request_compare(request)
def test_wms_getcapabilities_without_title(self):
# Empty title in project leads to a Layer element without Name, Title
# and Abstract tags. However, it should still have a CRS and a BBOX
# according to OGC specifications tests.
self.wms_request_compare('GetCapabilities', reference_file='wms_getcapabilities_without_title', project='test_project_without_title.qgs')
def test_wms_getcapabilities_empty_spatial_layer(self):
# The project contains a spatial layer without feature and the WMS
# extent is not configured in the project.
self.wms_request_compare('GetCapabilities',
reference_file='wms_getcapabilities_empty_spatial_layer',
project='test_project_empty_spatial_layer.qgz',
ignoreExtent=True)
def test_wms_getcapabilities_versions(self):
# default version 1.3.0 when empty VERSION parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='')
# default version 1.3.0 when VERSION = 1.3.0 parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='1.3.0')
# version 1.1.1
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_1_1', version='1.1.1')
# default version 1.3.0 when invalid VERSION parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='33.33.33')
def test_wms_getcapabilities_url(self):
# empty url in project
project = os.path.join(self.testdata_path, "test_project_without_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item:
self.assertEqual("xlink:href=\"?" in item, True)
item_found = True
self.assertTrue(item_found)
# url passed in query string
# verify that GetCapabilities isn't put into the url for non-uppercase parameter names
project = os.path.join(self.testdata_path, "test_project_without_urls.qgs")
qs = "https://www.qgis-server.org?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SeRvIcE": "WMS",
"VeRsIoN": "1.3.0",
"ReQuEsT": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item:
self.assertEqual("xlink:href=\"https://www.qgis-server.org?" in item, True)
self.assertEqual("GetCapabilities" in item, False)
item_found = True
self.assertTrue(item_found)
# url well defined in project
project = os.path.join(self.testdata_path, "test_project_with_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item and "xlink:href=\"my_wms_advertised_url?" in item:
item_found = True
self.assertTrue(item_found)
@unittest.skip('Timeout issues')
def test_wms_GetProjectSettings_wms_print_layers(self):
projectPath = self.testdata_path + "test_project_wms_printlayers.qgs"
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": projectPath,
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetProjectSettings"
}.items())])
header, body = self._execute_request(qs)
xmlResult = body.decode('utf-8')
self.assertTrue(xmlResult.find("<WMSBackgroundLayer>1</WMSBackgroundLayer>") != -1)
self.assertTrue(xmlResult.find("<WMSDataSource>contextualWMSLegend=0&crs=EPSG:21781&dpiMode=7&featureCount=10&format=image/png&layers=public_geo_gemeinden&styles=&url=https://qgiscloud.com/mhugent/qgis_unittest_wms/wms?</WMSDataSource>") != -1)
self.assertTrue(xmlResult.find("<WMSPrintLayer>contextualWMSLegend=0&amp;crs=EPSG:21781&amp;dpiMode=7&amp;featureCount=10&amp;format=image/png&amp;layers=public_geo_gemeinden&amp;styles=&amp;url=https://qgiscloud.com/mhugent/qgis_unittest_wms_print/wms?</WMSPrintLayer>") != -1)
def test_getcapabilities_owslib(self):
# read getcapabilities document
docPath = self.testdata_path + 'getcapabilities.txt'
        with open(docPath, 'r') as f:
            doc = f.read()
# clean header in doc
doc = doc.replace('Content-Length: 15066\n', '')
doc = doc.replace('Content-Type: text/xml; charset=utf-8\n\n', '')
doc = doc.replace('<?xml version="1.0" encoding="utf-8"?>\n', '')
# read capabilities document with owslib
w = WebMapService(None, xml=doc, version='1.3.0')
# check content
rootLayerName = 'QGIS Test Project'
self.assertTrue(rootLayerName in w.contents.keys())
if __name__ == '__main__':
unittest.main()
|
hhLeo/ucore_lab
|
refs/heads/master
|
related_info/lab7/semaphore_condition/thr-ex4.py
|
48
|
import threading
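# Minimal Python 2 threading example: spawn five threads that each print their own name.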
class MyThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
print "I am %s" % (self.name)
if __name__ == "__main__":
for i in range(0, 5):
my_thread = MyThread()
my_thread.start()
|
collegboi/Raspi-Media
|
refs/heads/master
|
TVShowClass.py
|
1
|
#!/usr/bin/python
class TVClass(object):
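    """Simple data holder for a TV show: name, season, episode and an availability flag."""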
def __init__(self, name, season, episode, avail):
self.name = name
self.season = season
self.episode = episode
self.avail = avail
def getName(self):
return self.name
def getSeason(self):
return self.season
def getEpisode(self):
return self.episode
def setSeason(self, season):
self.season = season
def setEpisode(self, episode):
self.episode = episode
def setAvail(self, avail):
self.avail = avail
def getAvail(self):
return self.avail
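# Illustrative usage sketch (not part of the original file):
# show = TVClass("Example Show", 1, 2, True)
# show.setEpisode(3)
# assert show.getEpisode() == 3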
|