repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
beacloudgenius/edx-platform | common/lib/xmodule/xmodule/tests/test_editing_module.py | 181 | 2640 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locations import Location
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
    """Tests for TabsEditingDescriptor: tab configuration, css and context."""

    def setUp(self):
        """Build a TabsEditingDescriptor configured with three editing tabs."""
        super(TabsEditingDescriptorTestCase, self).setUp()
        system = get_test_descriptor_system()
        # Template rendering is stubbed out; these tests never inspect the HTML.
        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
        self.tabs = [
            {
                'name': "Test_css",
                'template': "tabs/codemirror-edit.html",
                'current': True,
                # Raw scss/css payloads read from the test_files directory.
                'css': {
                    'scss': [
                        resource_string(
                            __name__,
                            '../../test_files/test_tabseditingdescriptor.scss'
                        )
                    ],
                    'css': [
                        resource_string(
                            __name__,
                            '../../test_files/test_tabseditingdescriptor.css'
                        )
                    ]
                }
            },
            {
                'name': "Subtitles",
                'template': "video/subtitles.html",
            },
            {
                'name': "Settings",
                'template': "tabs/video-metadata-edit-tab.html"
            }
        ]
        # NOTE(review): assigning to the *class* attribute leaks this tab
        # configuration into any other test that touches TabsEditingDescriptor
        # -- confirm test isolation is acceptable here.
        TabsEditingDescriptor.tabs = self.tabs
        self.descriptor = system.construct_xblock_from_class(
            TabsEditingDescriptor,
            scope_ids=ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', 'revision')),
            field_data=DictFieldData({}),
        )

    def test_get_css(self):
        """get_css must expose the raw scss/css configured on the tabs."""
        css = self.descriptor.get_css()
        # The fixture lives in test_files, a sibling of the xmodule/tests dir.
        test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
        test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
        with open(test_css_file) as new_css:
            added_css = new_css.read()
        # NOTE(review): both assertions compare against the *scss* fixture;
        # presumably the .css fixture has identical content -- confirm.
        self.assertEqual(css['scss'].pop(), added_css)
        self.assertEqual(css['css'].pop(), added_css)

    def test_get_context(self):
        """get_context must echo back the configured tabs."""
        rendered_context = self.descriptor.get_context()
        self.assertListEqual(rendered_context['tabs'], self.tabs)
| agpl-3.0 |
kmee/PySPED | pysped/nfe/leiaute/consrecinfe_310.py | 6 | 5873 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Affero General Public License,
# publicada pela Free Software Foundation, em sua versão 3 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Affero General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Affero General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import *
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_3 as ESQUEMA_ATUAL
from pysped.nfe.leiaute import consrecinfe_200
import os
from nfe_310 import NFe
DIRNAME = os.path.dirname(__file__)
class ConsReciNFe(consrecinfe_200.ConsReciNFe):
    """Receipt-lookup request (consReciNFe) for NF-e layout version 3.10.

    Reuses the 2.00 implementation and overrides only what changed in 3.10:
    the version attribute and the XSD used for validation.
    """

    def __init__(self):
        super(ConsReciNFe, self).__init__()
        self.versao = TagDecimal(nome='consReciNFe', codigo='BP02', propriedade='versao', namespace=NAMESPACE_NFE, valor='3.10', raiz='/')
        # Schema directory/file used to validate the generated XML.
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'consReciNFe_v3.10.xsd'
class InfProt(consrecinfe_200.InfProt):
    """infProt group for layout 3.10.

    3.10 changed the reception timestamp (dhRecbto) to a UTC-aware tag;
    everything else is inherited from the 2.00 implementation.
    """

    def __init__(self):
        super(InfProt, self).__init__()
        # 3.10 stores the reception timestamp with a UTC offset.
        self.dhRecbto = TagDataHoraUTC(nome='dhRecbto', codigo='PR08', raiz='//infProt')

    def get_xml(self):
        """Serialize the group; child element order follows the XSD sequence."""
        xml = XMLNFe.get_xml(self)
        if self.Id.valor:
            xml += self.Id.xml
        else:
            # No Id attribute available: emit a plain opening tag.
            xml += '<infProt>'
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.chNFe.xml
        xml += self.dhRecbto.xml
        xml += self.nProt.xml
        xml += self.digVal.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        xml += '</infProt>'
        return xml

    def set_xml(self, arquivo):
        # Populate each child tag from the parsed document; silently a no-op
        # when the XML cannot be read.
        if self._le_xml(arquivo):
            self.Id.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.chNFe.xml = arquivo
            self.dhRecbto.xml = arquivo
            self.nProt.xml = arquivo
            self.digVal.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo

    # Expose serialization/deserialization as a read/write property.
    xml = property(get_xml, set_xml)
class ProtNFe(consrecinfe_200.ProtNFe):
    """protNFe group for layout 3.10; overrides the version tag and uses
    the 3.10 InfProt child defined above."""

    def __init__(self):
        super(ProtNFe, self).__init__()
        self.versao = TagDecimal(nome='protNFe', codigo='PR02', propriedade='versao', namespace=NAMESPACE_NFE, valor='3.10', raiz='/')
        self.infProt = InfProt()
class RetConsReciNFe(consrecinfe_200.RetConsReciNFe):
    """Response (retConsReciNFe) to a receipt lookup, layout 3.10."""

    def __init__(self):
        super(RetConsReciNFe, self).__init__()
        self.versao = TagDecimal(nome='retConsReciNFe', codigo='BR02', propriedade='versao', namespace=NAMESPACE_NFE, valor='3.10', raiz='/')
        # Schema directory/file used to validate the generated XML.
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'retConsReciNFe_v3.10.xsd'

    def get_xml(self):
        """Serialize the response; element order follows the XSD sequence."""
        xml = XMLNFe.get_xml(self)
        xml += self.versao.xml
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.nRec.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        xml += self.cUF.xml
        xml += self.cMsg.xml
        xml += self.xMsg.xml
        # One protNFe element per processed NF-e in the batch.
        for pn in self.protNFe:
            xml += pn.xml
        xml += '</retConsReciNFe>'
        return xml

    def set_xml(self, arquivo):
        """Parse *arquivo*, filling the scalar fields, the protNFe list and
        the access-key lookup dictionary."""
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.nRec.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo
            self.cUF.xml = arquivo
            self.cMsg.xml = arquivo
            self.xMsg.xml = arquivo
            self.protNFe = self.le_grupo('//retConsReciNFe/protNFe', ProtNFe)

            #
            # Build the dictionary of protocols, keyed by NF-e access key
            #
            for pn in self.protNFe:
                self.dic_protNFe[pn.infProt.chNFe.valor] = pn

    # Expose serialization/deserialization as a read/write property.
    xml = property(get_xml, set_xml)
class ProcNFe(consrecinfe_200.ProcNFe):
    """Complete distribution document (nfeProc): the NF-e plus its protocol,
    layout 3.10."""

    def __init__(self):
        super(ProcNFe, self).__init__()
        self.versao = TagDecimal(nome='nfeProc', propriedade='versao', namespace=NAMESPACE_NFE, valor='3.10', raiz='/')
        # 3.10 NF-e body and protocol children.
        self.NFe = NFe()
        self.protNFe = ProtNFe()
        # Schema directory/file used to validate the generated XML.
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'procNFe_v3.10.xsd'
| lgpl-2.1 |
zenodo/invenio | invenio/modules/messages/views.py | 12 | 10582 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebMessage Flask Blueprint"""
from datetime import datetime
from flask import render_template, request, flash, redirect, url_for, Blueprint
from flask_breadcrumbs import default_breadcrumb_root, register_breadcrumb
from flask_login import current_user, login_required
from flask_menu import register_menu
from sqlalchemy.sql import operators
from invenio.base.decorators import (wash_arguments, templated, sorted_by,
filtered_by)
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.ext.template import render_template_to_string
from invenio.ext.principal import permission_required
from invenio.ext.sqlalchemy import db
from . import dblayer
from . import query as dbquery
from .forms import AddMsgMESSAGEForm, FilterMsgMESSAGEForm
from .models import MsgMESSAGE, UserMsgMESSAGE, email_alert_register
class MessagesMenu(object):
    """Lazy menu label rendering the messages menu item with an unread count.

    ``__str__`` runs each time the menu is rendered, so the counter always
    reflects the current user's inbox at request time.
    """

    def __str__(self):
        uid = current_user.get_id()
        # Promote any due reminders into the inbox before counting.
        dbquery.update_user_inbox_for_reminders(uid)
        unread = db.session.query(db.func.count(UserMsgMESSAGE.id_msgMESSAGE)).\
            filter(db.and_(
                UserMsgMESSAGE.id_user_to == uid,
                UserMsgMESSAGE.status == cfg['CFG_WEBMESSAGE_STATUS_CODE']['NEW']
            )).scalar()
        return render_template_to_string("messages/menu_item.html", unread=unread)
# Visibility predicate for the menu entry: authenticated users only.
not_guest = lambda: not current_user.is_guest

blueprint = Blueprint('webmessage', __name__, url_prefix="/yourmessages",
                      template_folder='templates', static_folder='static')

# All breadcrumbs in this blueprint hang off the account "messages" node.
default_breadcrumb_root(blueprint, '.webaccount.messages')
@blueprint.route('/menu', methods=['GET'])
#FIXME if request is_xhr then do not return 401
#@login_required
#@permission_required('usemessages')
#@templated('messages/menu.html')
def menu():
    """Render the five most recent messages for the menu dropdown."""
    uid = current_user.get_id()
    dbquery.update_user_inbox_for_reminders(uid)
    # join: msgMESSAGE -> user_msgMESSAGE, msgMESSAGE -> users
    # filter: all messages from user AND filter form
    # order: newest first, capped at five entries
    messages = db.session.query(MsgMESSAGE, UserMsgMESSAGE).\
        join(MsgMESSAGE.user_from, MsgMESSAGE.sent_to_users).\
        filter(db.and_(dbquery.filter_all_messages_from_user(uid))).\
        order_by(db.desc(MsgMESSAGE.received_date)).limit(5)
    #return dict(messages=messages.all())
    return render_template('messages/menu.html', messages=messages.all())
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/index', methods=['GET', 'POST'])
@blueprint.route('/display', methods=['GET', 'POST'])
@login_required
@permission_required('usemessages')
@sorted_by(MsgMESSAGE)
@filtered_by(MsgMESSAGE, columns={
    'subject': operators.startswith_op,
    'user_from.nickname': operators.contains_op},
    form=FilterMsgMESSAGEForm)
@templated('messages/index.html')
@register_breadcrumb(blueprint, '.', _('Your Messages'))
@register_menu(blueprint, 'personalize.messages', _('Your messages'), order=10)
@register_menu(blueprint, 'main.messages', MessagesMenu(), order=-3,
               visible_when=not_guest)
def index(sort=False, filter=None):
    """Render the user's inbox, honouring the sort/filter decorators.

    *sort* and *filter* are injected by @sorted_by / @filtered_by.
    NOTE(review): the parameter name `filter` shadows the builtin.
    """
    from invenio.legacy.webmessage.api import is_no_quota_user
    uid = current_user.get_id()
    dbquery.update_user_inbox_for_reminders(uid)
    # join: msgMESSAGE -> user_msgMESSAGE, msgMESSAGE -> users
    # filter: all messages from user AND filter form
    # order: sorted by one of the table column
    messages = db.session.query(MsgMESSAGE, UserMsgMESSAGE).\
        join(MsgMESSAGE.user_from, MsgMESSAGE.sent_to_users).\
        filter(db.and_(dbquery.filter_all_messages_from_user(uid), (filter))).\
        order_by(sort)
    return dict(messages=messages.all(),
                nb_messages=dbquery.count_nb_messages(uid),
                no_quota=is_no_quota_user(uid))
@blueprint.route("/add", methods=['GET', 'POST'])
@blueprint.route("/write", methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.add', _('Write a message'))
@login_required
@permission_required('usemessages')
@wash_arguments({'msg_reply_id': (int, 0)})
def add(msg_reply_id):
from invenio.utils.mail import email_quote_txt
uid = current_user.get_id()
if msg_reply_id:
if (dblayer.check_user_owns_message(uid, msg_reply_id) == 0):
flash(_('Sorry, this message in not in your mailbox.'), "error")
return redirect(url_for('.index'))
else:
try:
m = dbquery.get_message(uid, msg_reply_id)
message = MsgMESSAGE()
message.sent_to_user_nicks = m.message.user_from.nickname \
or str(m.message.id_user_from)
message.subject = _("Re:") + " " + m.message.subject
message.body = email_quote_txt(m.message.body)
form = AddMsgMESSAGEForm(request.form, obj=message)
return render_template('messages/add.html', form=form)
except db.sqlalchemy.orm.exc.NoResultFound:
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
flash(_('This message does not exist.'), "error")
except:
flash(_('Problem with loading message.'), "error")
return redirect(url_for('.index'))
form = AddMsgMESSAGEForm(request.values)
if form.validate_on_submit():
m = MsgMESSAGE()
form.populate_obj(m)
m.id_user_from = uid
m.sent_date = datetime.now()
quotas = dblayer.check_quota(cfg['CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES'] - 1)
users = filter(lambda x: x.id in quotas, m.recipients)
#m.recipients = m.recipients.difference(users))
for u in users:
m.recipients.remove(u)
if len(users) > 0:
flash(_('Following users reached their quota %(quota)d messages: %(users)s',
quota=cfg['CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES'],
users=', '.join([u.nickname for u in users])), "error")
flash(_('Message has %(recipients)d valid recipients.',
recipients=len(m.recipients)), "info")
if len(m.recipients) == 0:
flash(_('Message was not sent'), "info")
else:
if m.received_date is not None and m.received_date > datetime.now():
for um in m.sent_to_users:
um.status = cfg['CFG_WEBMESSAGE_STATUS_CODE']['REMINDER']
else:
m.received_date = datetime.now()
try:
db.session.add(m)
db.session.commit()
flash(_('Message was sent'), "info")
return redirect(url_for('.index'))
except:
db.session.rollback()
return render_template('messages/add.html', form=form)
@blueprint.route("/view")
@blueprint.route("/display_msg")
@register_breadcrumb(blueprint, '.view', _('View a message'))
@login_required
@permission_required('usemessages')
@wash_arguments({'msgid': (int, 0)})
@templated('messages/view.html')
def view(msgid):
uid = current_user.get_id()
if (dbquery.check_user_owns_message(uid, msgid) == 0):
flash(_('Sorry, this message (#%(x_msg)d) is not in your mailbox.',
x_msg=msgid), "error")
else:
try:
m = dbquery.get_message(uid, msgid)
m.status = cfg['CFG_WEBMESSAGE_STATUS_CODE']['READ']
## It's not necessary since "m" is SQLAlchemy object bind with same
## session.
##db.session.add(m)
## I wonder if the autocommit works ...
# Commit changes before rendering for correct menu update.
db.session.commit()
return dict(m=m)
except db.sqlalchemy.orm.exc.NoResultFound:
flash(_('This message does not exist.'), "error")
except:
flash(_('Problem with loading message.'), "error")
return redirect(url_for('.index'))
@blueprint.route("/delete", methods=['GET', 'POST'])
@login_required
@permission_required('usemessages')
def delete():
"""
Delete message specified by 'msgid' that belongs to logged user.
"""
uid = current_user.get_id()
msgids = request.values.getlist('msgid', type=int)
if len(msgids) <= 0:
flash(_('Sorry, no valid message specified.'), "error")
elif dbquery.check_user_owns_message(uid, msgids) < len(msgids):
flash(_('Sorry, this message (#%(x_msg)s) is not in your mailbox.', x_msg=(str(msgids), )), "error")
else:
if dbquery.delete_message_from_user_inbox(uid, msgids) == 0:
flash(_("The message could not be deleted."), "error")
else:
flash(_("The message was successfully deleted."), "info")
return redirect(url_for('.index'))
@blueprint.route("/delete_all", methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.delete', _('Delete all messages'))
@login_required
@permission_required('usemessages')
@wash_arguments({'confirmed': (int, 0)})
def delete_all(confirmed=0):
"""
Delete every message belonging a logged user.
@param confirmed: 0 will produce a confirmation message.
"""
uid = current_user.get_id()
if confirmed != 1:
return render_template('messages/confirm_delete.html')
if dbquery.delete_all_messages(uid):
flash(_("Your mailbox has been emptied."), "info")
else:
flash(_("Could not empty your mailbox."), "warning")
return redirect(url_for('.index'))
# Registration of email_alert invoked from blueprint
# in order to use before_app_first_request.
# Reading config CFG_WEBMESSAGE_EMAIL_ALERT
# required app context.
@blueprint.before_app_first_request
def invoke_email_alert_register():
    """Register e-mail alerts once, at first request, inside an app context."""
    email_alert_register()
| gpl-2.0 |
allenp/odoo | addons/delivery/tests/test_delivery_cost.py | 44 | 5192 | # -*- coding: utf-8 -*-
from openerp.tests import common
from openerp.tools import float_compare
class TestDeliveryCost(common.TransactionCase):
    """Check that carrier delivery costs are added to sale orders correctly."""

    def setUp(self):
        """Cache model proxies and the demo/reference records used below."""
        super(TestDeliveryCost, self).setUp()
        self.SaleOrder = self.env['sale.order']
        self.SaleOrderLine = self.env['sale.order.line']
        self.AccountAccount = self.env['account.account']
        self.SaleConfigSetting = self.env['sale.config.settings']
        self.Product = self.env['product.product']

        self.partner_18 = self.env.ref('base.res_partner_18')
        self.pricelist = self.env.ref('product.list0')
        self.product_4 = self.env.ref('product.product_product_4')
        self.product_uom_unit = self.env.ref('product.product_uom_unit')
        self.normal_delivery = self.env.ref('delivery.normal_delivery_carrier')
        self.partner_4 = self.env.ref('base.res_partner_4')
        self.partner_address_13 = self.env.ref('base.res_partner_address_13')
        self.product_uom_hour = self.env.ref('product.product_uom_hour')
        self.account_data = self.env.ref('account.data_account_type_revenue')
        self.account_tag_operating = self.env.ref('account.account_tag_operating')
        self.product_2 = self.env.ref('product.product_product_2')
        self.product_category = self.env.ref('product.product_category_all')
        self.free_delivery = self.env.ref('delivery.free_delivery_carrier')

    def test_00_delivery_cost(self):
        """A paid carrier adds one 10.0 delivery line; the free carrier adds
        one 0.0 line; both orders accept the generated line exactly once."""
        # Sale order using the "Normal Delivery Charges" carrier.
        self.sale_normal_delivery_charges = self.SaleOrder.create({
            'partner_id': self.partner_18.id,
            'partner_invoice_id': self.partner_18.id,
            'partner_shipping_id': self.partner_18.id,
            'pricelist_id': self.pricelist.id,
            'order_line': [(0, 0, {
                'name': 'PC Assamble + 2GB RAM',
                'product_id': self.product_4.id,
                'product_uom_qty': 1,
                'product_uom': self.product_uom_unit.id,
                'price_unit': 750.00,
            })],
            'carrier_id': self.normal_delivery.id
        })
        # Revenue account used by the delivery product.
        # FIX: the (6, 0, ids) many2many command expects a *list* of ids;
        # the original code passed a set literal `{...}` by mistake.
        self.a_sale = self.AccountAccount.create({
            'code': 'X2020',
            'name': 'Product Sales - (test)',
            'user_type_id': self.account_data.id,
            'tag_ids': [(6, 0, [self.account_tag_operating.id])]
        })
        self.product_consultant = self.Product.create({
            'sale_ok': True,
            'list_price': 75.0,
            'standard_price': 30.0,
            'uom_id': self.product_uom_hour.id,
            'uom_po_id': self.product_uom_hour.id,
            'name': 'Service',
            'categ_id': self.product_category.id,
            'type': 'service'
        })

        # Add the delivery cost line to the sale order.
        self.sale_normal_delivery_charges.delivery_set()

        # Exactly one delivery line must exist, priced at the carrier rate.
        line = self.SaleOrderLine.search([
            ('order_id', '=', self.sale_normal_delivery_charges.id),
            ('product_id', '=', self.sale_normal_delivery_charges.carrier_id.product_id.id)])
        self.assertEqual(len(line), 1, "Delivery cost is not Added")
        self.assertEqual(float_compare(line.price_subtotal, 10, precision_digits=2), 0,
                         "Delivery cost does not correspond.")

        # Confirm the sale order.
        self.sale_normal_delivery_charges.action_confirm()

        # Second sale order, this time with the free delivery carrier.
        self.delivery_sale_order_cost = self.SaleOrder.create({
            'partner_id': self.partner_4.id,
            'partner_invoice_id': self.partner_address_13.id,
            'partner_shipping_id': self.partner_address_13.id,
            'pricelist_id': self.pricelist.id,
            'order_line': [(0, 0, {
                'name': 'Service on demand',
                'product_id': self.product_consultant.id,
                'product_uom_qty': 24,
                'product_uom': self.product_uom_hour.id,
                'price_unit': 75.00,
            }), (0, 0, {
                'name': 'On Site Assistance',
                'product_id': self.product_2.id,
                'product_uom_qty': 30,
                'product_uom': self.product_uom_hour.id,
                'price_unit': 38.25,
            })],
            'carrier_id': self.free_delivery.id
        })
        # Add the (free) delivery cost line.
        self.delivery_sale_order_cost.delivery_set()

        # A single delivery line with a zero subtotal is expected.
        line = self.SaleOrderLine.search([
            ('order_id', '=', self.delivery_sale_order_cost.id),
            ('product_id', '=', self.delivery_sale_order_cost.carrier_id.product_id.id)])
        self.assertEqual(len(line), 1, "Delivery cost is not Added")
        self.assertEqual(float_compare(line.price_subtotal, 0, precision_digits=2), 0,
                         "Delivery cost does not correspond.")

        # Apply the default delivery policy settings.
        self.default_delivery_policy = self.SaleConfigSetting.create({})
        self.default_delivery_policy.execute()
| gpl-3.0 |
zachpodbielniak/PodNet | Linux/Python/CCoordinates.py | 1 | 1904 | '''
____ _ _ _ _ _ ____ ___
| _ \ ___ __| | \ | | ___| |_ / \ | _ \_ _|
| |_) / _ \ / _` | \| |/ _ \ __| / _ \ | |_) | |
| __/ (_) | (_| | |\ | __/ |_ / ___ \| __/| |
|_| \___/ \__,_|_| \_|\___|\__| /_/ \_\_| |___|
File: CCoordinates.py
Author: Zach Podbielniak
Last Update: 01/05/18
Overview: This file sets forth forwarding the PodNet C API to Python.
This file is part of the PodNet API and comes with no warranty,
use with your own discretion.
'''
from PodNet.PodNetLib import *
class COORDINATE(Structure):
    # Mirrors the native COORDINATE struct from the PodNet C library;
    # field order and types must match the C definition exactly.
    _fields_ = [("dX", DOUBLE),
                ("dY", DOUBLE),
                ("dZ", DOUBLE),
                ("bIs3DCoordinate", BOOL)]


# Pointer type used by the C prototypes below.
LPCOORDINATE = POINTER(COORDINATE)

# Declare C prototypes so ctypes marshals arguments and returns correctly.
PodNetLib.CreateCoordinate.restype = LPCOORDINATE
PodNetLib.CreateCoordinate.argtypes = [LPCOORDINATE, DOUBLE, DOUBLE, DOUBLE, BOOL]

PodNetLib.FreeCoordinate.restype = BOOL
PodNetLib.FreeCoordinate.argtypes = [LPCOORDINATE]

PodNetLib.Distance.restype = DOUBLE
PodNetLib.Distance.argtypes = [LPCOORDINATE, LPCOORDINATE]

PodNetLib.DistanceBetweenCoordinateAndPoint.restype = DOUBLE
PodNetLib.DistanceBetweenCoordinateAndPoint.argtypes = [LPCOORDINATE, DOUBLE, DOUBLE, DOUBLE]
def CreateCoordinate(x, y):
    """Create a 2D COORDINATE at (x, y); dZ is zeroed and the 3D flag is off."""
    s = COORDINATE()
    PodNetLib.CreateCoordinate(byref(s), x, y, 0.0, FALSE)
    return s
def CreateCoordinate3D(x, z, y):
    """Create a 3D COORDINATE (3D flag set).

    NOTE(review): the Python signature order is (x, z, y) while the C call
    receives (x, y, z) -- each named argument is still forwarded to its
    matching slot, but *positional* callers must pass z before y. Confirm
    this ordering is intentional (e.g. a z-up convention).
    """
    s = COORDINATE()
    PodNetLib.CreateCoordinate(byref(s), x, y, z, TRUE)
    return s
def FreeCoordinate(Coordinate):
    """Release native resources held by *Coordinate*; returns the C BOOL."""
    return PodNetLib.FreeCoordinate(byref(Coordinate))


def Distance(CoordinateX, CoordinateY):
    """Return the distance between two COORDINATE structs (native call)."""
    return PodNetLib.Distance(byref(CoordinateX), byref(CoordinateY))


def DistanceBetweenCoordinateAndPoint(Coordinate, X, Y, Z):
    """Return the distance from *Coordinate* to the point (X, Y, Z)."""
    return PodNetLib.DistanceBetweenCoordinateAndPoint(byref(Coordinate), X, Y, Z)
| gpl-3.0 |
louistin/fullstack | Python/processing_thread/distribute/taskmanager.py | 1 | 1047 | #!/usr/bin/python
# _*_ coding: utf-8 _*_
import random, time, Queue
from multiprocessing.managers import BaseManager

# NOTE: this is a Python 2 script (print statements, `Queue` module).

# Queue used to hand tasks out to remote workers.
task_queue = Queue.Queue()
# Queue used to collect results back from remote workers.
result_queue = Queue.Queue()


# QueueManager derived from BaseManager; queue registrations are added below.
class QueueManager(BaseManager):
    pass


# Publish both queues on the network; `callable` binds each name to a queue.
QueueManager.register('get_task_queue', callable = lambda: task_queue)
QueueManager.register('get_result_queue', callable = lambda: result_queue)

# Bind port 5000 on all interfaces, authentication key 'luna'.
manager = QueueManager(address = ('', 5000), authkey = 'luna')
# Start the manager (spawns the server process).
manager.start()

# Obtain the network-accessible Queue proxies.
task = manager.get_task_queue()
result = manager.get_result_queue()

# Enqueue a few random tasks for the workers.
for i in range(10):
    n = random.randint(0, 10000)
    print('Put task %d...' % n)
    task.put(n)

# Read the results back (each get blocks for up to 10 seconds).
print "Try get results..."
for i in range(10):
    r = result.get(timeout = 10)
    print 'Result: %s' % r

manager.shutdown()
| mit |
40223137/2015cbaa | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    # Shared warning source: called repeatedly so the runner tests can check
    # how warnings raised from the *same* code location are reported.
    warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    #
    # NOTE: the deprecated aliases (assertEquals, failUnless) are used on
    # purpose -- this helper exists to *generate* deprecation warnings for
    # Test_TextTestRunner.test_warnings. Do not "modernize" them.

    def test_assert(self):
        # Deprecated assertEquals alias: each call may emit a warning.
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)

    def test_fail(self):
        # Deprecated failUnless alias.
        self.failUnless(1)
        self.failUnless(True)

    def test_other_unittest(self):
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)

    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)

    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)

    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')

    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Collect every warning raised while the tests run so they can be
    # re-printed afterwards for the parent test to inspect.
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings'
        if len(sys.argv) == 2:
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)
    # print all the warning messages collected
    for w in ws:
        print(w.message)
| gpl-3.0 |
silverlogic/itunes-iap | itunesiap/core.py | 1 | 5229 |
import json
import requests
import contextlib
from six import u
from . import exceptions
RECEIPT_PRODUCTION_VALIDATION_URL = "https://buy.itunes.apple.com/verifyReceipt"
RECEIPT_SANDBOX_VALIDATION_URL = "https://sandbox.itunes.apple.com/verifyReceipt"
USE_PRODUCTION = True
USE_SANDBOX = False
def config_from_mode(mode):
    """Translate a verification *mode* name into a
    ``(use_production, use_sandbox)`` pair.

    Raises ``exceptions.ModeNotAvailable`` for any unknown mode name.
    """
    flag_table = {
        'production': (True, False),
        'sandbox': (False, True),
        'review': (True, True),
        'reject': (False, False),
    }
    try:
        return flag_table[mode]
    except KeyError:
        raise exceptions.ModeNotAvailable(mode)
def set_verification_mode(mode):
    """Set the global verification mode deciding which Apple endpoints are used.

    `production`, `sandbox`, `review` or `reject` available; any other value
    makes `config_from_mode` raise.

    - `production`: allow production receipts only (default).
    - `sandbox`: allow sandbox receipts only.
    - `review`: allow production receipts but use sandbox as fallback.
    - `reject`: reject all receipts.
    """
    global USE_PRODUCTION, USE_SANDBOX
    USE_PRODUCTION, USE_SANDBOX = config_from_mode(mode)
def get_verification_mode():
    """Report the current global verification mode as a string.

    Inverse of `set_verification_mode`: maps the two global flags back to
    one of 'production', 'sandbox', 'review' or 'reject'.
    """
    mode_names = {
        (True, True): 'review',
        (True, False): 'production',
        (False, True): 'sandbox',
        (False, False): 'reject',
    }
    return mode_names[(bool(USE_PRODUCTION), bool(USE_SANDBOX))]
class Request(object):
    """Validation request with raw receipt. Receipt must be base64 encoded string.

    Use the `verify` method to attempt verification and get a Receipt or an
    exception.
    """

    def __init__(self, receipt, password='', **kwargs):
        self.receipt = receipt
        self.password = password
        # Per-request overrides of the module-level mode flags.
        self.use_production = kwargs.get('use_production', USE_PRODUCTION)
        self.use_sandbox = kwargs.get('use_sandbox', USE_SANDBOX)
        self.response = None  # last HTTP response, if any
        self.result = None    # decoded JSON body of the last response

    def __repr__(self):
        valid = None
        if self.result:
            valid = self.result['status'] == 0
        return u'<Request(valid:{0}, data:{1}...)>'.format(valid, self.receipt[:20])

    def verify_from(self, url):
        """Try verification from given url."""
        # NOTE(review): `verify=False` disables TLS certificate checking for
        # the Apple endpoints -- confirm this is intended.
        # NOTE(review): passwords of length 1 are silently dropped by the
        # `> 1` comparison; presumably `> 0` was meant -- confirm.
        if len(self.password) > 1:
            self.response = requests.post(url, json.dumps({'receipt-data': self.receipt, 'password': self.password}), verify=False)
        else:
            self.response = requests.post(url, json.dumps({'receipt-data': self.receipt}), verify=False)
        if self.response.status_code != 200:
            raise exceptions.ItunesServerNotAvailable(self.response.status_code, self.response.content)
        self.result = self._extract_receipt(self.response.json())
        status = self.result['status']
        # A non-zero status means Apple rejected the receipt.
        if status != 0:
            raise exceptions.InvalidReceipt(status, receipt=self.result.get('receipt', None))
        return self.result

    def _extract_receipt(self, receipt_data):
        """There are two formats that itunes iap purchase receipts are
        sent back in; normalize by flattening the most recent in_app
        purchase onto the receipt dict.
        """
        if 'receipt' not in receipt_data:
            return receipt_data
        in_app_purchase = receipt_data['receipt'].get('in_app', [])
        if len(in_app_purchase) > 0:
            # Copy the newest in-app purchase entry into the receipt itself.
            receipt_data['receipt'].update(in_app_purchase[-1])
        return receipt_data

    def validate(self):
        # Backwards-compatible alias for `verify`.
        return self.verify()

    def verify(self):
        """Try verification with settings. Returns a Receipt object if successed.
        Or raise an exception. See `self.response` or `self.result` to see details.
        """
        ex = None
        receipt = None
        assert (self.use_production or self.use_sandbox)
        if self.use_production:
            try:
                receipt = self.verify_from(RECEIPT_PRODUCTION_VALIDATION_URL)
            except exceptions.InvalidReceipt as e:
                ex = e
        if not receipt and self.use_sandbox:
            try:
                receipt = self.verify_from(RECEIPT_SANDBOX_VALIDATION_URL)
            except exceptions.InvalidReceipt as e:
                # Keep the production error when both endpoints were allowed.
                if not self.use_production:
                    ex = e
        if not receipt:
            raise ex  # raise original error
        return Receipt(receipt)

    @contextlib.contextmanager
    def verification_mode(self, mode):
        # Temporarily override this request's production/sandbox flags,
        # restoring the previous values on exit.
        configs = self.use_production, self.use_sandbox
        self.use_production, self.use_sandbox = config_from_mode(mode)
        yield
        self.use_production, self.use_sandbox = configs
class Receipt(object):
    """Convenience wrapper around a decoded receipt payload.

    Top-level fields are reachable via properties; keys of the nested
    ``receipt`` dict are exposed as plain attributes.
    """

    def __init__(self, data):
        self.data = data
        self.receipt = data['receipt']
        self.receipt_keys = list(self.receipt.keys())

    def __repr__(self):
        return u'<Receipt({0}, {1})>'.format(self.status, self.receipt)

    @property
    def status(self):
        """Status code reported by the verification server (0 == valid)."""
        return self.data['status']

    @property
    def latest_receipt(self):
        """Latest receipt payload, when present in the response."""
        return self.data['latest_receipt']

    def __getattr__(self, key):
        # Invoked only when normal attribute lookup has already failed;
        # fall back to the nested receipt dict before giving up.
        if key in self.receipt_keys:
            return self.receipt[key]
        try:
            return super(Receipt, self).__getattr__(key)
        except AttributeError:
            # Re-raise the standard missing-attribute error for this class.
            return super(Receipt, self).__getattribute__(key)
| bsd-2-clause |
ajfriend/cvxpy | doc/source/conf.py | 2 | 9590 | # -*- coding: utf-8 -*-
#
# CVXPY documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 27 20:47:07 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# To import CVXPY:
sys.path.insert(0, os.path.abspath('../..'))
# To import sphinx extensions we've put in the repository:
sys.path.insert(0, os.path.abspath('../sphinxext'))

# NOTE(review): version string duplicated here -- keep in sync with the
# package's own version.
__version__ = "0.3.8"

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode',
              'numpydoc']

# To suppress autodoc/numpydoc warning.
# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings
numpydoc_show_class_members = False
# Since readthedocs.org has trouble compiling `cvxopt`, autodoc fails
# whenever it tries to import a CVXPY module to document it.
# The following code replaces the relevant cvxopt modules with
# a dummy namespace, allowing autodoc to work.
class Mocked(object):
    """Stand-in namespace used to fake the ``cvxopt`` modules on readthedocs.

    Attribute writes are stored normally; reads of attributes that were never
    set return ``None`` instead of raising ``AttributeError``, which is enough
    for autodoc to import CVXPY modules without a real cvxopt install.
    """
    def __setattr__(self, name, value):
        self.__dict__[name] = value
    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup fails, so any
        # name that was actually set is served from __dict__ before we get
        # here; dict.get keeps even direct __getattr__ calls behaving like
        # the previous explicit membership test (the old `if name in
        # self.__dict__` branch was dead code on the normal path).
        return self.__dict__.get(name)
# Install the dummy namespace for cvxopt and each of its submodules so that
# `import cvxopt` (and friends) succeed while autodoc builds the docs.
MOCK_MODULES = ['cvxopt', 'cvxopt.base', 'cvxopt.misc']
sys.modules.update((mod_name, Mocked()) for mod_name in MOCK_MODULES)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CVXPY'
copyright = u'2014, Steven Diamond, Eric Chu, Stephen Boyd'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
table_styling_embed_css = False
html_theme_path = [alabaster.get_path(), "../themes"]
extensions += ['alabaster']
html_theme = 'cvxpy_alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'cvxgrp',
'github_repo': 'cvxpy',
'github_banner': True,
'travis_button': True,
'analytics_id': 'UA-50248335-1',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cvxpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cvxpy.tex', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cvxpy', u'CVXPY Documentation',
[u'Steven Diamond, Eric Chu, Stephen Boyd'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cvxpy', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'CVXPY', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-3.0 |
pgleeson/TempRepo3 | lib/jython/Lib/distutils/command/bdist_wininst.py | 82 | 13374 | """distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_wininst.py 38697 2005-03-23 18:54:36Z loewis $"
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst (Command):
    """distutils command that packages a project as a self-extracting
    Windows installer exe wrapping a zip of the installed tree.

    (Python 2 module; kept compatible with Python 2.1 per the file header.)
    """
    description = "create an executable installer for MS Windows"
    # Command-line options: (long name, short name, help text).
    user_options = [('bdist-dir=', None,
                     "temporary directory for creating the distribution"),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('target-version=', None,
                     "require a specific python version" +
                     " on the target system"),
                    ('no-target-compile', 'c',
                     "do not compile .py to .pyc on the target system"),
                    ('no-target-optimize', 'o',
                     "do not compile .py to .pyo (optimized)"
                     "on the target system"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('bitmap=', 'b',
                     "bitmap to use for the installer instead of python-powered logo"),
                    ('title=', 't',
                     "title to display on the installer background instead of default"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('install-script=', None,
                     "basename of installation script to be run after"
                     "installation or before deinstallation"),
                    ('pre-install-script=', None,
                     "Fully qualified filename of a script to be run before "
                     "any files are installed. This script need not be in the "
                     "distribution"),
                   ]
    # Options that are flags (no argument).
    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
                       'skip-build']
    def initialize_options (self):
        """Set every option to its pre-command-line-parsing default."""
        self.bdist_dir = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.bitmap = None
        self.title = None
        self.skip_build = 0
        self.install_script = None
        self.pre_install_script = None
    # initialize_options()
    def finalize_options (self):
        """Resolve option defaults and validate cross-option constraints.

        Raises DistutilsOptionError when --target-version conflicts with a
        native-extension build, or when --install-script names a script not
        listed in the distribution.
        """
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wininst')
        if not self.target_version:
            self.target_version = ""
        if not self.skip_build and self.distribution.has_ext_modules():
            # Binary extensions tie the installer to the Python version we
            # are building with, so --target-version must match it.
            short_version = get_python_version()
            if self.target_version and self.target_version != short_version:
                raise DistutilsOptionError, \
                      "target version can only be %s, or the '--skip_build'" \
                      " option must be specified" % (short_version,)
            self.target_version = short_version
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        if self.install_script:
            # The install script must be one of the distribution's scripts.
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError, \
                      "install_script '%s' not found in scripts" % \
                      self.install_script
    # finalize_options()
    def run (self):
        """Build the project, install it into a scratch tree under
        self.bdist_dir, zip that tree, and wrap the zip in a
        self-extracting installer exe placed in self.dist_dir."""
        if (sys.platform != "win32" and
            (self.distribution.has_ext_modules() or
             self.distribution.has_c_libraries())):
            raise DistutilsPlatformError \
                  ("distribution contains extensions and/or C libraries; "
                   "must be compiled on a Windows 32 platform")
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0
        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = sys.version[0:3]
            plat_specifier = ".%s-%s" % (get_platform(), target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)
        # Use a custom scheme for the zip-file, because we have to decide
        # at installation time which scheme to use.
        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
            value = string.upper(key)
            if key == 'headers':
                value = value + '/Include/$dist_name'
            setattr(install,
                    'install_' + key,
                    value)
        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()
        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
        install.run()
        del sys.path[0]
        # And make an archive relative to the root of the
        # pseudo-installation tree.
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be the
        # safer modern choice, but this module tracks the Python 2.1 API.
        from tempfile import mktemp
        archive_basename = mktemp()
        fullname = self.distribution.get_fullname()
        arcname = self.make_archive(archive_basename, "zip",
                                    root_dir=self.bdist_dir)
        # create an exe containing the zip-file
        self.create_exe(arcname, fullname, self.bitmap)
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_wininst', pyversion,
                                             self.get_installer_filename(fullname)))
        # remove the zip-file again
        log.debug("removing temporary file '%s'", arcname)
        os.remove(arcname)
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
    # run()
    def get_inidata (self):
        """Return the INI-format text ([metadata] and [Setup] sections)
        that the wininst runtime reads out of the generated exe."""
        # Return data describing the installation.
        lines = []
        metadata = self.distribution.metadata
        # Write the [metadata] section.
        lines.append("[metadata]")
        # 'info' will be displayed in the installer's dialog box,
        # describing the items to be installed.
        info = (metadata.long_description or '') + '\n'
        # Escape newline characters
        def escape(s):
            return string.replace(s, "\n", "\\n")
        for name in ["author", "author_email", "description", "maintainer",
                     "maintainer_email", "name", "url", "version"]:
            data = getattr(metadata, name, "")
            if data:
                info = info + ("\n    %s: %s" % \
                               (string.capitalize(name), escape(data)))
                lines.append("%s=%s" % (name, escape(data)))
        # The [setup] section contains entries controlling
        # the installer runtime.
        lines.append("\n[Setup]")
        if self.install_script:
            lines.append("install_script=%s" % self.install_script)
        lines.append("info=%s" % escape(info))
        lines.append("target_compile=%d" % (not self.no_target_compile))
        lines.append("target_optimize=%d" % (not self.no_target_optimize))
        if self.target_version:
            lines.append("target_version=%s" % self.target_version)
        title = self.title or self.distribution.get_fullname()
        lines.append("title=%s" % escape(title))
        import time
        import distutils
        build_info = "Built %s with distutils-%s" % \
                     (time.ctime(time.time()), distutils.__version__)
        lines.append("build_info=%s" % build_info)
        return string.join(lines, "\n")
    # get_inidata()
    def create_exe (self, arcname, fullname, bitmap=None):
        """Write the final installer exe.

        Layout (consumed by the wininst.exe stub): stub bytes, optional
        bitmap, NUL-terminated config data (+ optional pre-install script),
        a 12-byte struct header (magic, cfgdata length, bitmap length),
        then the zip archive itself.
        """
        import struct
        self.mkpath(self.dist_dir)
        cfgdata = self.get_inidata()
        installer_name = self.get_installer_filename(fullname)
        self.announce("creating %s" % installer_name)
        if bitmap:
            bitmapdata = open(bitmap, "rb").read()
            bitmaplen = len(bitmapdata)
        else:
            bitmaplen = 0
        file = open(installer_name, "wb")
        file.write(self.get_exe_bytes())
        if bitmap:
            file.write(bitmapdata)
        # Convert cfgdata from unicode to ascii, mbcs encoded
        try:
            unicode
        except NameError:
            pass
        else:
            if isinstance(cfgdata, unicode):
                cfgdata = cfgdata.encode("mbcs")
        # Append the pre-install script
        cfgdata = cfgdata + "\0"
        if self.pre_install_script:
            script_data = open(self.pre_install_script, "r").read()
            cfgdata = cfgdata + script_data + "\n\0"
        else:
            # empty pre-install script
            cfgdata = cfgdata + "\0"
        file.write(cfgdata)
        # The 'magic number' 0x1234567B is used to make sure that the
        # binary layout of 'cfgdata' is what the wininst.exe binary
        # expects. If the layout changes, increment that number, make
        # the corresponding changes to the wininst.exe sources, and
        # recompile them.
        header = struct.pack("<iii",
                             0x1234567B,       # tag
                             len(cfgdata),     # length
                             bitmaplen,        # number of bytes in bitmap
                             )
        file.write(header)
        file.write(open(arcname, "rb").read())
    # create_exe()
    def get_installer_filename(self, fullname):
        """Return the full path of the installer exe to be generated."""
        # Factored out to allow overriding in subclasses
        if self.target_version:
            # if we create an installer for a specific python version,
            # it's better to include this in the name
            installer_name = os.path.join(self.dist_dir,
                                          "%s.win32-py%s.exe" %
                                           (fullname, self.target_version))
        else:
            installer_name = os.path.join(self.dist_dir,
                                          "%s.win32.exe" % fullname)
        return installer_name
    # get_installer_filename()
    def get_exe_bytes (self):
        """Return the raw bytes of the wininst-x.y.exe stub matching the
        MSVC runtime of the (current or target) Python version."""
        from distutils.msvccompiler import get_build_version
        # If a target-version other than the current version has been
        # specified, then using the MSVC version from *this* build is no good.
        # Without actually finding and executing the target version and parsing
        # its sys.version, we just hard-code our knowledge of old versions.
        # NOTE: Possible alternative is to allow "--target-version" to
        # specify a Python executable rather than a simple version string.
        # We can then execute this program to obtain any info we need, such
        # as the real sys.version string for the build.
        cur_version = get_python_version()
        if self.target_version and self.target_version != cur_version:
            # If the target version is *later* than us, then we assume they
            # use what we use
            # string compares seem wrong, but are what sysconfig.py itself uses
            if self.target_version > cur_version:
                bv = get_build_version()
            else:
                if self.target_version < "2.4":
                    bv = "6"
                else:
                    bv = "7.1"
        else:
            # for current version - use authoritative check.
            bv = get_build_version()
        # wininst-x.y.exe is in the same directory as this file
        directory = os.path.dirname(__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python. XXX What about mingw, borland, and so on?
        filename = os.path.join(directory, "wininst-%s.exe" % bv)
        return open(filename, "rb").read()
# class bdist_wininst
| gpl-2.0 |
cirrusone/phantom2 | src/qt/qtwebkit/Tools/QueueStatusServer/model/warninglog.py | 122 | 2147 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from time import time
from datetime import datetime
from google.appengine.ext import db
class WarningLog(db.Model):
    """Datastore record of a single warning emitted by a queue bot."""
    # Timestamp set automatically when the entity is first stored.
    date = db.DateTimeProperty(auto_now_add=True)
    # Short machine-readable event name.
    event = db.StringProperty()
    # Optional human-readable details for the event.
    message = db.StringProperty()
    # Attachment being processed when the warning fired, if any
    # (presumably a Bugzilla attachment id -- confirm against callers).
    attachment_id = db.IntegerProperty()
    # Queue and bot that produced the warning, if known.
    queue_name = db.StringProperty()
    bot_id = db.StringProperty()
    @classmethod
    def record(cls, event, message=None, attachment_id=None, queue_name=None, bot_id=None):
        """Create, persist, and return a WarningLog entity."""
        entity = cls(event=event, message=message, queue_name=queue_name, bot_id=bot_id, attachment_id=attachment_id)
        entity.put()
        return entity
| bsd-3-clause |
wehkamp/ansible | lib/ansible/playbook/__init__.py | 62 | 3162 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import push_basedir
__all__ = ['Playbook']
class Playbook:
    """In-memory representation of a playbook: an ordered list of plays
    built from a YAML file that may mix plays and include statements."""

    def __init__(self, loader):
        # Each entry in a playbook's datastructure is either a play or
        # an include statement.
        self._entries = []
        self._basedir = os.getcwd()
        self._loader = loader

    @staticmethod
    def load(file_name, variable_manager=None, loader=None):
        """Alternate constructor: build a Playbook from a file on disk."""
        playbook = Playbook(loader=loader)
        playbook._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
        return playbook

    def _load_playbook_data(self, file_name, variable_manager):
        """Parse *file_name* and populate self._entries with Play objects."""
        directory = os.path.dirname(file_name)
        if os.path.isabs(file_name):
            self._basedir = directory
        else:
            self._basedir = os.path.normpath(os.path.join(self._basedir, directory))
        # The loader resolves relative paths against the playbook's basedir.
        self._loader.set_basedir(self._basedir)
        # The basedir also joins the list of module directories.
        push_basedir(self._basedir)
        ds = self._loader.load_from_file(os.path.basename(file_name))
        if not isinstance(ds, list):
            raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
        # Plays are parsed via Play(); includes are parsed via
        # PlaybookInclude() and contribute all of their entries here.
        for entry in ds:
            if not isinstance(entry, dict):
                raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
            if 'include' in entry:
                included = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
                self._entries.extend(included._entries)
            else:
                self._entries.append(Play.load(entry, variable_manager=variable_manager, loader=self._loader))

    def get_loader(self):
        return self._loader

    def get_plays(self):
        # Shallow copy so callers cannot mutate our entry list.
        return self._entries[:]
| gpl-3.0 |
pylixm/sae-django-demo | django1.7-sae/site-packages/django/utils/encoding.py | 73 | 8538 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also reports the object that failed to decode."""
    def __init__(self, obj, *args):
        self.obj = obj
        super(DjangoUnicodeDecodeError, self).__init__(*args)

    def __str__(self):
        base_message = super(DjangoUnicodeDecodeError, self).__str__()
        return '%s. You passed in %r (%s)' % (base_message, self.obj,
                                              type(self.obj))
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not six.PY2:
        # Python 3: __str__ already returns text; nothing to patch.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # On Python 2, the text-returning __str__ becomes __unicode__ and
    # __str__ is replaced with a UTF-8 encoding shim.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # gettext_lazy() results pass through untouched; forcing them here
    # would evaluate the translation too early.
    if isinstance(s, Promise):
        return s
    return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    protected = six.integer_types + (
        type(None), float, Decimal,
        datetime.datetime, datetime.date, datetime.time,
    )
    return isinstance(obj, protected)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    s = six.text_type(s)
            elif hasattr(s, '__unicode__'):
                # Python 2 object with its own unicode conversion.
                s = six.text_type(s)
            else:
                # Python 2: go through bytes() so objects that only define
                # __str__ are decoded with the requested encoding.
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                          errors) for arg in s])
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # gettext_lazy() results pass through untouched; forcing them here
    # would evaluate the translation too early.
    if isinstance(s, Promise):
        return s
    return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode via text to honour the requested encoding.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, six.memoryview):
        return bytes(s)
    if isinstance(s, Promise):
        # Lazy object: evaluate it now and encode the resulting text.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                 errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# smart_str/force_str resolve to whichever function returns the *native*
# str type of the running interpreter (text on Python 3, bytes on Python 2).
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.
    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.
    Returns an ASCII string containing the encoded result.
    """
    if iri is None:
        return None
    # The safe set is the RFC 3986 "reserved" characters (gen-delims plus
    # sub-delims) and "~" from the "unreserved" set -- urllib.quote already
    # treats the rest of the unreserved characters as safe.  "%" is added
    # because the end of section 3.1 of RFC 3987 says it must not be
    # re-encoded.
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.
    We are assuming input is either UTF-8 or unicode already.
    This method will encode certain chars that would normally be recognized as
    special chars for URIs. Note that this method does not encode the '
    character, as it is a valid character within URIs. See
    encodeURIComponent() JavaScript function for more details.
    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return None
    # Normalize Windows separators to "/" before quoting; the separators are
    # hardcoded (rather than using os.sep/os.altsep) to keep some flexibility.
    normalized = force_bytes(path).replace(b"\\", b"/")
    return quote(normalized, safe=b"/~!*()'")
def get_system_encoding():
    """
    The encoding of the default system locale but falls back to the given
    fallback encoding if the encoding is unsupported by python or could
    not be determined. See tickets #10335 and #5846
    """
    encoding = 'ascii'
    try:
        candidate = locale.getdefaultlocale()[1]
        if candidate:
            # codecs.lookup raises for encodings Python doesn't support,
            # which drops us back to the ASCII fallback.
            codecs.lookup(candidate)
            encoding = candidate
    except Exception:
        pass
    return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| apache-2.0 |
XVicarious/GarGUI | GarGUI.py | 1 | 8318 | __module_name__ = "GarGUI"
__module_version__ = "1.2.1"
__module_description__ = "A GUI Front-End for Gargoyle"
version = 121
import wx, xchat, string, os, sys, ConfigParser
from urllib2 import urlopen
# Fix path to load GUI
path = os.path.dirname(os.path.abspath(__file__))
path2 = path
path = "".join([path, "/Gar"])
sys.path.append(path)
from GarSimple import Simple
from GarAdvanced import Advanced
print "\0034>>>>>",__module_name__, __module_version__, "has been loaded <<<<<\003"
print "Welcome to the GarGUI Suite. GarGUI is a GUI front-end for the IRC RPG bot Gargoyle. For help please ask XVicarious for now because there is no manual yet!"
# Read the configuration
diceCFG = ConfigParser.RawConfigParser()
diceCFG.read(path + "/dice.gar")
iDie = diceCFG.get("dice", "init")    # initiative die setting from dice.gar
hDie = diceCFG.get("dice", "hit")     # hit die setting from dice.gar
aDie = diceCFG.get("dice", "attack")  # attack die setting from dice.gar
# Extra variables to make life easier
# NOTE(review): the channel is captured once at plugin-load time; rolls keep
# going to this channel even if the user switches tabs -- confirm intended.
command = "msg " + xchat.get_info("channel")
# Greek capital lambda, used as the label of the settings toggle button.
v = unicode('\u039B', "unicode_escape").encode("utf8")
class AdvancedWindow(Advanced):
    """Advanced roller window: arbitrary NdS+M rolls plus three common dice."""
    def __init__(self, *args, **kwds):
        Advanced.__init__(self, *args, **kwds)
        self.bindActions()
    def bindActions(self):
        """Wire the window's buttons to their click handlers."""
        self.rollButton.Bind(wx.EVT_BUTTON, self.rollAdv)
        self.commonDie1.Bind(wx.EVT_BUTTON, self.commonButton1)
        self.commonDie2.Bind(wx.EVT_BUTTON, self.commonButton2)
        self.commonDie3.Bind(wx.EVT_BUTTON, self.commonButton3)
    def commonButton1(self, event):
        """Roll 1d4 in the captured channel."""
        xchat.command(command + " !val %roll:1d4%")
    def commonButton2(self, event):
        """Roll 1d6 in the captured channel."""
        xchat.command(command + " !val %roll:1d6%")
    def commonButton3(self, event):
        """Roll 1d20 in the captured channel."""
        xchat.command(command + " !val %roll:1d20%")
    def rollAdv(self, event):
        """Send a custom roll built from the three input fields."""
        numDie = self.numberOfDice.GetValue()
        diceSize = self.dieSize.GetValue()
        modifier = self.modifierAddition.GetValue()
        # Bug fix: the old test was `if not "+" and "-" in modifier:` which is
        # always False (`not "+"` evaluates to False), so a bare modifier like
        # "3" was never normalized. Check both signs explicitly, and only
        # normalize a non-empty modifier.
        if modifier and "+" not in modifier and "-" not in modifier:
            modifier = "+" + modifier
            xchat.prnt("You failed to include a '+' or a '-' in your modifier value... Assuming '+'.")
        dieRoll = " !val %roll:" + numDie + "d" + diceSize + "%" + modifier
        xchat.command(command + dieRoll)
class SimpleWindow(Simple):
def __init__(self, *args, **kwds):
Simple.__init__(self, *args, **kwds)
self.bindActions()
self.initDieValue.ChangeValue(iDie)
self.hitDieValue.ChangeValue(hDie)
self.attackDieValue.ChangeValue(aDie)
def bindActions(self):
self.initButton.Bind(wx.EVT_BUTTON, self.initButtonClick)
self.hitButton.Bind(wx.EVT_BUTTON, self.hitButtonClick)
self.attackButton.Bind(wx.EVT_BUTTON, self.attackButtonClick)
self.aboutButton.Bind(wx.EVT_BUTTON, self.aboutButtonClick)
self.hideSetters.Bind(wx.EVT_BUTTON, self.showSettersMeth)
self.setInitDie.Bind(wx.EVT_BUTTON, self.initDieEdit)
self.setHitDie.Bind(wx.EVT_BUTTON, self.hitDieEdit)
self.setAttackDie.Bind(wx.EVT_BUTTON, self.hitDieEdit)
self.rollMisc.Bind(wx.EVT_BUTTON, self.rollMiscClick)
def rollMiscClick(self, event):
numDie = self.dieCount.GetValue()
sizeDie = self.dieSize.GetValue()
dieMod = self.dieMod.GetValue()
if not "+" and "-" in dieMod:
dieMod = "+" + dieMod
xchat.prnt("You failed to include a '+' or a '-' in your modifier value... Assuming '+'.")
dieRoll = " !val %roll:" + numDie + "d" + sizeDie + "%" + dieMod
xchat.command(command + dieRoll)
def hideSettersMeth(self, event):
self.frame_sizer.Hide(self.configSizer, recursive=True)
self.Fit()
self.hideSetters.Bind(wx.EVT_BUTTON, self.showSettersMeth)
self.hideSetters.SetLabel("V")
def showSettersMeth(self, event):
try:
self.frame_sizer.Show(self.configSizer, recursive=True)
self.Fit()
except Exception as e:
wx.MessageBox(str(e))
self.hideSetters.Bind(wx.EVT_BUTTON, self.hideSettersMeth)
self.hideSetters.SetLabel(v)
def initDieEdit(self, event):
setcfg = self.initDieValue.GetValue()
diceCFG.set("dice", "init", setcfg)
xchat.command("savecfg")
iDie = setcfg
def hitDieEdit(self, event):
setcfg = self.hitDieValue.GetValue()
diceCFG.set("dice", "hit", setcfg)
xchat.command("savecfg")
hDie = setcfg
def attackDieEdit(self, event):
setcfg = self.attackDieValue.GetValue()
diceCFG.set("dice", "attack", setcfg)
xchat.command("savecfg")
aDie = setcfg
def attackButtonClick(self, event):
try:
if not "+" and "-" in aDie:
roller = " !val %roll:" + aDie + "%"
xchat.command(command + roller)
else:
if "+" in aDie:
index = aDie.find('+')
elif "-" in aDie:
index = aDie.find('-')
roller = " !val %roll:" + aDie[0:index] + "%" + aDie[index:len(aDie)]
xchat.command(command + roller)
except Exception as e:
wx.MessageBox(str(e))
def hitButtonClick(self, event):
if not "+" and "-" in hDie:
roller = " !val %roll:" + hDie + "%"
xchat.command(command + roller)
else:
if "+" in hDie:
index = hDie.find('+')
elif "-" in hDie:
index = hDie.find('-')
roller = " !val %roll:" + hDie[0:index] + "%" + hDie[index:len(hDie)]
xchat.command(command + roller)
def aboutButtonClick(self, event):
wx.MessageBox("GarGUI " + __module_version__ + "\n\nGarGUI is a front-end for the IRC bot \"Gargoyle\" by CyberXZT. GarGUI attempts to make the usage of Gargoyle easier by the simplification of the !val command to simple GUI buttons. \nModes exist in GarGUI to utilize it in different ways. The normal GUI is the most simple having just a few button for rolling Initiative, Hit Chance, and Attack. The \"Advanced\" GUI allows users to fully modify each roll, with the ability to have a few common dice as quick settings.\n\n GarGUI is licensed unded the GNU GPL v3. The full terms of this license can be found in the \"LICENSE\" file in GarGUI's directory.", "About", wx.OK | wx.ICON_INFORMATION)
def initButtonClick(self, event):
if not "+" and "-" in iDie:
roller = " !val %roll:" + iDie + "%"
xchat.command(command + roller)
else:
if "+" in iDie:
index = iDie.find('+')
elif "-" in iDie:
index = iDie.find('-')
roller = " !val %roll:" + iDie[0:index] + "%" + iDie[index:len(iDie)]
xchat.command(command + roller)
def showGUI(word, word_eol, userdata):
#path = raw(path)
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
try:
if word[1] == "derp":
frame_1 = AdvancedWindow(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
if word[1] == "update":
latest = urlopen("http://xvicario.us/gar/latest")
latest = int(latest.read())
if version == latest:
xchat.prnt("GarGUI: No Updates Found...")
elif version < latest:
xchat.prnt("GarGUI: Update Found... Downloading.")
garLatest = urlopen("http://xvicario.us/gar/GarGUI.py")
xchat.prnt("GarGUI: Downloaded... Applying Update.")
garLatest = garLatest.read()
GarGUI = open(path2 + "/GarGUI.py", "w")
GarGUI.write(garLatest)
GarGUI.close()
xchat.prnt("GarGUI: Updated... Unloading module. Please load GarGUI to finish the update.")
xchat.command("py unload GarGUI")
except IndexError:
frame_1 = SimpleWindow(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
def writeCfg(word, word_eol, userdata):
with open(path + "/dice.gar", "wb") as configfile:
diceCFG.write(configfile)
xchat.hook_command("gar", showGUI)
xchat.hook_command("savecfg", writeCfg) | gpl-3.0 |
apporc/oslo.messaging | oslo_messaging/tests/rpc/test_client.py | 6 | 18262 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import testscenarios
import oslo_messaging
from oslo_messaging import exceptions
from oslo_messaging import serializer as msg_serializer
from oslo_messaging.tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class _FakeTransport(object):
def __init__(self, conf):
self.conf = conf
def _send(self, *args, **kwargs):
pass
class TestCastCall(test_utils.BaseTestCase):
scenarios = [
('cast_no_ctxt_no_args', dict(call=False, ctxt={}, args={})),
('call_no_ctxt_no_args', dict(call=True, ctxt={}, args={})),
('cast_ctxt_and_args',
dict(call=False,
ctxt=dict(user='testuser', project='testtenant'),
args=dict(bar='blaa', foobar=11.01))),
('call_ctxt_and_args',
dict(call=True,
ctxt=dict(user='testuser', project='testtenant'),
args=dict(bar='blaa', foobar=11.01))),
]
def test_cast_call(self):
self.config(rpc_response_timeout=None)
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport, oslo_messaging.Target())
self.mox.StubOutWithMock(transport, '_send')
msg = dict(method='foo', args=self.args)
kwargs = {'retry': None}
if self.call:
kwargs['wait_for_reply'] = True
kwargs['timeout'] = None
transport._send(oslo_messaging.Target(), self.ctxt, msg, **kwargs)
self.mox.ReplayAll()
method = client.call if self.call else client.cast
method(self.ctxt, 'foo', **self.args)
class TestCastToTarget(test_utils.BaseTestCase):
_base = [
('all_none', dict(ctor={}, prepare={}, expect={})),
('ctor_exchange',
dict(ctor=dict(exchange='testexchange'),
prepare={},
expect=dict(exchange='testexchange'))),
('prepare_exchange',
dict(ctor={},
prepare=dict(exchange='testexchange'),
expect=dict(exchange='testexchange'))),
('prepare_exchange_none',
dict(ctor=dict(exchange='testexchange'),
prepare=dict(exchange=None),
expect={})),
('both_exchange',
dict(ctor=dict(exchange='ctorexchange'),
prepare=dict(exchange='testexchange'),
expect=dict(exchange='testexchange'))),
('ctor_topic',
dict(ctor=dict(topic='testtopic'),
prepare={},
expect=dict(topic='testtopic'))),
('prepare_topic',
dict(ctor={},
prepare=dict(topic='testtopic'),
expect=dict(topic='testtopic'))),
('prepare_topic_none',
dict(ctor=dict(topic='testtopic'),
prepare=dict(topic=None),
expect={})),
('both_topic',
dict(ctor=dict(topic='ctortopic'),
prepare=dict(topic='testtopic'),
expect=dict(topic='testtopic'))),
('ctor_namespace',
dict(ctor=dict(namespace='testnamespace'),
prepare={},
expect=dict(namespace='testnamespace'))),
('prepare_namespace',
dict(ctor={},
prepare=dict(namespace='testnamespace'),
expect=dict(namespace='testnamespace'))),
('prepare_namespace_none',
dict(ctor=dict(namespace='testnamespace'),
prepare=dict(namespace=None),
expect={})),
('both_namespace',
dict(ctor=dict(namespace='ctornamespace'),
prepare=dict(namespace='testnamespace'),
expect=dict(namespace='testnamespace'))),
('ctor_version',
dict(ctor=dict(version='1.1'),
prepare={},
expect=dict(version='1.1'))),
('prepare_version',
dict(ctor={},
prepare=dict(version='1.1'),
expect=dict(version='1.1'))),
('prepare_version_none',
dict(ctor=dict(version='1.1'),
prepare=dict(version=None),
expect={})),
('both_version',
dict(ctor=dict(version='ctorversion'),
prepare=dict(version='1.1'),
expect=dict(version='1.1'))),
('ctor_server',
dict(ctor=dict(server='testserver'),
prepare={},
expect=dict(server='testserver'))),
('prepare_server',
dict(ctor={},
prepare=dict(server='testserver'),
expect=dict(server='testserver'))),
('prepare_server_none',
dict(ctor=dict(server='testserver'),
prepare=dict(server=None),
expect={})),
('both_server',
dict(ctor=dict(server='ctorserver'),
prepare=dict(server='testserver'),
expect=dict(server='testserver'))),
('ctor_fanout',
dict(ctor=dict(fanout=True),
prepare={},
expect=dict(fanout=True))),
('prepare_fanout',
dict(ctor={},
prepare=dict(fanout=True),
expect=dict(fanout=True))),
('prepare_fanout_none',
dict(ctor=dict(fanout=True),
prepare=dict(fanout=None),
expect={})),
('both_fanout',
dict(ctor=dict(fanout=True),
prepare=dict(fanout=False),
expect=dict(fanout=False))),
]
_prepare = [
('single_prepare', dict(double_prepare=False)),
('double_prepare', dict(double_prepare=True)),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._base,
cls._prepare)
def setUp(self):
super(TestCastToTarget, self).setUp(conf=cfg.ConfigOpts())
def test_cast_to_target(self):
target = oslo_messaging.Target(**self.ctor)
expect_target = oslo_messaging.Target(**self.expect)
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport, target)
self.mox.StubOutWithMock(transport, '_send')
msg = dict(method='foo', args={})
if 'namespace' in self.expect:
msg['namespace'] = self.expect['namespace']
if 'version' in self.expect:
msg['version'] = self.expect['version']
transport._send(expect_target, {}, msg, retry=None)
self.mox.ReplayAll()
if self.prepare:
client = client.prepare(**self.prepare)
if self.double_prepare:
client = client.prepare(**self.prepare)
client.cast({}, 'foo')
TestCastToTarget.generate_scenarios()
_notset = object()
class TestCallTimeout(test_utils.BaseTestCase):
scenarios = [
('all_none',
dict(confval=None, ctor=None, prepare=_notset, expect=None)),
('confval',
dict(confval=21.1, ctor=None, prepare=_notset, expect=21.1)),
('ctor',
dict(confval=None, ctor=21.1, prepare=_notset, expect=21.1)),
('ctor_zero',
dict(confval=None, ctor=0, prepare=_notset, expect=0)),
('prepare',
dict(confval=None, ctor=None, prepare=21.1, expect=21.1)),
('prepare_override',
dict(confval=None, ctor=10.1, prepare=21.1, expect=21.1)),
('prepare_zero',
dict(confval=None, ctor=None, prepare=0, expect=0)),
]
def test_call_timeout(self):
self.config(rpc_response_timeout=self.confval)
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(),
timeout=self.ctor)
self.mox.StubOutWithMock(transport, '_send')
msg = dict(method='foo', args={})
kwargs = dict(wait_for_reply=True, timeout=self.expect, retry=None)
transport._send(oslo_messaging.Target(), {}, msg, **kwargs)
self.mox.ReplayAll()
if self.prepare is not _notset:
client = client.prepare(timeout=self.prepare)
client.call({}, 'foo')
class TestCallRetry(test_utils.BaseTestCase):
scenarios = [
('all_none', dict(ctor=None, prepare=_notset, expect=None)),
('ctor', dict(ctor=21, prepare=_notset, expect=21)),
('ctor_zero', dict(ctor=0, prepare=_notset, expect=0)),
('prepare', dict(ctor=None, prepare=21, expect=21)),
('prepare_override', dict(ctor=10, prepare=21, expect=21)),
('prepare_zero', dict(ctor=None, prepare=0, expect=0)),
]
def test_call_retry(self):
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(),
retry=self.ctor)
self.mox.StubOutWithMock(transport, '_send')
msg = dict(method='foo', args={})
kwargs = dict(wait_for_reply=True, timeout=60,
retry=self.expect)
transport._send(oslo_messaging.Target(), {}, msg, **kwargs)
self.mox.ReplayAll()
if self.prepare is not _notset:
client = client.prepare(retry=self.prepare)
client.call({}, 'foo')
class TestCallFanout(test_utils.BaseTestCase):
scenarios = [
('target', dict(prepare=_notset, target={'fanout': True})),
('prepare', dict(prepare={'fanout': True}, target={})),
('both', dict(prepare={'fanout': True}, target={'fanout': True})),
]
def test_call_fanout(self):
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport,
oslo_messaging.Target(**self.target))
if self.prepare is not _notset:
client = client.prepare(**self.prepare)
self.assertRaises(exceptions.InvalidTarget,
client.call, {}, 'foo')
class TestSerializer(test_utils.BaseTestCase):
scenarios = [
('cast',
dict(call=False,
ctxt=dict(user='bob'),
args=dict(a='a', b='b', c='c'),
retval=None)),
('call',
dict(call=True,
ctxt=dict(user='bob'),
args=dict(a='a', b='b', c='c'),
retval='d')),
]
def test_call_serializer(self):
self.config(rpc_response_timeout=None)
transport = _FakeTransport(self.conf)
serializer = msg_serializer.NoOpSerializer()
client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(),
serializer=serializer)
self.mox.StubOutWithMock(transport, '_send')
msg = dict(method='foo',
args=dict([(k, 's' + v) for k, v in self.args.items()]))
kwargs = dict(wait_for_reply=True, timeout=None) if self.call else {}
kwargs['retry'] = None
transport._send(oslo_messaging.Target(),
dict(user='alice'),
msg,
**kwargs).AndReturn(self.retval)
self.mox.StubOutWithMock(serializer, 'serialize_entity')
self.mox.StubOutWithMock(serializer, 'deserialize_entity')
self.mox.StubOutWithMock(serializer, 'serialize_context')
for arg in self.args:
serializer.serialize_entity(self.ctxt, arg).AndReturn('s' + arg)
if self.call:
serializer.deserialize_entity(self.ctxt, self.retval).\
AndReturn('d' + self.retval)
serializer.serialize_context(self.ctxt).AndReturn(dict(user='alice'))
self.mox.ReplayAll()
method = client.call if self.call else client.cast
retval = method(self.ctxt, 'foo', **self.args)
if self.retval is not None:
self.assertEqual('d' + self.retval, retval)
class TestVersionCap(test_utils.BaseTestCase):
_call_vs_cast = [
('call', dict(call=True)),
('cast', dict(call=False)),
]
_cap_scenarios = [
('all_none',
dict(cap=None, prepare_cap=_notset,
version=None, prepare_version=_notset,
success=True)),
('ctor_cap_ok',
dict(cap='1.1', prepare_cap=_notset,
version='1.0', prepare_version=_notset,
success=True)),
('ctor_cap_override_ok',
dict(cap='2.0', prepare_cap='1.1',
version='1.0', prepare_version='1.0',
success=True)),
('ctor_cap_override_none_ok',
dict(cap='1.1', prepare_cap=None,
version='1.0', prepare_version=_notset,
success=True)),
('ctor_cap_minor_fail',
dict(cap='1.0', prepare_cap=_notset,
version='1.1', prepare_version=_notset,
success=False)),
('ctor_cap_major_fail',
dict(cap='2.0', prepare_cap=_notset,
version=None, prepare_version='1.0',
success=False)),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = (
testscenarios.multiply_scenarios(cls._call_vs_cast,
cls._cap_scenarios))
def test_version_cap(self):
self.config(rpc_response_timeout=None)
transport = _FakeTransport(self.conf)
target = oslo_messaging.Target(version=self.version)
client = oslo_messaging.RPCClient(transport, target,
version_cap=self.cap)
if self.success:
self.mox.StubOutWithMock(transport, '_send')
if self.prepare_version is not _notset:
target = target(version=self.prepare_version)
msg = dict(method='foo', args={})
if target.version is not None:
msg['version'] = target.version
kwargs = {'retry': None}
if self.call:
kwargs['wait_for_reply'] = True
kwargs['timeout'] = None
transport._send(target, {}, msg, **kwargs)
self.mox.ReplayAll()
prep_kwargs = {}
if self.prepare_cap is not _notset:
prep_kwargs['version_cap'] = self.prepare_cap
if self.prepare_version is not _notset:
prep_kwargs['version'] = self.prepare_version
if prep_kwargs:
client = client.prepare(**prep_kwargs)
method = client.call if self.call else client.cast
try:
method({}, 'foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.RPCVersionCapError, ex)
self.assertFalse(self.success)
else:
self.assertTrue(self.success)
TestVersionCap.generate_scenarios()
class TestCanSendVersion(test_utils.BaseTestCase):
scenarios = [
('all_none',
dict(cap=None, prepare_cap=_notset,
version=None, prepare_version=_notset,
can_send_version=_notset,
can_send=True)),
('ctor_cap_ok',
dict(cap='1.1', prepare_cap=_notset,
version='1.0', prepare_version=_notset,
can_send_version=_notset,
can_send=True)),
('ctor_cap_override_ok',
dict(cap='2.0', prepare_cap='1.1',
version='1.0', prepare_version='1.0',
can_send_version=_notset,
can_send=True)),
('ctor_cap_override_none_ok',
dict(cap='1.1', prepare_cap=None,
version='1.0', prepare_version=_notset,
can_send_version=_notset,
can_send=True)),
('ctor_cap_can_send_ok',
dict(cap='1.1', prepare_cap=None,
version='1.0', prepare_version=_notset,
can_send_version='1.1',
can_send=True)),
('ctor_cap_can_send_none_ok',
dict(cap='1.1', prepare_cap=None,
version='1.0', prepare_version=_notset,
can_send_version=None,
can_send=True)),
('ctor_cap_minor_fail',
dict(cap='1.0', prepare_cap=_notset,
version='1.1', prepare_version=_notset,
can_send_version=_notset,
can_send=False)),
('ctor_cap_major_fail',
dict(cap='2.0', prepare_cap=_notset,
version=None, prepare_version='1.0',
can_send_version=_notset,
can_send=False)),
]
def test_version_cap(self):
self.config(rpc_response_timeout=None)
transport = _FakeTransport(self.conf)
target = oslo_messaging.Target(version=self.version)
client = oslo_messaging.RPCClient(transport, target,
version_cap=self.cap)
prep_kwargs = {}
if self.prepare_cap is not _notset:
prep_kwargs['version_cap'] = self.prepare_cap
if self.prepare_version is not _notset:
prep_kwargs['version'] = self.prepare_version
if prep_kwargs:
client = client.prepare(**prep_kwargs)
if self.can_send_version is not _notset:
can_send = client.can_send_version(version=self.can_send_version)
else:
can_send = client.can_send_version()
self.assertEqual(self.can_send, can_send)
def test_invalid_version_type(self):
target = oslo_messaging.Target(topic='sometopic')
transport = _FakeTransport(self.conf)
client = oslo_messaging.RPCClient(transport, target)
self.assertRaises(exceptions.MessagingException,
client.prepare, version='5')
self.assertRaises(exceptions.MessagingException,
client.prepare, version='5.a')
| apache-2.0 |
ciudadanointeligente/deldichoalhecho | ddah_web/static/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
kib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
return "{} MiB".format(kib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| gpl-3.0 |
trishnaguha/ansible | test/integration/targets/vars_prompt/test-vars_prompt.py | 30 | 3147 | #!/usr/bin/env python
import os
import pexpect
import sys
from ansible.module_utils.six import PY2
if PY2:
log_buffer = sys.stdout
else:
log_buffer = sys.stdout.buffer
env_vars = {
'ANSIBLE_ROLES_PATH': './roles',
'ANSIBLE_NOCOLOR': 'True',
'ANSIBLE_RETRY_FILES_ENABLED': 'False',
}
def run_test(playbook, test_spec, args=None, timeout=10, env=None):
if not env:
env = os.environ.copy()
env.update(env_vars)
if not args:
args = sys.argv[1:]
vars_prompt_test = pexpect.spawn(
'ansible-playbook',
args=[playbook] + args,
timeout=timeout,
env=env,
)
vars_prompt_test.logfile = log_buffer
for item in test_spec[0]:
vars_prompt_test.expect(item[0])
if item[1]:
vars_prompt_test.send(item[1])
vars_prompt_test.expect(test_spec[1])
vars_prompt_test.expect(pexpect.EOF)
vars_prompt_test.close()
# These are the tests to run. Each test is a playbook and a test_spec.
#
# The test_spec is a list with two elements.
#
# The first element is a list of two element tuples. The first is the regexp to look
# for in the output, the second is the line to send.
#
# The last element is the last string of text to look for in the output.
#
tests = [
# Basic vars_prompt
{'playbook': 'vars_prompt-1.yml',
'test_spec': [
[('input:', 'some input\r')],
'"input": "some input"']},
# Custom prompt
{'playbook': 'vars_prompt-2.yml',
'test_spec': [
[('Enter some input:', 'some more input\r')],
'"input": "some more input"']},
# Test confirm, both correct and incorrect
{'playbook': 'vars_prompt-3.yml',
'test_spec': [
[('input:', 'confirm me\r'),
('confirm input:', 'confirm me\r')],
'"input": "confirm me"']},
{'playbook': 'vars_prompt-3.yml',
'test_spec': [
[('input:', 'confirm me\r'),
('confirm input:', 'incorrect\r'),
(r'\*\*\*\*\* VALUES ENTERED DO NOT MATCH \*\*\*\*', ''),
('input:', 'confirm me\r'),
('confirm input:', 'confirm me\r')],
'"input": "confirm me"']},
# Test private
{'playbook': 'vars_prompt-4.yml',
'test_spec': [
[('not_secret', 'this is displayed\r'),
('this is displayed', '')],
'"not_secret": "this is displayed"']},
# Test hashing
{'playbook': 'vars_prompt-5.yml',
'test_spec': [
[('password', 'Scenic-Improving-Payphone\r'),
('confirm password', 'Scenic-Improving-Payphone\r')],
r'"password": "\$6\$']},
# Test variables in prompt field
# https://github.com/ansible/ansible/issues/32723
{'playbook': 'vars_prompt-6.yml',
'test_spec': [
[('prompt from variable:', 'input\r')],
'']},
# Test play vars coming from vars_prompt
# https://github.com/ansible/ansible/issues/37984
{'playbook': 'vars_prompt-7.yml',
'test_spec': [
[('prompting for host:', 'testhost\r')],
r'testhost.*ok=1']},
]
for t in tests:
run_test(playbook=t['playbook'], test_spec=t['test_spec'])
| gpl-3.0 |
zstars/weblabdeusto | server/src/weblab/core/coordinator/scheduler.py | 1 | 7300 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from abc import ABCMeta, abstractmethod
import weblab.configuration_doc as configuration_doc
class GenericSchedulerArguments(object):
def __init__(self, cfg_manager, resource_type_name, reservations_manager, resources_manager, confirmer, data_manager, time_provider, core_server_url, initial_store, finished_store, completed_store, post_reservation_data_manager, **kwargs):
self.cfg_manager = cfg_manager
self.resource_type_name = resource_type_name
self.reservations_manager = reservations_manager
self.resources_manager = resources_manager
self.confirmer = confirmer
self.data_manager = data_manager
self.time_provider = time_provider
self.core_server_url = core_server_url
self.initial_store = initial_store
self.finished_store = finished_store
self.completed_store = completed_store
self.post_reservation_data_manager = post_reservation_data_manager
if 'enqueuing_timeout' in kwargs:
self.confirmer.enqueuing_timeout = kwargs.pop('enqueuing_timeout')
if len(kwargs) > 0:
raise RuntimeError("Unrecognized arguments: %s" % kwargs)
###################################################################
#
# Scheduler is the basic abstract class from which all schedulers
# inherit. It provides the generic scheduler arguments to all the
# implementations, and defines the interface that will be used by
# the Coordinator.
#
# In order to implement a new scheduler, implement this interface
# and add your model to the "CoordinatorModel.load" method. As long
# as it uses CoordinatorModel.Base as sqlalchemy Base, it will be
# deployed by the WebLab deployment tools, and managed with the
# coordinator configuration parameters.
#
# XXX The interface below has been extracted from PriorityQueueScheduler, the original WebLab-Deusto scheduler. It will change to support other schemas.
#
class Scheduler(object):
__metaclass__ = ABCMeta
def __init__(self, generic_scheduler_arguments):
#
# cfg_manager is the Configuration Manager. It provides general configuration,
# managed by the system.
#
self.cfg_manager = generic_scheduler_arguments.cfg_manager
#
# confirmer is the Reservation Confirmer. You can enqueue confirmation requests
# to the laboratory servers, so they can check if the experiment is running or
# there is a problem in the system or in the network. These requests will be
# managed in an asynchronous basis.
#
self.confirmer = generic_scheduler_arguments.confirmer
#
# reservations_manager is the Reservations Manager. You can create, delete,
# list, or modify reservations through this object. It ensures that the reservation
# identifiers are unique among the different scheduling schemas.
#
self.reservations_manager = generic_scheduler_arguments.reservations_manager
#
# resources_manager is the Resources Manager. You can perform operations
# with experiment instances, experiment types, resources, etc.
#
self.resources_manager = generic_scheduler_arguments.resources_manager
#
# A data manager. It can be a redis client creator or a sqlalchemy session maker.
# It is already configured, so you can directly use it to create new sessions and
# perform changes against the database or the redis database. As long as you
# have used sqlalchemy in your tables, and added your module to CoordinatorModel.load,
# you will be able to use it. Otherwise, your scheduler should use another system and
# require to configure it through the cfg_manager
#
self.session_maker = generic_scheduler_arguments.data_manager
self.data_manager = generic_scheduler_arguments.data_manager
self.redis_maker = generic_scheduler_arguments.data_manager
#
# An instance of Coordinator.TimeProvider. It provides the time in different formats,
# and it is easy to override while testing. As long as you are going to develop tests,
# you should use it.
#
self.time_provider = generic_scheduler_arguments.time_provider
#
# The Resource Type of the experiment being managed by this scheduler.
#
self.resource_type_name = generic_scheduler_arguments.resource_type_name
#
# The address of the core server, such as 'https://www.weblab.deusto.es/weblab/',
# so as to point out where is the server
#
self.core_server_url = generic_scheduler_arguments.core_server_url
self.core_server_route = self.cfg_manager.get_doc_value(configuration_doc.CORE_FACADE_SERVER_ROUTE)
self.core_server_uuid = self.cfg_manager.get_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER)
self.core_server_uuid_human = self.cfg_manager.get_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER_HUMAN)
self.completed_store = generic_scheduler_arguments.completed_store
self.post_reservation_data_manager = generic_scheduler_arguments.post_reservation_data_manager
def stop(self):
pass
@abstractmethod
def is_remote(self):
pass
####################################################################################
#
# Experiment resources management. They should be implemented if the experiment
# developer wants the users to administrate the resources through the WebLab
# administration panel. Otherwise, the scheduler should handle the notifications
# of "experiment broken", "experiment fixed", etc.
#
def removing_current_resource_slot(self, session, resource_instance_id):
pass
#######################################
#
# Kernel of the reservations
#
#
@abstractmethod
def reserve_experiment(self, reservation_id, experiment_id, time, priority, initialization_in_accounting, client_initial_data, request_info):
pass
@abstractmethod
def get_reservation_status(self, reservation_id):
pass
@abstractmethod
def confirm_experiment(self, reservation_id, lab_session_id, initial_configuration, exp_info):
pass
@abstractmethod
def finish_reservation(self, reservation_id):
pass
def get_uuids(self):
return []
# Not abstract since most schedulers will not have it.
def assign_single_scheduler(self, reservation_id, assigned_resource_type_name, locking):
return []
#############################################################
#
# Auxiliar method, used by tests to create a new scenario.
#
def _clean(self):
pass
| bsd-2-clause |
dimasad/corujas | corujas.py | 1 | 8869 | import sys
import attrdict
import numpy as np
import numpy.linalg
from scipy import io, interpolate
from thesis2 import codegen, kalman, mocos, symstats, vme
from thesis2.experiments import base
class SymbolicModel(base.SymbolicModel):
    '''trakSTAR path reconstruction model.'''

    # Rigid-body pose states: position, attitude quaternion, and the
    # corresponding linear/angular velocities (constant-velocity kinematics).
    x = ['x', 'y', 'z', 'q0', 'q1', 'q2', 'q3',
         'vx', 'vy', 'vz', 'wx', 'wy', 'wz']
    '''State vector.'''

    # Only the vector part of the quaternion is measured; q0 is kept near the
    # unit-norm manifold by the renormalization term in `f`.
    y = ['x_meas', 'y_meas', 'z_meas',
         'q1_meas', 'q2_meas', 'q3_meas']
    '''Measurement vector.'''

    p = []
    '''Parameter vector.'''

    s = []
    '''Exogenous signals vector.'''

    # Measurement noise levels, prior standard deviations and the gain of the
    # quaternion renormalization feedback.
    c = ['x_meas_std', 'y_meas_std', 'z_meas_std',
         'q1_meas_std', 'q2_meas_std', 'q3_meas_std',
         'w0_std', 'v0_std', 'pos0_std', 'q_renorm_gain']
    '''Constants vector.'''

    def f(self, t, x, p, s, c):
        '''Drift function.'''
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        # Soft renormalization: zero when the quaternion has unit norm,
        # otherwise pushes it back toward the unit sphere.
        renorm = a.q_renorm_gain * (1 - a.q0**2 - a.q1**2 - a.q2**2 - a.q3**2)
        return [
            # Position integrates linear velocity.
            a.vx,
            a.vy,
            a.vz,
            # Quaternion kinematics qdot = 0.5 * q (x) [0, w], plus renorm.
            -0.5 * (a.q1 * a.wx + a.q2 * a.wy + a.q3 * a.wz) + renorm * a.q0,
            -0.5 * (-a.q0 * a.wx - a.q2 * a.wz + a.q3 * a.wy) + renorm * a.q1,
            -0.5 * (-a.q0 * a.wy + a.q1 * a.wz - a.q3 * a.wx) + renorm * a.q2,
            -0.5 * (-a.q0 * a.wz - a.q1 * a.wy + a.q2 * a.wx) + renorm * a.q3,
            # Velocities are modeled as constant, driven only by process noise.
            0,
            0,
            0,
            0,
            0,
            0,
        ]

    def meas_mean(self, t, x, p, s, c):
        '''Measurement mean.'''
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        return [a.x, a.y, a.z, a.q1, a.q2, a.q3]

    def meas_cov(self, t, x, p, s, c):
        '''Measurement covariance matrix.'''
        a = attrdict.AttrDict(self.unpack_arguments(t=t, x=x, p=p, s=s, c=c))
        # Measurement channels are independent: diagonal covariance.
        stds = [
            a.x_meas_std, a.y_meas_std, a.z_meas_std,
            a.q1_meas_std, a.q2_meas_std, a.q3_meas_std
        ]
        return np.diag(stds) ** 2

    def meas_ll(self, y, t, x, p, s, c):
        '''Measurement log-likelihood.'''
        a = attrdict.AttrDict(
            self.unpack_arguments(t=t, x=x, y=y, p=p, s=s, c=c)
        )
        # Sum of independent Gaussian log-densities, one per measured channel.
        return (symstats.normal_logpdf(a.x_meas, a.x, a.x_meas_std) +
                symstats.normal_logpdf(a.y_meas, a.y, a.y_meas_std) +
                symstats.normal_logpdf(a.z_meas, a.z, a.z_meas_std) +
                symstats.normal_logpdf(a.q1_meas, a.q1, a.q1_meas_std) +
                symstats.normal_logpdf(a.q2_meas, a.q2, a.q2_meas_std) +
                symstats.normal_logpdf(a.q3_meas, a.q3, a.q3_meas_std))

    def prior_logpdf(self, x, p, c):
        '''Log-density of the prior on the initial state.

        Zero-mean Gaussian priors on the initial velocities and position;
        the quaternion states get no explicit prior here.
        '''
        a = attrdict.AttrDict(
            self.unpack_arguments(x=x, p=p, c=c)
        )
        return (symstats.normal_logpdf(a.wx, 0, a.w0_std) +
                symstats.normal_logpdf(a.wy, 0, a.w0_std) +
                symstats.normal_logpdf(a.wz, 0, a.w0_std) +
                symstats.normal_logpdf(a.vx, 0, a.v0_std) +
                symstats.normal_logpdf(a.vy, 0, a.v0_std) +
                symstats.normal_logpdf(a.vz, 0, a.v0_std) +
                symstats.normal_logpdf(a.x, 0, a.pos0_std) +
                symstats.normal_logpdf(a.y, 0, a.pos0_std) +
                symstats.normal_logpdf(a.z, 0, a.pos0_std))
def generated_src():
    """Return the Python source code of the generated model class."""
    return base.ModelGenerator(SymbolicModel(), 'GeneratedModel').generate()
def print_generated_module():
    """Write the generated model source to generated_corujas.py, next to
    this module."""
    from os import path
    target = path.join(path.dirname(__file__), 'generated_corujas.py')
    with open(target, 'w') as output:
        output.write(generated_src())
# Use the generated model module if it was written out by
# print_generated_module; otherwise generate and exec its source in-process.
try:
    from generated_corujas import GeneratedModel
except ImportError:
    context = {'__name__': __name__}
    exec(generated_src(), context)
    GeneratedModel = context['GeneratedModel']
def unwrap_quaternion(q):
    """Remove antipodal sign flips from a quaternion time series.

    q and -q represent the same rotation, so a sensor can jump between the
    two.  Whenever the step between consecutive samples is larger than 1
    (half the distance between antipodes), the sign of every sample from
    that point on is flipped.  Returns a new array; *q* is not modified.
    """
    result = np.array(q)
    step_sizes = np.linalg.norm(q[1:] - q[:-1], axis=1)
    for flip_at in np.flatnonzero(step_sizes > 1) + 1:
        result[flip_at:] = -result[flip_at:]
    return result
def load_data(filepath, start=None, stop=None):
    """Load a trakSTAR .mat record.

    Returns ``(tmeas, y_dict)``: the measurement time vector and a dict of
    measurement arrays.  Quaternions are sign-unwrapped and the positions
    are referenced to the first sample.  ``start``/``stop`` optionally
    slice the record.
    """
    range_ = slice(start, stop)
    data = io.loadmat(filepath)
    tmeas = data['time'].flatten()[range_]
    q = data['q'][range_]
    q_unwrapped = unwrap_quaternion(q)
    y_dict = dict(
        x_meas=data['x'].flatten()[range_],
        y_meas=data['y'].flatten()[range_],
        z_meas=data['z'].flatten()[range_],
        q0_meas=q_unwrapped[:, 0],
        q1_meas=q_unwrapped[:, 1],
        q2_meas=q_unwrapped[:, 2],
        q3_meas=q_unwrapped[:, 3],
    )
    # Shift the positions so they start at zero, consistent with the
    # zero-centered position prior of the model.
    y_dict['x_meas'] -= y_dict['x_meas'][0]
    y_dict['y_meas'] -= y_dict['y_meas'][0]
    y_dict['z_meas'] -= y_dict['z_meas'][0]
    return tmeas, y_dict
def spline_fit(tmeas, y_dict, smoothing_factor):
    """Fit least-squares quintic splines to each measured channel.

    The knot spacing is ``smoothing_factor`` sample periods (larger values
    smooth more).  Returns a dict of LSQUnivariateSpline objects, covering
    the model measurements plus ``q0_meas``.
    """
    Tknot = (tmeas[1] - tmeas[0]) * smoothing_factor
    # Interior knots only, kept two knot-spacings clear of both record ends.
    knots = np.arange(tmeas[0] + 2 * Tknot, tmeas[-1] - 2 * Tknot, Tknot)
    splines = {}
    for yname in SymbolicModel.y + ['q0_meas']:
        splines[yname] = interpolate.LSQUnivariateSpline(
            tmeas, y_dict[yname], knots, k=5
        )
    return splines
def given_params():
    """Constants and parameter values taken as known a priori."""
    params = {'q_renorm_gain': 1, 'w0_std': 1e-3, 'v0_std': 10, 'pos0_std': 1}
    # Identical noise level for the three position channels...
    for axis in 'xyz':
        params[axis + '_meas_std'] = 0.04
    # ...and for the four quaternion channels.
    for k in range(4):
        params['q%d_meas_std' % k] = 0.0002
    return params
def param_guess():
    """Initial guesses for the parameters estimated by the optimizer."""
    guess = dict.fromkeys(['x_meas_std', 'y_meas_std', 'z_meas_std'], 0.2)
    guess.update(
        dict.fromkeys(['q1_meas_std', 'q2_meas_std', 'q3_meas_std'], 0.0005)
    )
    return guess
def estim_problem(tmeas, y, model, col_order, meas_subdivide):
    """Build the variational estimation problem on a refined time grid.

    ``meas_subdivide`` subdivides each measurement interval and
    ``col_order`` is the collocation order.  Returns ``(problem, t_fine)``.
    """
    # Indices of the measurement instants within the refined grid.
    yind = meas_subdivide * np.arange(tmeas.size)
    test = np.linspace(
        tmeas[0], tmeas[-1], (tmeas.size - 1) * meas_subdivide + 1
    )
    collocation = mocos.LGLCollocation(col_order)
    problem = vme.Problem(model, test, y, yind, collocation, True)
    t_fine = problem.t_fine
    return problem, t_fine
def pack_x_guess(splines, t_fine):
    """Build the initial state-trajectory guess from the fitted splines.

    Positions and quaternions are evaluated directly; the velocities come
    from the spline first derivatives, and the angular velocity is
    recovered from the quaternion and its derivative.
    """
    q0 = splines['q0_meas'](t_fine)
    q1 = splines['q1_meas'](t_fine)
    q2 = splines['q2_meas'](t_fine)
    q3 = splines['q3_meas'](t_fine)
    # First time-derivative of each quaternion component at each grid point.
    q0_dot = [splines['q0_meas'].derivatives(t)[1] for t in t_fine]
    q1_dot = [splines['q1_meas'].derivatives(t)[1] for t in t_fine]
    q2_dot = [splines['q2_meas'].derivatives(t)[1] for t in t_fine]
    q3_dot = [splines['q3_meas'].derivatives(t)[1] for t in t_fine]
    # Body angular rates implied by the quaternion kinematics (inverse of
    # the qdot expressions in SymbolicModel.f).
    wx = 2 * (q0 * q1_dot + q3 * q2_dot - q2 * q3_dot - q1 * q0_dot)
    wy = 2 * (-q3 * q1_dot + q0 * q2_dot + q1 * q3_dot - q2 * q0_dot)
    wz = 2 * (q2 * q1_dot - q1 * q2_dot + q0 * q3_dot - q3 * q0_dot)
    x_dict = dict(
        x=splines['x_meas'](t_fine),
        y=splines['y_meas'](t_fine),
        z=splines['z_meas'](t_fine),
        vx=[splines['x_meas'].derivatives(t)[1] for t in t_fine],
        vy=[splines['y_meas'].derivatives(t)[1] for t in t_fine],
        vz=[splines['z_meas'].derivatives(t)[1] for t in t_fine],
        q0=q0, q1=q1, q2=q2, q3=q3, wx=wx, wy=wy, wz=wz
    )
    return GeneratedModel.pack_x(t_fine.shape, **x_dict)
def save_data(tmeas, y_dict, t_fine, xopt, filename):
    """Save measurements and the estimated state trajectories to a .mat file."""
    data = y_dict.copy()
    data.update(zip(GeneratedModel.xnames, xopt))
    data.update(t_meas=tmeas, t=t_fine)
    io.matlab.mio.savemat(filename, data)
def main(argv):
    """Run the path reconstruction: ``argv = [matfile, [start, [stop]]]``."""
    file_name = str(argv[0])
    start = int(argv[1]) if len(argv) > 1 else None
    stop = int(argv[2]) if len(argv) > 2 else None
    tmeas, y_dict = load_data(file_name, start, stop)
    splines = spline_fit(tmeas, y_dict, 4)
    params = {}
    params.update(given_params())
    params.update(param_guess())  # guesses override the given values
    # Process-noise input matrix: noise drives only the last six states
    # (linear and angular velocities).
    G = np.zeros((GeneratedModel.nx, 6))
    G[-6:] = np.eye(6) * [50, 50, 50, 2.71, 2.71, 2.71]
    c = GeneratedModel.pack_c(**params)
    p = GeneratedModel.pack_p(**params)
    y = GeneratedModel.pack_y(tmeas.shape, **y_dict)
    model = GeneratedModel(G, c=c, p=p)
    problem, t_fine = estim_problem(tmeas, y, model, 5, 1)
    x_guess = pack_x_guess(splines, t_fine)
    z0 = problem.pack_decision(x_guess, None, p)
    # Standard deviations cannot go negative.
    p_lb = dict(x_meas_std=0, y_meas_std=0, z_meas_std=0,
                q0_meas_std=0, q1_meas_std=0, q2_meas_std=0, q3_meas_std=0)
    p_fix = dict()
    z_bounds = problem.pack_bounds(p_lb=p_lb, p_fix=p_fix)
    # Pin the quaternion states to the first measured attitude.
    # NOTE(review): this writes columns 3:7 of every bound row -- confirm the
    # decision-vector layout matches the intent of fixing only the initial
    # condition.
    z_bounds[:, 3:7] = [y_dict['q0_meas'][0], y_dict['q1_meas'][0],
                        y_dict['q2_meas'][0], y_dict['q3_meas'][0]]
    nlp = problem.nlp(z_bounds)
    nlp.num_option(b'tol', 1e-6)
    nlp.int_option(b'max_iter', 100)
    zopt, solinfo = nlp.solve(z0)
    xopt, dopt, popt = problem.unpack_decision(zopt)
    yopt = model.meas_mean(t_fine, xopt, popt)  # NOTE(review): result unused
    save_data(tmeas, y_dict, t_fine, xopt, file_name + 'meas')
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
albertjan/pypyjs | website/js/pypy.js-0.2.0/lib/modules/test/test_multifile.py | 138 | 1702 | from test import test_support
mimetools = test_support.import_module('mimetools', deprecated=True)
multifile = test_support.import_module('multifile', deprecated=True)
import cStringIO
msg = """Mime-Version: 1.0
Content-Type: multipart/mixed;
boundary="=====================_590453667==_"
X-OriginalArrivalTime: 05 Feb 2002 03:43:23.0310 (UTC) FILETIME=[42D88CE0:01C1ADF7]
--=====================_590453667==_
Content-Type: multipart/alternative;
boundary="=====================_590453677==_.ALT"
--=====================_590453677==_.ALT
Content-Type: text/plain; charset="us-ascii"; format=flowed
test A
--=====================_590453677==_.ALT
Content-Type: text/html; charset="us-ascii"
<html>
<b>test B</font></b></html>
--=====================_590453677==_.ALT--
--=====================_590453667==_
Content-Type: text/plain; charset="us-ascii"
Content-Disposition: attachment; filename="att.txt"
Attached Content.
Attached Content.
Attached Content.
Attached Content.
--=====================_590453667==_--
"""
def getMIMEMsg(mf):
    """Recursively walk a multipart message, counting in the module globals
    the multipart boundaries seen and the body lines of the leaf parts."""
    global boundaries, linecount
    msg = mimetools.Message(mf)
    #print "TYPE: %s" % msg.gettype()
    if msg.getmaintype() == 'multipart':
        boundary = msg.getparam("boundary")
        boundaries += 1
        mf.push(boundary)
        # Visit each subpart delimited by this boundary.
        while mf.next():
            getMIMEMsg(mf)
        mf.pop()
    else:
        # Leaf part: just count its body lines.
        lines = mf.readlines()
        linecount += len(lines)
def test_main():
    """Parse the sample message and check the part/line bookkeeping."""
    global boundaries, linecount
    boundaries = 0
    linecount = 0
    f = cStringIO.StringIO(msg)
    getMIMEMsg(multifile.MultiFile(f))
    # The fixture contains two nested multiparts and nine body lines.
    assert boundaries == 2
    assert linecount == 9
if __name__ == '__main__':
test_main()
| mit |
michaelpacer/python-future | src/future/backports/email/_encoded_words.py | 82 | 8443 | """ Routines for manipulating RFC2047 encoded words.
This is currently a package-private API, but will be considered for promotion
to a public API if there is demand.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import bytes
from future.builtins import chr
from future.builtins import int
from future.builtins import str
# An ecoded word looks like this:
#
# =?charset[*lang]?cte?encoded_string?=
#
# for more information about charset see the charset module. Here it is one
# of the preferred MIME charset names (hopefully; you never know when parsing).
# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
# theory other letters could be used for other encodings, but in practice this
# (almost?) never happens. There could be a public API for adding entries
# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
# as indicated by the brackets (they are not part of the syntax) but is almost
# never encountered in practice.
#
# The general interface for a CTE decoder is that it takes the encoded_string
# as its argument, and returns a tuple (cte_decoded_string, defects). The
# cte_decoded_string is the original binary that was encoded using the
# specified cte. 'defects' is a list of MessageDefect instances indicating any
# problems encountered during conversion. 'charset' and 'lang' are the
# corresponding strings extracted from the EW, case preserved.
#
# The general interface for a CTE encoder is that it takes a binary sequence
# as input and returns the cte_encoded_string, which is an ascii-only string.
#
# Each decoder must also supply a length function that takes the binary
# sequence as its argument and returns the length of the resulting encoded
# string.
#
# The main API functions for the module are decode, which calls the decoder
# referenced by the cte specifier, and encode, which adds the appropriate
# RFC 2047 "chrome" to the encoded string, and can optionally automatically
# select the shortest possible encoding. See their docstrings below for
# details.
import re
import base64
import binascii
import functools
from string import ascii_letters, digits
from future.backports.email import errors
__all__ = ['decode_q',
'encode_q',
'decode_b',
'encode_b',
'len_q',
'len_b',
'decode',
'encode',
]
#
# Quoted Printable
#
# regex based decoder.
# regex based decoder: replaces every =XX hex escape with its raw byte.
_q_byte_subber = functools.partial(
    re.compile(br'=([a-fA-F0-9]{2})').sub,
    lambda match: bytes([int(match.group(1), 16)]),
)

def decode_q(encoded):
    """Decode RFC 2047 'Q'-encoded bytes.

    Underscores stand for spaces and =XX escapes for raw bytes.  Returns
    the decoded bytes and an (always empty) defect list.
    """
    despaced = bytes(encoded.replace(b'_', b' '))
    return _q_byte_subber(despaced), []
# dict mapping bytes to their encoded form
class _QByteMap(dict):
safe = bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'))
def __missing__(self, key):
if key in self.safe:
self[key] = chr(key)
else:
self[key] = "={:02X}".format(key)
return self[key]
# Shared memoizing table used by encode_q and len_q.
_q_byte_map = _QByteMap()

# In headers spaces are mapped to '_'.
_q_byte_map[ord(' ')] = '_'
def encode_q(bstring):
    """Return *bstring* in RFC 2047 'Q' encoding, as an ASCII-only string."""
    return str(''.join(_q_byte_map[x] for x in bytes(bstring)))
def len_q(bstring):
    """Return the length *bstring* would have once 'Q'-encoded."""
    return sum(len(_q_byte_map[x]) for x in bytes(bstring))
#
# Base64
#
def decode_b(encoded):
    """Base64-decode *encoded*, tolerating bad padding.

    Returns ``(decoded_bytes, defects)`` where the defect list records any
    padding or invalid-character problems encountered.
    """
    defects = []
    pad_err = len(encoded) % 4
    if pad_err:
        # Repair missing padding, but remember that it was wrong.
        defects.append(errors.InvalidBase64PaddingDefect())
        padded_encoded = encoded + b'==='[:4-pad_err]
    else:
        padded_encoded = encoded
    try:
        # The validate kwarg to b64decode is not supported in Py2.x, so
        # emulate it with an explicit alphabet check.
        if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', padded_encoded):
            raise binascii.Error('Non-base64 digit found')
        return base64.b64decode(padded_encoded), defects
    except binascii.Error:
        # Since we had correct padding, this must an invalid char error.
        defects = [errors.InvalidBase64CharactersDefect()]
        # The non-alphabet characters are ignored as far as padding
        # goes, but we don't know how many there are.  So we'll just
        # try various padding lengths until something works.
        for i in 0, 1, 2, 3:
            try:
                return base64.b64decode(encoded+b'='*i), defects
            except (binascii.Error, TypeError):    # Py2 raises a TypeError
                if i==0:
                    defects.append(errors.InvalidBase64PaddingDefect())
                else:
                    # This should never happen.
                    raise AssertionError("unexpected binascii.Error")
def encode_b(bstring):
    """Return *bstring* Base64-encoded, as an ASCII-only string."""
    return str(base64.b64encode(bstring), 'ascii')
def len_b(bstring):
    """Return the length *bstring* would have once Base64-encoded.

    Four output bytes are produced for every (possibly partial) group of
    three input bytes.
    """
    return (len(bstring) + 2) // 3 * 4
_cte_decoders = {
'q': decode_q,
'b': decode_b,
}
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \uFDFF.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.
    """
    _, charset, cte, cte_string, _ = str(ew).split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        # Valid charset, undecodable bytes: keep them as surrogates.
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        # Unknown charset: fall back to ascii with surrogate escapes, and
        # report it unless the sender already declared 'unknown-8bit'.
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
_cte_encoders = {
'q': encode_q,
'b': encode_b,
}
_cte_encode_length = {
'q': len_q,
'b': len_b,
}
def encode(string, charset='utf-8', encoding=None, lang=''):
    """Encode string using the CTE encoding that produces the shorter result.

    Produces an RFC 2047/2243 encoded word of the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' is omitted unless the 'lang' parameter is given a value.
    Optional argument charset (defaults to utf-8) specifies the charset to use
    to encode the string to binary before CTE encoding it.  Optional argument
    'encoding' is the cte specifier for the encoding that should be used ('q'
    or 'b'); if it is None (the default) the encoding which produces the
    shortest encoded sequence is used, except that 'q' is preferred if it is up
    to five characters longer.  Optional argument 'lang' (default '') gives the
    RFC 2243 language string to specify in the encoded word.
    """
    string = str(string)
    if charset == 'unknown-8bit':
        # Undecodable input travels through via surrogate escapes.
        bstring = string.encode('ascii', 'surrogateescape')
    else:
        bstring = string.encode(charset)
    if encoding is None:
        # Pick whichever CTE is shorter, with a slight preference for 'q'
        # because it is human readable.
        qlen = _cte_encode_length['q'](bstring)
        blen = _cte_encode_length['b'](bstring)
        # Bias toward q.  5 is arbitrary.
        encoding = 'q' if qlen - blen < 5 else 'b'
    encoded = _cte_encoders[encoding](bstring)
    if lang:
        lang = '*' + lang
    return "=?{0}{1}?{2}?{3}?=".format(charset, lang, encoding, encoded)
| mit |
vvv1559/intellij-community | plugins/hg4idea/testData/bin/mercurial/hbisect.py | 92 | 9226 | # changelog bisection for mercurial
#
# Copyright 2007 Matt Mackall
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
#
# Inspired by git bisect, extension skeleton taken from mq.py.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, error
from i18n import _
from node import short, hex
import util
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """
    clparents = changelog.parentrevs
    # Revisions the user explicitly asked not to test.
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # Return (badrev, ancestors).  ancestors[rev] is a list of the
        # candidate ancestors of rev, [] marks an untested candidate and
        # None marks a rev outside the search range (or None overall when
        # bad does not descend from good).
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1)  # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            # badrev is not a descendant of any good rev: no valid range.
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors:  # looking for bad to good transition?
        # Retry with the roles swapped: look for the first good changeset.
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors:  # now we're confused
        if len(state['bad']) == 1 and len(state['good']) == 1:
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = util.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
        for prev in clparents(rev):
            if prev != -1:
                if prev in children:
                    children[prev].append(rev)
                else:
                    children[prev] = [rev]
                visit.append(prev)
    candidates.sort()

    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' have been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    # An ideal test splits the candidate set exactly in half.
    perfect = tot // 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a)  # number of ancestors
        y = tot - x  # number of non-ancestors
        value = min(x, y)  # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect:  # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip:  # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        # Propagate the accumulated ancestor sets to the children.
        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
def load_state(repo):
    """Read the repo's bisect.state file into a dict of kind -> node lists."""
    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
    if os.path.exists(repo.join("bisect.state")):
        for l in repo.opener("bisect.state"):
            # Each line has the form "<kind> <node-id>".
            kind, node = l[:-1].split()
            node = repo.lookup(node)
            if kind not in state:
                raise util.Abort(_("unknown bisect kind %s") % kind)
            state[kind].append(node)
    return state
def save_state(repo, state):
    """Write the bisect *state* dict to the repo's bisect.state file.

    The write is atomic (atomictemp) and performed under the working-dir
    lock.
    """
    f = repo.opener("bisect.state", "w", atomictemp=True)
    wlock = repo.wlock()
    try:
        for kind in sorted(state):
            for node in state[kind]:
                f.write("%s %s\n" % (kind, hex(node)))
        f.close()
    finally:
        wlock.release()
def get(repo, status):
    """
    Return a list of revision(s) that match the given status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``    : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    state = load_state(repo)
    if status in ('good', 'bad', 'skip', 'current'):
        return map(repo.changelog.rev, state[status])
    else:
        # In the following sets, we do *not* call 'bisect()' with more
        # than one level of recursion, because that can be very, very
        # time consuming. Instead, we always develop the expression as
        # much as possible.

        # 'range' is all csets that make the bisection:
        #   - have a good ancestor and a bad descendant, or conversely
        # that's because the bisection can go either way
        range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'

        _t = repo.revs('bisect(good)::bisect(bad)')
        # The sets of topologically good or bad csets
        if len(_t) == 0:
            # Goods are topologically after bads
            goods = 'bisect(good)::'    # Pruned good csets
            bads = '::bisect(bad)'      # Pruned bad csets
        else:
            # Goods are topologically before bads
            goods = '::bisect(good)'    # Pruned good csets
            bads = 'bisect(bad)::'      # Pruned bad csets

        # 'pruned' is all csets whose fate is already known: good, bad, skip
        skips = 'bisect(skip)'          # Pruned skipped csets
        pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)

        # 'untested' is all cset that are- in 'range', but not in 'pruned'
        untested = '( (%s) - (%s) )' % (range, pruned)

        # 'ignored' is all csets that were not used during the bisection
        # due to DAG topology, but may however have had an impact.
        # E.g., a branch merged between bads and goods, but whose branch-
        # point is out-side of the range.
        iba = '::bisect(bad) - ::bisect(good)'    # Ignored bads' ancestors
        iga = '::bisect(good) - ::bisect(bad)'    # Ignored goods' ancestors
        ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)

        if status == 'range':
            return repo.revs(range)
        elif status == 'pruned':
            return repo.revs(pruned)
        elif status == 'untested':
            return repo.revs(untested)
        elif status == 'ignored':
            return repo.revs(ignored)
        elif status == "goods":
            return repo.revs(goods)
        elif status == "bads":
            return repo.revs(bads)
        else:
            raise error.ParseError(_('invalid bisect state'))
def label(repo, node):
    """Return the translated bisect status label for *node*, or None.

    Explicit markings take precedence over the topologically implied ones.
    """
    rev = repo.changelog.rev(node)

    # Try explicit sets
    if rev in get(repo, 'good'):
        # i18n: bisect changeset status
        return _('good')
    if rev in get(repo, 'bad'):
        # i18n: bisect changeset status
        return _('bad')
    if rev in get(repo, 'skip'):
        # i18n: bisect changeset status
        return _('skipped')
    if rev in get(repo, 'untested') or rev in get(repo, 'current'):
        # i18n: bisect changeset status
        return _('untested')
    if rev in get(repo, 'ignored'):
        # i18n: bisect changeset status
        return _('ignored')

    # Try implicit sets
    if rev in get(repo, 'goods'):
        # i18n: bisect changeset status
        return _('good (implicit)')
    if rev in get(repo, 'bads'):
        # i18n: bisect changeset status
        return _('bad (implicit)')

    return None
def shortlabel(label):
    """Return the uppercased first letter of *label*, or None if it is falsy."""
    return label[0].upper() if label else None
| apache-2.0 |
mmezzavilla/ns3-mmwave | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Scripting.py | 9 | 11665 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
if len(sys.argv)>1:
potential_wscript=os.path.join(current_directory,sys.argv[1])
if os.path.basename(potential_wscript)=='wscript'and os.path.isfile(potential_wscript):
current_directory=os.path.normpath(os.path.dirname(potential_wscript))
sys.argv.pop(1)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
for y in sys.argv:
if y.startswith(k):
no_climb=True
break
for i,x in enumerate(sys.argv):
if x.startswith('--top='):
Context.run_dir=Context.top_dir=Utils.sane_path(x[6:])
sys.argv[i]='--top='+Context.run_dir
if x.startswith('--out='):
Context.out_dir=Utils.sane_path(x[6:])
sys.argv[i]='--out='+Context.out_dir
cur=current_directory
while cur and not Context.top_dir:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in(env.run_dir,env.top_dir,env.out_dir):
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except OSError:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(os.path.normpath(os.path.join(Context.run_dir,Context.WSCRIPT_FILE)))
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except SystemExit:
raise
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in(update,dist,distclean,distcheck):
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
for var in Options.envvars:
(name,value)=var.split('=',1)
os.environ[name.strip()]=value
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.log_timer=Utils.Timer()
ctx.options=Options.options
ctx.cmd=cmd_name
try:
ctx.execute()
finally:
ctx.finalize()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
ctx=run_command(cmd_name)
Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
    # Remove the build artifacts (.o/.moc/.exe) below dirname, then the
    # build database, the config log and the c4che configuration directory.
    # Failures to delete individual artifacts are warned about, the rest of
    # the cleanup is best-effort.
    for (root, dirs, files) in os.walk(dirname):
        for f in files:
            if _can_distclean(f):
                fname = os.path.join(root, f)
                try:
                    os.remove(fname)
                except OSError:
                    Logs.warn('Could not remove %r' % fname)
    for x in (Context.DBFILE, 'config.log'):
        try:
            os.remove(x)
        except OSError:
            pass
    try:
        shutil.rmtree('c4che')
    except OSError:
        pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except IOError:
Logs.warn('Could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('Could not remove %r'%proj['out_dir'])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
p=os.path.join(k,Options.lockfile)
try:
os.remove(p)
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('Could not remove %r'%p)
if not Options.commands:
for x in'.waf-1. waf-1. .waf3-1. waf3-1.'.split():
if f.startswith(x):
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
'''creates an archive containing the project source code'''
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except OSError:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz, tar.xz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except Exception:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return self.base_name
def get_excl(self):
try:
return self.excl
except AttributeError:
self.excl=Node.exclude_regs+' **/waf-1.8.* **/.waf-1.8* **/waf3-1.8.* **/.waf3-1.8* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
if Context.out_dir:
nd=self.root.find_node(Context.out_dir)
if nd:
self.excl+=' '+nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files=self.files
except AttributeError:
files=self.base_path.ant_glob('**/*',excl=self.get_excl())
return files
def dist(ctx):
	'''makes a tarball for redistributing the sources'''
	# NOTE: the docstring above doubles as the command's help text; the
	# actual work is done by the Dist context class bound to this command,
	# so the function body is intentionally empty.
	pass
class DistCheck(Dist):
 """Create the source tarball, then verify it builds and installs cleanly."""
 fun='distcheck'
 cmd='distcheck'
 def execute(self):
  # Build the archive first, then exercise it.
  self.recurse([os.path.dirname(Context.g_module.root_path)])
  self.archive()
  self.check()
 def check(self):
  """Unpack the archive and run configure/install/uninstall inside it."""
  import tempfile,tarfile
  # Extract the freshly built tarball into the current directory.
  t=None
  try:
   t=tarfile.open(self.get_arch_name())
   for x in t:
    t.extract(x)
  finally:
   if t:
    t.close()
  # Extra configure arguments: --distcheck-args wins, else reuse the
  # dash-prefixed arguments from the current command line.
  cfg=[]
  if Options.options.distcheck_args:
   cfg=shlex.split(Options.options.distcheck_args)
  else:
   cfg=[x for x in sys.argv if x.startswith('-')]
  instdir=tempfile.mkdtemp('.inst',self.get_base_name())
  # Run a full configure/install/uninstall cycle inside the unpacked tree.
  ret=Utils.subprocess.Popen([sys.executable,sys.argv[0],'configure','install','uninstall','--destdir='+instdir]+cfg,cwd=self.get_base_name()).wait()
  if ret:
   raise Errors.WafError('distcheck failed with code %i'%ret)
  # A correct uninstall must leave the destdir empty (and removed).
  if os.path.exists(instdir):
   raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
  shutil.rmtree(self.get_base_name())
def distcheck(ctx):
	'''checks if the project compiles (tarball from 'dist')'''
	# NOTE: the docstring above doubles as the command's help text; the
	# DistCheck context class bound to this command does the actual work.
	pass
def update(ctx):
 # Refresh waf extra tools from the remote repository.  With --files=a,b
 # only the named tools are updated; otherwise every .py file found in
 # the local waflib/extras directory is refreshed.
 lst=Options.options.files
 if lst:
  lst=lst.split(',')
 else:
  path=os.path.join(Context.waf_dir,'waflib','extras')
  lst=[x for x in Utils.listdir(path)if x.endswith('.py')]
 for x in lst:
  tool=x.replace('.py','')
  if not tool:
   continue
  try:
   # download_tool is only defined when the "use_config" extra is loaded;
   # refuse to fetch remote code without it.
   dl=Configure.download_tool
  except AttributeError:
   ctx.fatal('The command "update" is dangerous; include the tool "use_config" in your project!')
  try:
   dl(tool,force=True,ctx=ctx)
  except Errors.WafError:
   Logs.error('Could not find the tool %r in the remote repository'%x)
  else:
   Logs.warn('Updated %r'%tool)
def autoconfigure(execute_method):
 # Decorator: when Configure.autoconfig is enabled, transparently re-run
 # "configure" before the wrapped command whenever the lockfile is
 # missing/unreadable, the project directory moved, or any file recorded
 # at configure time has changed.
 def execute(self):
  if not Configure.autoconfig:
   return execute_method(self)
  env=ConfigSet.ConfigSet()
  do_config=False
  try:
   env.load(os.path.join(Context.top_dir,Options.lockfile))
  except Exception:
   Logs.warn('Configuring the project')
   do_config=True
  else:
   if env.run_dir!=Context.run_dir:
    do_config=True
   else:
    # Hash every configuration input file; any change forces a re-run.
    h=0
    for f in env['files']:
     h=Utils.h_list((h,Utils.readf(f,'rb')))
    do_config=h!=env.hash
  if do_config:
   # Queue "configure" followed by the original command, then bail out.
   Options.commands.insert(0,self.cmd)
   Options.commands.insert(0,'configure')
   if Configure.autoconfig=='clobber':
    # "clobber" mode restores the exact option set used at configure time.
    Options.options.__dict__=env.options
   return
  return execute_method(self)
 return execute
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| gpl-2.0 |
SaikWolf/gnuradio | gr-digital/python/digital/ofdm_receiver.py | 57 | 7980 | #!/usr/bin/env python
#
# Copyright 2006-2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from numpy import fft
from gnuradio import fft as gr_fft
from gnuradio import gr
from gnuradio import analog
from gnuradio import blocks
from gnuradio import filter
import digital_swig as digital
from ofdm_sync_pn import ofdm_sync_pn
from ofdm_sync_fixed import ofdm_sync_fixed
from ofdm_sync_pnac import ofdm_sync_pnac
from ofdm_sync_ml import ofdm_sync_ml
try:
from gnuradio import filter
except ImportError:
import filter_swig as filter
class ofdm_receiver(gr.hier_block2):
    """
    Performs receiver synchronization on OFDM symbols.
    The receiver performs channel filtering as well as symbol, frequency, and phase synchronization.
    The synchronization routines are available in three flavors: preamble correlator (Schmidl and Cox),
    modified preamble correlator with autocorrelation (not yet working), and cyclic prefix correlator
    (Van de Beeks).
    """
    def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, logging=False):
        """
        Hierarchical block for receiving OFDM symbols.
        The input is the complex modulated signal at baseband.
        Synchronized packets are sent back to the demodulator.
        Args:
            fft_length: total number of subcarriers (int)
            cp_length: length of cyclic prefix as specified in subcarriers (<= fft_length) (int)
            occupied_tones: number of subcarriers used for data (int)
            snr: estimated signal to noise ratio used to guide cyclic prefix synchronizer (float)
            ks: known symbols used as preambles to each packet (list of lists)
            logging: turn file logging on or off (bool)
        """
        gr.hier_block2.__init__(self, "ofdm_receiver",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature
        # Low-pass channel filter covering only the occupied part of the band.
        bw = (float(occupied_tones) / float(fft_length)) / 2.0
        tb = bw*0.08
        chan_coeffs = filter.firdes.low_pass (1.0,                     # gain
                                              1.0,                     # sampling rate
                                              bw+tb,                   # midpoint of trans. band
                                              tb,                      # width of trans. band
                                              filter.firdes.WIN_HAMMING) # filter type
        self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)
        win = [1 for i in range(fft_length)]
        # Build the time-domain version of the first known preamble symbol;
        # it is consumed by the "ml" and "pnac" synchronizers below.
        zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))
        ks0 = fft_length*[0,]
        ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]
        ks0 = fft.ifftshift(ks0)
        ks0time = fft.ifft(ks0)
        # ADD SCALING FACTOR
        ks0time = ks0time.tolist()
        # Synchronization flavor; hard-coded to Schmidl & Cox ("pn").
        SYNC = "pn"
        if SYNC == "ml":
            nco_sensitivity = -1.0/fft_length   # correct for fine frequency
            self.ofdm_sync = ofdm_sync_ml(fft_length,
                                          cp_length,
                                          snr,
                                          ks0time,
                                          logging)
        elif SYNC == "pn":
            nco_sensitivity = -2.0/fft_length   # correct for fine frequency
            self.ofdm_sync = ofdm_sync_pn(fft_length,
                                          cp_length,
                                          logging)
        elif SYNC == "pnac":
            nco_sensitivity = -2.0/fft_length   # correct for fine frequency
            self.ofdm_sync = ofdm_sync_pnac(fft_length,
                                            cp_length,
                                            ks0time,
                                            logging)
        # for testing only; do not user over the air
        # remove filter and filter delay for this
        elif SYNC == "fixed":
            self.chan_filt = blocks.multiply_const_cc(1.0)
            nsymbols = 18      # enter the number of symbols per packet
            freq_offset = 0.0  # if you use a frequency offset, enter it here
            nco_sensitivity = -2.0/fft_length   # correct for fine frequency
            self.ofdm_sync = ofdm_sync_fixed(fft_length,
                                             cp_length,
                                             nsymbols,
                                             freq_offset,
                                             logging)
        # Set up blocks
        self.nco = analog.frequency_modulator_fc(nco_sensitivity)         # generate a signal proportional to frequency error of sync block
        self.sigmix = blocks.multiply_cc()
        self.sampler = digital.ofdm_sampler(fft_length, fft_length+cp_length)
        self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)
        self.ofdm_frame_acq = digital.ofdm_frame_acquisition(occupied_tones,
                                                             fft_length,
                                                             cp_length, ks[0])
        # Wire the flowgraph: filter -> sync -> derotate -> sample -> FFT -> frame acquisition.
        self.connect(self, self.chan_filt)                            # filter the input channel
        self.connect(self.chan_filt, self.ofdm_sync)                  # into the synchronization alg.
        self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1))   # use sync freq. offset output to derotate input signal
        self.connect(self.chan_filt, (self.sigmix,0))                 # signal to be derotated
        self.connect(self.sigmix, (self.sampler,0))                   # sample off timing signal detected in sync alg
        self.connect((self.ofdm_sync,1), (self.sampler,1))            # timing signal to sample at
        self.connect((self.sampler,0), self.fft_demod)                # send derotated sampled signal to FFT
        self.connect(self.fft_demod, (self.ofdm_frame_acq,0))         # find frame start and equalize signal
        self.connect((self.sampler,1), (self.ofdm_frame_acq,1))       # send timing signal to signal frame start
        self.connect((self.ofdm_frame_acq,0), (self,0))               # finished with fine/coarse freq correction,
        self.connect((self.ofdm_frame_acq,1), (self,1))               # frame and symbol timing, and equalization
        # Optional debug taps: dump every intermediate stream to files.
        if logging:
            self.connect(self.chan_filt, blocks.file_sink(gr.sizeof_gr_complex, "ofdm_receiver-chan_filt_c.dat"))
            self.connect(self.fft_demod, blocks.file_sink(gr.sizeof_gr_complex*fft_length, "ofdm_receiver-fft_out_c.dat"))
            self.connect(self.ofdm_frame_acq,
                         blocks.file_sink(gr.sizeof_gr_complex*occupied_tones, "ofdm_receiver-frame_acq_c.dat"))
            self.connect((self.ofdm_frame_acq,1), blocks.file_sink(1, "ofdm_receiver-found_corr_b.dat"))
            self.connect(self.sampler, blocks.file_sink(gr.sizeof_gr_complex*fft_length, "ofdm_receiver-sampler_c.dat"))
            self.connect(self.sigmix, blocks.file_sink(gr.sizeof_gr_complex, "ofdm_receiver-sigmix_c.dat"))
            self.connect(self.nco, blocks.file_sink(gr.sizeof_gr_complex, "ofdm_receiver-nco_c.dat"))
| gpl-3.0 |
seanli9jan/tensorflow | tensorflow/contrib/boosted_trees/examples/boston.py | 16 | 6242 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates a regression on Boston housing data.
This example demonstrates how to run experiments with TF Boosted Trees on
a regression dataset. We split all the data into 20% test and 80% train,
and are using l2 loss and l2 regularization.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/boston.py \
--batch_size=404 --output_dir="/tmp/boston" --depth=4 --learning_rate=0.1 \
--num_eval_steps=1 --num_trees=500 --l2=0.001 \
--vmodule=training_ops=1
When training is done, mean squared error on eval data is reported.
Point tensorboard to the directory for the run to see how the training
progresses:
tensorboard --logdir=/tmp/boston
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeRegressor
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn import learn_runner
from tensorflow.python.util import compat
_BOSTON_NUM_FEATURES = 13
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir, feature_cols):
  """Configures TF Boosted Trees estimator based on flags.

  Args:
    output_dir: Directory where checkpoints and summaries are written.
    feature_cols: List of feature columns describing the model inputs.

  Returns:
    A GradientBoostedDecisionTreeRegressor configured from FLAGS.
  """
  learner_config = learner_pb2.LearnerConfig()
  learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
  learner_config.regularization.l1 = 0.0
  learner_config.regularization.l2 = FLAGS.l2
  learner_config.constraints.max_tree_depth = FLAGS.depth
  run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
  # Create a TF Boosted trees regression estimator.
  estimator = GradientBoostedDecisionTreeRegressor(
      learner_config=learner_config,
      # This should be the number of examples. For large datasets it can be
      # larger than the batch_size.
      examples_per_layer=FLAGS.batch_size,
      feature_columns=feature_cols,
      label_dimension=1,
      model_dir=output_dir,
      num_trees=FLAGS.num_trees,
      center_bias=False,
      config=run_config)
  return estimator
def _convert_fn(dtec, sorted_feature_names, num_dense, num_sparse_float,
                num_sparse_int, export_dir, unused_eval_result):
  """Writes the tree ensemble in universal text format next to the export."""
  universal_format = custom_export_strategy.convert_to_universal_format(
      dtec, sorted_feature_names, num_dense, num_sparse_float, num_sparse_int)
  # Store the converted proto as "tree_proto" inside the export directory.
  with tf.gfile.GFile(os.path.join(
      compat.as_bytes(export_dir), compat.as_bytes("tree_proto")), "w") as f:
    f.write(str(universal_format))
def _make_experiment_fn(output_dir):
  """Creates experiment for gradient boosted decision trees.

  Loads the Keras Boston housing split, builds numpy input functions for
  train/eval, and wires a custom export strategy that also dumps the tree
  ensemble in a universal format.
  """
  (x_train, y_train), (x_test,
                       y_test) = tf.keras.datasets.boston_housing.load_data()
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
      num_epochs=None,
      shuffle=True)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)
  feature_columns = [
      feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
  ]
  feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = tf.contrib.learn.utils.build_parsing_serving_input_fn(
      feature_spec)
  # An export strategy that outputs the feature importance and also exports
  # the internal tree representation in another format.
  export_strategy = custom_export_strategy.make_custom_export_strategy(
      "exports",
      convert_fn=_convert_fn,
      feature_columns=feature_columns,
      export_input_fn=serving_input_fn)
  return tf.contrib.learn.Experiment(
      estimator=_get_tfbt(output_dir, feature_columns),
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None,
      export_strategies=[export_strategy])
def main(unused_argv):
  # Run train-and-evaluate via the Experiment API; all configuration comes
  # from the module-level FLAGS parsed in the __main__ block.
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  parser = argparse.ArgumentParser()
  # Define the list of flags that users can change.
  parser.add_argument(
      "--batch_size",
      type=int,
      default=1000,
      help="The batch size for reading data.")
  parser.add_argument(
      "--output_dir",
      type=str,
      required=True,
      help="Choose the dir for the output.")
  parser.add_argument(
      "--num_eval_steps",
      type=int,
      default=1,
      help="The number of steps to run evaluation for.")
  # Flags for gradient boosted trees config.
  parser.add_argument(
      "--depth", type=int, default=4, help="Maximum depth of weak learners.")
  parser.add_argument(
      "--l2", type=float, default=1.0, help="l2 regularization per batch.")
  parser.add_argument(
      "--learning_rate",
      type=float,
      default=0.1,
      help="Learning rate (shrinkage weight) with which each new tree is added."
  )
  parser.add_argument(
      "--num_trees",
      type=int,
      default=None,
      required=True,
      help="Number of trees to grow before stopping.")
  # Unrecognized arguments are forwarded to tf.app.run so TF flags still work.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
onlysheep5200/NetnsEx | hostend/main.py | 1 | 1970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
sys.path.append('..')
from tornado.web import Application
import tornado.ioloop
import tornado.options
import tornado.httpserver
import tornado.autoreload
from tornado.options import define, options
from hostend.controller import *
from lib.utils import Host
from proxy import *
import docker
import json
from handlers import *
from concurrent.futures import ThreadPoolExecutor
# Load deployment settings and register this host with the central controller
# at import time (module has side effects by design: it is the entry script).
config = json.load(open('config.json','r'))
controller = Controller()
controller.reportUrl = config.get('reportUrl')
controller.requestUrl = config.get('requestUrl')
# Describe the local host (switch + transport interfaces) for registration.
host = Host.currentHost('',switchInterface=config['switchName'],transportInterface=config['transportInterface'])
data = controller.request('getHostId',[host.mac,host.transportIP,host.transportIP])
# NOTE(review): host.transportIP is sent twice here — looks like one of the
# two arguments was meant to be a different field; confirm against the
# controller's getHostId API.
if data['state'] == 'success':
    host.uuid = data['data']['id']
# NOTE(review): if registration fails, host.uuid is never set and the print
# below raises AttributeError.
print host
print 'my host id is : %s'%host.uuid
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
    """Tornado app exposing container-management endpoints for this host."""
    def __init__(self):
        handlers = [
            (r'/createContainer',CreateContainerHandler),
            (r'/bootSelf',BootSelfHandler)
        ]
        settings = {
            'template_path': 'templates',
            'debug': True,
            # NOTE(review): hard-coded secret checked into source — should be
            # loaded from config/environment instead.
            'cookie_secret' : "dfaew989q2kejczo8u8923e400ialsjdf",
            'static_path': 'static'
        }
        # Shared state consumed by the request handlers.
        self.host = host
        self.controller = controller
        self.containerProxy = DockerProxy(docker.Client('unix://var/run/docker.sock'),self.host,self.controller)
        # Single-worker pool: container operations are serialized.
        self.executionPool = ThreadPoolExecutor(max_workers=1)
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
    # Parse --port (and other tornado options), then serve until interrupted.
    tornado.options.parse_command_line()
    app = Application()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
| apache-2.0 |
marc-sensenich/ansible | lib/ansible/modules/network/fortios/fortios_antivirus_heuristic.py | 2 | 6869 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_antivirus_heuristic
short_description: Configure global heuristic options in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure antivirus feature and heuristic category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip adress.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
antivirus_heuristic:
description:
- Configure global heuristic options.
default: null
suboptions:
mode:
description:
- Enable/disable heuristics and determine how the system behaves if heuristics detects a problem.
choices:
- pass
- block
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure global heuristic options.
fortios_antivirus_heuristic:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
antivirus_heuristic:
mode: "pass"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the module-global FortiOS handle.

    Uses the host/username/password from the Ansible parameters; HTTPS is
    the default transport and is only switched off when the caller passed
    an explicit falsy ``https`` value.
    """
    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(data['host'], data['username'], data['password'])
def filter_antivirus_heuristic_data(json):
    """Keep only the keys understood by the antivirus heuristic endpoint.

    Entries that are absent or explicitly None are dropped so they are not
    sent to the FortiGate API.  (The parameter name ``json`` is kept for
    interface compatibility even though it shadows the stdlib module.)
    """
    option_list = ['mode']
    # Dict comprehension replaces the manual accumulation loop.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def antivirus_heuristic(data, fos):
    """Push the antivirus heuristic configuration to the FortiGate."""
    payload = filter_antivirus_heuristic_data(data['antivirus_heuristic'])
    return fos.set('antivirus',
                   'heuristic',
                   data=payload,
                   vdom=data['vdom'])
def fortios_antivirus(data, fos):
    """Log in, apply the first requested antivirus sub-configuration, log out.

    Returns:
        (is_error, has_changed, response) tuple suitable for AnsibleModule
        exit_json/fail_json.
    """
    login(data)
    # Explicit dispatch table instead of eval() on a method-name string.
    dispatch = {'antivirus_heuristic': antivirus_heuristic}
    resp = None
    for method_name, method_fn in dispatch.items():
        if data[method_name]:
            resp = method_fn(data, fos)
            break
    fos.logout()
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible module entry point: declare arguments, run, report result."""
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "antivirus_heuristic": {
            "required": False, "type": "dict",
            "options": {
                "mode": {"required": False, "type": "str",
                         "choices": ["pass", "block", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so the module can fail gracefully with a
    # proper Ansible error message when the dependency is missing.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    # The API handle is stored in the module-global used by login()/logout().
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_antivirus(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| gpl-3.0 |
SantosDevelopers/sborganicos | venv/lib/python3.5/site-packages/wheel/archive.py | 93 | 2247 | """
Archive tools for wheel.
"""
import os
import time
import logging
import os.path
import zipfile
log = logging.getLogger("wheel")
def archive_wheelfile(base_name, base_dir):
    """Archive everything under `base_dir` into a wheel named like
    `base_name` (with ``.whl`` appended).
    """
    previous_cwd = os.path.abspath(os.curdir)
    target = os.path.abspath(base_name)
    os.chdir(base_dir)
    try:
        return make_wheelfile_inner(target)
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous_cwd)
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive (so metadata can be located
    quickly) and honours SOURCE_DATE_EPOCH for reproducible builds.

    Returns the name of the created archive.
    """
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.
    timestamp = os.environ.get('SOURCE_DATE_EPOCH')
    if timestamp is None:
        date_time = None
    else:
        date_time = time.gmtime(int(timestamp))[0:6]
    # XXX support bz2, xz when available
    # Renamed from 'zip': do not shadow the builtin.
    whl_zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED)
    # Metadata files are deferred to the end of the archive, in this order.
    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
    deferred = []

    def writefile(path, date_time):
        st = os.stat(path)
        if date_time is None:
            mtime = time.gmtime(st.st_mtime)
            date_time = mtime[0:6]
        zinfo = zipfile.ZipInfo(path, date_time)
        # Preserve the file's permission bits in the archive entry.
        zinfo.external_attr = st.st_mode << 16
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        # Context manager replaces the bare open/read (no explicit close
        # in the original).
        with open(path, 'rb') as fp:
            whl_zip.writestr(zinfo, fp.read())
        log.info("adding '%s'" % path)

    for dirpath, dirnames, filenames in os.walk(base_dir):
        for name in filenames:
            path = os.path.normpath(os.path.join(dirpath, name))
            if os.path.isfile(path):
                if dirpath.endswith('.dist-info'):
                    deferred.append((score.get(name, 0), path))
                else:
                    writefile(path, date_time)
    deferred.sort()
    # Distinct loop variable: the original rebound the 'score' dict here.
    for _rank, path in deferred:
        writefile(path, date_time)
    whl_zip.close()
    return zip_filename
| mit |
oscaro/django | tests/validators/tests.py | 1 | 14960 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
import re
import types
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
validate_comma_separated_integer_list, validate_email, validate_integer,
validate_ipv46_address, validate_ipv4_address, validate_ipv6_address,
validate_slug,
)
from django.test.utils import str_prefix
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, ValidationError),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_integer, '\n42', ValidationError),
(validate_integer, '42\n', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, 'trailingdot@shouldfail.com.', ValidationError),
# Trailing newlines in username or domain not allowed
(validate_email, 'a@b.com\n', ValidationError),
(validate_email, 'a\n@b.com', ValidationError),
(validate_email, '"test@test"\n@example.com', ValidationError),
(validate_email, 'a@[127.0.0.1]\n', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_slug, 'trailing-newline\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
(validate_ipv4_address, '1.1.1.1\n', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(validate_comma_separated_integer_list, '1,2,3\n', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'HTTP://WWW.DJANGOPROJECT.COM/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'https://example.com/', None),
(URLValidator(), 'ftp://example.com/', None),
(URLValidator(), 'ftps://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(URLValidator(), 'file://localhost/path', ValidationError),
(URLValidator(), 'git://example.com/', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
# Trailing newlines not accepted
(URLValidator(), 'http://www.djangoproject.com/\n', ValidationError),
(URLValidator(), 'http://[::ffff:192.9.5.5]\n', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
)
def create_simple_test_method(validator, expected, value, num):
    """
    Build one (name, function) test pair for a TEST_DATA row.

    If ``expected`` is an exception class the generated test asserts that
    validating ``value`` raises it; otherwise it asserts the validator
    returns ``expected``. ``num`` disambiguates the generated test name.
    """
    raises = expected is not None and issubclass(expected, Exception)
    if raises:
        test_mask = 'test_%s_raises_error_%d'

        def test_func(self):
            # assertRaises is avoided on purpose so the failure message can
            # include the value that was being validated.
            try:
                validator(value)
            except expected:
                pass
            else:
                self.fail("%s not raised when validating '%s'" % (
                    expected.__name__, value))
    else:
        test_mask = 'test_%s_%d'

        def test_func(self):
            try:
                self.assertEqual(expected, validator(value))
            except ValidationError as e:
                self.fail("Validation of '%s' failed. Error message was: %s" % (
                    value, str(e)))
    # Plain functions are named by the function itself; validator instances
    # (e.g. RegexValidator()) by their class.
    if isinstance(validator, types.FunctionType):
        val_name = validator.__name__
    else:
        val_name = type(validator).__name__
    return test_mask % (val_name, num), test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
    """Direct checks of ValidationError rendering and RegexValidator arguments."""
    def test_single_message(self):
        """A single message renders as a one-element list."""
        err = ValidationError('Not Valid')
        self.assertEqual(str(err), str_prefix("[%(_)s'Not Valid']"))
        self.assertEqual(repr(err), str_prefix("ValidationError([%(_)s'Not Valid'])"))
    def test_message_list(self):
        """A list of messages is preserved in order."""
        err = ValidationError(['First Problem', 'Second Problem'])
        self.assertEqual(str(err), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
        self.assertEqual(repr(err), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
    def test_message_dict(self):
        """A dict of messages maps field names to message lists."""
        err = ValidationError({'first': ['First Problem']})
        self.assertEqual(str(err), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
        self.assertEqual(repr(err), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
    def test_regex_validator_flags(self):
        """Combining flags with a pre-compiled pattern must raise TypeError."""
        try:
            RegexValidator(re.compile('a'), flags=re.IGNORECASE)
        except TypeError:
            pass
        else:
            self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")
# Attach one generated test method per TEST_DATA row to TestSimpleValidators.
for test_counter, (validator, value, expected) in enumerate(TEST_DATA):
    test_name, test_method = create_simple_test_method(validator, expected, value, test_counter)
    setattr(TestSimpleValidators, test_name, test_method)
class TestValidatorEquality(TestCase):
    """
    Tests that validators have valid equality operators (#21638)
    """
    def test_regex_equality(self):
        """Equality must consider pattern, message, code, flags and inverse_match."""
        self.assertEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
            RegexValidator(r'^(?:[0-9\.\-]*)://'),
        )
        self.assertEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
        )
        self.assertNotEqual(
            RegexValidator('', flags=re.IGNORECASE),
            RegexValidator(''),
        )
        self.assertNotEqual(
            RegexValidator(''),
            RegexValidator('', inverse_match=True),
        )
    def test_regex_equality_nocache(self):
        """Equality must not depend on re's internal pattern cache."""
        pattern = r'^(?:[a-z0-9\.\-]*)://'
        left = RegexValidator(pattern)
        # Clear the regular-expression cache so ``right`` gets a freshly
        # compiled pattern object rather than the cached one.
        re.purge()
        right = RegexValidator(pattern)
        self.assertEqual(
            left,
            right,
        )
    def test_regex_equality_blank(self):
        """Two default-constructed RegexValidators compare equal."""
        self.assertEqual(
            RegexValidator(),
            RegexValidator(),
        )
    def test_email_equality(self):
        """EmailValidator equality considers message and code."""
        self.assertEqual(
            EmailValidator(),
            EmailValidator(),
        )
        self.assertNotEqual(
            EmailValidator(message="BAD EMAIL"),
            EmailValidator(),
        )
        self.assertEqual(
            EmailValidator(message="BAD EMAIL", code="bad"),
            EmailValidator(message="BAD EMAIL", code="bad"),
        )
    def test_basic_equality(self):
        """Limit validators compare by class and limit value."""
        self.assertEqual(
            MaxValueValidator(44),
            MaxValueValidator(44),
        )
        self.assertNotEqual(
            MaxValueValidator(44),
            MinValueValidator(44),
        )
        self.assertNotEqual(
            MinValueValidator(45),
            MinValueValidator(11),
        )
| bsd-3-clause |
ODMM/openflow-dmm | amm/amm.py | 1 | 6619 | # Copyright (C) IMDEA Networks Institute and NETCOM research group, Department of
# Telematics Engineering, University Carlos III of Madrid.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# IMDEA Networks Institute and NETCOM research group, Department of
# Telematics Engineering, University Carlos III of Madrid, hereby disclaims all
# copyright interest in the program 'OpenFlow-DMM', released by the Open Platform
# for DMM solutions (ODMM), written by Luca Cominardi <odmm-support@odmm.net>.
#
# signature of IMDEA Networks Institute and NETCOM research group, Department of
# Telematics Engineering, University Carlos III of Madrid, 12 June 2015.
# Albert Banchs, Deputy director of IMDEA Networks Institute and Titular professor
# at University Carlos III of Madrid.
# Start import from iJOIN solution files
import log
from node import Anchor
from event import *
from nmm.event import EventTopologyUpdate
from teem.event import EventRoutingUpdate
from mme.event import EventUEConnected
from mme.event import EventUEDisconnected
# End import from iJOIN solution files
# Start import from Ryu files
import numpy
from ryu.base import app_manager
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_3 as ofproto
# End import from Ryu files
class Amm(app_manager.RyuApp):
    """
    Anchor Management Module: selects the gateway switches ("anchors")
    serving each UE and emits EventUEAnchorsUpdate whenever that selection
    changes (on UE attach, routing updates, or topology updates).

    ================ =========================================================
    Attribute        Description
    ================ =========================================================
    OFP_VERSIONS     Declaration of supported OFP version
    _EVENTS          The list of events provided by the RyuApp
    ================ =========================================================
    """
    OFP_VERSIONS = [ofproto.OFP_VERSION]
    _EVENTS = [EventUEAnchorsUpdate]
    def __init__(self, *args, **kwargs):
        """
        ================ =========================================================
        Attribute        Description
        ================ =========================================================
        ues              The dictionary of known UEs, keyed by UE id
        switches         The dictionary storing the switches
        gateways         The dictionary storing the switches enabled with
                         gateway functionalities
        pgw              The switch currently selected as P-GW (None until the
                         first routing update arrives)
        ================ =========================================================
        """
        super(Amm, self).__init__(*args, **kwargs)
        self.logger = log.get_logger(self.name)
        self.ues = {}
        self.switches = {}
        self.gateways = {}
        self.pgw = None
    def _assign_ue_anchors(self, ue):
        """
        This function selects the gateways for the UE.
        The selection is based on the UE profile.
        An UE can have multiple gateways at the same time but
        only one gateway can be used as default gateway (the
        others are deprecated gateways).
        """
        a_dict = {}
        prev_att = self.ues[ue.id].get_prev_attachment()
        if prev_att:
            # Carry over the anchors of the previous attachment as deprecated
            # gateways. preferred_lft = 0 marks the anchor as no longer
            # preferred (presumably IPv6 preferred-lifetime semantics —
            # TODO confirm against the Anchor definition).
            for anch in prev_att.anchors.itervalues():
                a_dict[anch.gw.switch.dp.id] = anch
                a_dict[anch.gw.switch.dp.id].preferred_lft = 0
                self.logger.info("Switch <" + str(hex(anch.gw.switch.dp.id)) + "> " + \
                "is a deprecated gateway for UE <" + str(self.ues[ue.id].hw_addr) + ">")
        # The default gw
        anch = self._get_closest_ue_anchor(ue)
        if anch:
            a_dict[anch.gw.switch.dp.id] = anch
            self.logger.info("Switch <" + str(hex(anch.gw.switch.dp.id)) + "> " + \
            "selected as default gateway for UE <" + str(self.ues[ue.id].hw_addr) + ">")
        # Assign the selected gateways to the UE
        self.ues[ue.id].attachment.anchors = a_dict
        ev = EventUEAnchorsUpdate(self.ues[ue.id])
        self.send_event_to_observers(ev)
    def _get_closest_ue_anchor(self, ue):
        """
        Return an Anchor on the gateway closest (by routing distance) to the
        UE's current attachment switch, or fall back to the P-GW when no
        distance information is available.
        """
        # Find the Local-GW
        # NOTE(review): self.distance is only assigned in
        # _handler_routing_update; calling this before the first routing
        # update would raise AttributeError — confirm event ordering.
        dists = {}
        for gw_dpid in self.gateways.keys():
            try:
                dists[gw_dpid] = self.distance[self.ues[ue.id].attachment.switch.switch.dp.id][gw_dpid]
            except KeyError:
                # No known distance for this gateway yet; skip it.
                pass
        if dists:
            index = min(dists, key=dists.get)
            if index in self.switches:
                return self._get_anchor_from_gw_ue(self.switches[min(dists, key=dists.get)], ue)
        return self.pgw
    def _get_anchor_from_gw_ue(self, gw, ue):
        """
        Return an Anchor object.

        The network prefix embeds the UE id (8 hex digits split over two
        16-bit groups) into the gateway's configured prefix, yielding a
        per-UE prefix on that gateway.
        """
        nw_prefix = (gw.gw_conf.nw_prefix[0],
                    gw.gw_conf.nw_prefix[1],
                    '{0:08x}'.format(ue.id)[-8:-4],
                    '{0:08x}'.format(ue.id)[-4:],
                    '0', '0', '0', '0')
        return Anchor(gw, nw_prefix)
    @set_ev_cls(EventUEConnected, MAIN_DISPATCHER)
    def _handler_ue_connected(self, ev):
        """
        Manage UE connection: register the UE and assign its anchors.
        """
        self.ues[ev.ue.id] = ev.ue
        self._assign_ue_anchors(ev.ue)
    @set_ev_cls(EventUEDisconnected, MAIN_DISPATCHER)
    def _handler_ue_disconnected(self, ev):
        """
        Manage UE disconnection.
        """
        try:
            del self.ues[ev.ue.id]
        except KeyError:
            # Unknown UE; disconnection is treated as idempotent.
            pass
    @set_ev_cls(EventRoutingUpdate, MAIN_DISPATCHER)
    def _handler_routing_update(self, ev):
        """
        Handler for EventRoutingUpdate.
        Update the routing stored locally and find the P-GW.
        """
        self.previous = ev.previous
        self.distance = ev.distance
        # Find the P-GW: score each gateway by its median distance to all
        # access-point switches, penalized by the variance of those distances.
        dists = {}
        for gw_dpid in self.gateways.keys():
            try:
                ap_dist = [ap_dist for ap_dpid, ap_dist in self.distance[gw_dpid].iteritems() if ap_dpid in self.switches and self.switches[ap_dpid].is_ap]
                if ap_dist and float('inf') not in ap_dist:
                    dists[gw_dpid] = numpy.median(ap_dist)/(1+numpy.var(ap_dist))
                # NOTE(review): the P-GW re-selection below runs on every loop
                # iteration; hoisting it after the loop would give the same
                # final result — confirm before changing.
                if dists:
                    index = max(dists, key=dists.get)
                    if index in self.switches:
                        self.pgw = self.switches[index]
            except KeyError:
                pass
        # Update UE anchors, the topology changed, so select the best anchor
        for ue in self.ues.itervalues():
            self._assign_ue_anchors(ue)
    @set_ev_cls(EventTopologyUpdate, MAIN_DISPATCHER)
    def _handler_topology_update(self, ev):
        """
        Handler for EventTopologyUpdate.
        Update the network topology stored locally.
        """
        self.switches = ev.switches
        # Keep the gateway map consistent with each switch's is_gw flag.
        for sw_dpid, sw in ev.switches.iteritems():
            if sw.is_gw and sw_dpid not in self.gateways:
                self.gateways[sw_dpid] = sw
            if not sw.is_gw and sw_dpid in self.gateways:
                del self.gateways[sw_dpid]
| gpl-2.0 |
fpy171/django | django/templatetags/i18n.py | 219 | 19311 | from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
from django.utils.safestring import SafeData, mark_safe
# Registry for this module's template tags and filters ({% load i18n %}).
register = Library()
class GetAvailableLanguagesNode(Node):
    """Stores settings.LANGUAGES, with translated names, in a context variable."""
    def __init__(self, variable):
        self.variable = variable
    def render(self, context):
        languages = []
        for code, name in settings.LANGUAGES:
            languages.append((code, translation.ugettext(name)))
        context[self.variable] = languages
        return ''
class GetLanguageInfoNode(Node):
    """Resolves a language code and stores its info dict in the context."""
    def __init__(self, lang_code, variable):
        self.lang_code = lang_code
        self.variable = variable
    def render(self, context):
        code = self.lang_code.resolve(context)
        info = translation.get_language_info(code)
        context[self.variable] = info
        return ''
class GetLanguageInfoListNode(Node):
    """Stores a list of language-info dicts for a sequence of language codes."""
    def __init__(self, languages, variable):
        self.languages = languages
        self.variable = variable
    def get_language_info(self, language):
        # ``language`` may be a bare language-code string or a sequence whose
        # first item is the code (settings.LANGUAGES style); a multi-character
        # first element means it is a sequence.
        if len(language[0]) > 1:
            return translation.get_language_info(language[0])
        return translation.get_language_info(str(language))
    def render(self, context):
        codes = self.languages.resolve(context)
        infos = [self.get_language_info(lang) for lang in codes]
        context[self.variable] = infos
        return ''
class GetCurrentLanguageNode(Node):
    """Stores the currently active language code in a context variable."""
    def __init__(self, variable):
        self.variable = variable
    def render(self, context):
        current = translation.get_language()
        context[self.variable] = current
        return ''
class GetCurrentLanguageBidiNode(Node):
    """Stores the active language's bidi flag (True = right-to-left)."""
    def __init__(self, variable):
        self.variable = variable
    def render(self, context):
        bidi = translation.get_language_bidi()
        context[self.variable] = bidi
        return ''
class TranslateNode(Node):
    """
    Node for {% trans %}: renders the translated (or, with noop, untranslated)
    string, optionally storing it in the context instead of emitting it.
    """
    def __init__(self, filter_expression, noop, asvar=None, message_context=None):
        self.filter_expression = filter_expression
        self.noop = noop
        self.asvar = asvar
        self.message_context = message_context
        # A plain string literal is wrapped so it resolves as a constant.
        if isinstance(self.filter_expression.var, six.string_types):
            self.filter_expression.var = Variable("'%s'" % self.filter_expression.var)
    def render(self, context):
        self.filter_expression.var.translate = not self.noop
        if self.message_context:
            self.filter_expression.var.message_context = self.message_context.resolve(context)
        output = self.filter_expression.resolve(context)
        rendered = render_value_in_context(output, context)
        # Percent signs are doubled in template text so they are not taken as
        # string-format flags; undo that here without losing safe status.
        was_safe = isinstance(rendered, SafeData)
        rendered = rendered.replace('%%', '%')
        if was_safe:
            rendered = mark_safe(rendered)
        if self.asvar:
            context[self.asvar] = rendered
            return ''
        return rendered
class BlockTranslateNode(Node):
    """
    Node for {% blocktrans %}: translates a literal block containing
    {{ placeholders }}, optionally pluralized on a counter and/or scoped to
    a message context, and interpolates the placeholder values afterwards.
    """
    def __init__(self, extra_context, singular, plural=None, countervar=None,
            counter=None, message_context=None, trimmed=False, asvar=None):
        self.extra_context = extra_context
        self.singular = singular
        self.plural = plural
        self.countervar = countervar
        self.counter = counter
        self.message_context = message_context
        self.trimmed = trimmed
        self.asvar = asvar
    def render_token_list(self, tokens):
        # Flatten template tokens into a gettext-style format string and
        # collect the variable names used as %(name)s placeholders.
        result = []
        vars = []
        for token in tokens:
            if token.token_type == TOKEN_TEXT:
                # Literal percent signs must be escaped for %-formatting.
                result.append(token.contents.replace('%', '%%'))
            elif token.token_type == TOKEN_VAR:
                result.append('%%(%s)s' % token.contents)
                vars.append(token.contents)
        msg = ''.join(result)
        if self.trimmed:
            msg = translation.trim_whitespace(msg)
        return msg, vars
    def render(self, context, nested=False):
        if self.message_context:
            message_context = self.message_context.resolve(context)
        else:
            message_context = None
        tmp_context = {}
        for var, val in self.extra_context.items():
            tmp_context[var] = val.resolve(context)
        # Update() works like a push(), so corresponding context.pop() is at
        # the end of function
        context.update(tmp_context)
        singular, vars = self.render_token_list(self.singular)
        if self.plural and self.countervar and self.counter:
            count = self.counter.resolve(context)
            context[self.countervar] = count
            plural, plural_vars = self.render_token_list(self.plural)
            if message_context:
                result = translation.npgettext(message_context, singular,
                                               plural, count)
            else:
                result = translation.ungettext(singular, plural, count)
            vars.extend(plural_vars)
        else:
            if message_context:
                result = translation.pgettext(message_context, singular)
            else:
                result = translation.ugettext(singular)
        default_value = context.template.engine.string_if_invalid
        def render_value(key):
            # Resolve a placeholder from the context, applying the engine's
            # string_if_invalid fallback for missing names.
            if key in context:
                val = context[key]
            else:
                val = default_value % key if '%s' in default_value else default_value
            return render_value_in_context(val, context)
        data = {v: render_value(v) for v in vars}
        context.pop()
        try:
            result = result % data
        except (KeyError, ValueError):
            if nested:
                # Either string is malformed, or it's a bug
                raise TemplateSyntaxError("'blocktrans' is unable to format "
                    "string returned by gettext: %r using %r" % (result, data))
            # The translated string failed to interpolate; retry once with
            # translation deactivated so the untranslated source is used.
            with translation.override(None):
                result = self.render(context, nested=True)
        if self.asvar:
            context[self.asvar] = result
            return ''
        else:
            return result
class LanguageNode(Node):
    """Renders the wrapped nodelist with a specific language activated."""
    def __init__(self, nodelist, language):
        self.nodelist = nodelist
        self.language = language
    def render(self, context):
        lang = self.language.resolve(context)
        with translation.override(lang):
            return self.nodelist.render(context)
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
    """Return the English name for ``lang_code``."""
    info = translation.get_language_info(lang_code)
    return info['name']
@register.filter
def language_name_translated(lang_code):
    """Return the language's name translated into the active language."""
    info = translation.get_language_info(lang_code)
    return translation.ugettext(info['name'])
@register.filter
def language_name_local(lang_code):
    """Return the language's name written in the language itself."""
    info = translation.get_language_info(lang_code)
    return info['name_local']
@register.filter
def language_bidi(lang_code):
    """Return True when the language is written right-to-left."""
    info = translation.get_language_info(lang_code)
    return info['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put it's value into the ``language`` context
variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put it's value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
The translated string can be stored in a variable using `asvar`::
{% blocktrans with bar=foo|filter boo=baz|filter asvar var %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
{{ var }}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
asvar = None
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
elif option == "asvar":
try:
value = remaining_bits.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the asvar option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(options['count'].items())[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed,
asvar=asvar)
@register.tag
def language(parser, token):
    """
    Enable the given language just for the enclosed block::

        {% language "de" %}
        This is {{ bar }} and {{ boo }}.
        {% endlanguage %}
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
    lang = parser.compile_filter(bits[1])
    body = parser.parse(('endlanguage',))
    parser.delete_first_token()
    return LanguageNode(body, lang)
| bsd-3-clause |
chaupt/google-diff-match-patch | python2/diff_match_patch_test.py | 319 | 41744 | #!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
# NOTE: ``reload`` is the Python 2 builtin; under Python 3 this would be
# ``importlib.reload``.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):

  def setUp(self):
    "Test harness for dmp_module."
    self.dmp = dmp_module.diff_match_patch()

  def diff_rebuildtexts(self, diffs):
    """Reconstruct the (text1, text2) pair a diff was originally made from."""
    text1 = ""
    text2 = ""
    for (op, data) in diffs:
      # text1 lacks insertions, text2 lacks deletions; equalities go to both.
      if op != dmp_module.diff_match_patch.DIFF_INSERT:
        text1 += data
      if op != dmp_module.diff_match_patch.DIFF_DELETE:
        text2 += data
    return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
  def testDiffCleanupMerge(self):
    """diff_cleanupMerge: reorder and merge like edit sections in place.

    Each case rebuilds `diffs` and asserts on the mutated list, since
    diff_cleanupMerge returns nothing and edits its argument.
    """
    # Cleanup a messy diff.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([], diffs)
    # No change case.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
    # Merge equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
    # Merge deletions.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
    # Merge insertions.
    diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
    # Merge interweave.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
    # Prefix and suffix detection.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
    # Prefix and suffix detection with equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
    # Slide edit left.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
    # Slide edit right.
    diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
    # Slide edit left recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
    # Slide edit right recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
  def testDiffCleanupSemanticLossless(self):
    """diff_cleanupSemanticLossless: slide edits to align with logical
    boundaries (blank lines, line ends, word/sentence breaks) without
    changing the texts the diff represents. Mutates `diffs` in place.
    """
    # Slide diffs to match logical boundaries.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([], diffs)
    # Blank lines.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
    # Line boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # Alphanumeric boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # Hitting the start.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
    # Hitting the end.
    diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
    # Sentence boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
  def testDiffCleanupSemantic(self):
    """diff_cleanupSemantic: eliminate semantically trivial equalities and
    factor out overlaps between adjacent delete/insert pairs. Mutates
    `diffs` in place.
    """
    # Cleanup semantically trivial equalities.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([], diffs)
    # No elimination #1.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
    # No elimination #2.
    diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
    # Simple elimination.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
    # Backpass elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
    # Multiple eliminations.
    diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # No overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
    # Overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
    # Reverse overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
    # Two overlap eliminations.
    diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
  def testDiffCleanupEfficiency(self):
    """diff_cleanupEfficiency: eliminate equalities that cost more to keep
    than Diff_EditCost. Mutates `diffs` in place; restores the edit cost
    it changed so later tests are unaffected.
    """
    # Cleanup operationally trivial equalities.
    self.dmp.Diff_EditCost = 4
    # Null case.
    diffs = []
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([], diffs)
    # No elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
    # Four-edit elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
    # Three-edit elimination.
    diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
    # Backpass elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
    # High cost elimination.
    self.dmp.Diff_EditCost = 5
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
    # Restore the edit cost for subsequent tests.
    self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
  def testDiffBisect(self):
    """diff_bisect: exercise the divide-and-conquer core directly, passing
    an explicit deadline instead of going through diff_main.
    NOTE: sys.maxint is Python 2-only; this file targets Python 2.
    """
    # Normal.
    a = "cat"
    b = "map"
    # Since the resulting diff hasn't been normalized, it would be ok if
    # the insertion and deletion pairs are swapped.
    # If the order changes, tweak this test as required.
    self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
    # Timeout.
    # A deadline of 0 has already expired, so bisect gives up immediately
    # and falls back to the trivial delete-all/insert-all diff.
    self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
  """MATCH TEST FUNCTIONS"""

  def testMatchAlphabet(self):
    """match_alphabet: initialise the per-character bitmasks for Bitap."""
    self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
    self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))

  def testMatchBitap(self):
    """match_bitap: fuzzy-locate a pattern, honouring Match_Threshold and
    Match_Distance; both tunables are restored before the distance cases.
    """
    self.dmp.Match_Distance = 100
    self.dmp.Match_Threshold = 0.5
    # Exact matches.
    self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
    self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
    # Fuzzy matches.
    self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
    self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
    self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
    # Overflow.
    self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
    self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
    self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
    self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
    # Threshold test.
    self.dmp.Match_Threshold = 0.4
    self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
    self.dmp.Match_Threshold = 0.3
    self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
    self.dmp.Match_Threshold = 0.0
    self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
    self.dmp.Match_Threshold = 0.5
    # Multiple select.
    self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
    self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
    # Distance test.
    self.dmp.Match_Distance = 10  # Strict location.
    self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
    self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
    self.dmp.Match_Distance = 1000  # Loose location.
    self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))

  def testMatchMain(self):
    """match_main: shortcut paths plus a complex fuzzy match.

    Improvement: the verbose try/assertFalse(True)/except ValueError
    block is replaced with the equivalent, idiomatic assertRaises.
    """
    # Full match.
    # Shortcut matches.
    self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
    self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
    self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
    self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
    self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
    self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
    # Complex match.
    self.dmp.Match_Threshold = 0.7
    self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
    self.dmp.Match_Threshold = 0.5
    # Test null inputs: match_main rejects None.
    self.assertRaises(ValueError, self.dmp.match_main, None, None, 0)
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
  def testPatchAddContext(self):
    """patch_addContext: grow a patch's surrounding context (in Patch_Margin
    increments) until the pattern is unique within the given text.
    """
    self.dmp.Patch_Margin = 4
    p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
    self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
    # Same, but not enough trailing context.
    p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps.")
    self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
    # Same, but not enough leading context.
    p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps.")
    self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
    # Same, but with ambiguity: "e" appears twice, so context keeps growing.
    p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
    self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
  def testPatchSplitMax(self):
    """patch_splitMax: split any patch longer than the bitap word size.
    Assumes that Match_MaxBits is 32.
    """
    patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
    # A patch that is already short enough must come through unchanged.
    patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
    oldToText = self.dmp.patch_toText(patches)
    self.dmp.patch_splitMax(patches)
    self.assertEquals(oldToText, self.dmp.patch_toText(patches))
    patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
    patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
    """patch_apply should apply patches to similar-but-not-identical
    text, reporting per-patch success flags.

    Bug fixed: in the 'Edge exact match' case the return value of
    patch_apply() was discarded, so the assertion re-checked a stale
    'results' value from an earlier case and passed vacuously.  The
    result is now captured before asserting.
    """
    self.dmp.Match_Distance = 1000
    self.dmp.Match_Threshold = 0.5
    self.dmp.Patch_DeleteThreshold = 0.5
    # Null case.
    patches = self.dmp.patch_make("", "")
    results = self.dmp.patch_apply(patches, "Hello world.")
    self.assertEquals(("Hello world.", []), results)
    # Exact match.
    patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
    results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
    self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
    # Partial match.
    results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
    self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
    # Failed match.
    results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
    self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
    # Big delete, small change.
    patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
    results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
    self.assertEquals(("xabcy", [True, True]), results)
    # Big delete, big change 1.
    patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
    results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
    self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
    # Big delete, big change 2: a higher delete threshold lets the patch apply.
    self.dmp.Patch_DeleteThreshold = 0.6
    patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
    results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
    self.assertEquals(("xabcy", [True, True]), results)
    self.dmp.Patch_DeleteThreshold = 0.5
    # Compensate for failed patch: strict matching makes the first patch fail.
    self.dmp.Match_Threshold = 0.0
    self.dmp.Match_Distance = 0
    patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
    results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
    self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
    self.dmp.Match_Threshold = 0.5
    self.dmp.Match_Distance = 1000
    # No side effects: applying a patch must not mutate the patch list.
    patches = self.dmp.patch_make("", "test")
    patchstr = self.dmp.patch_toText(patches)
    self.dmp.patch_apply(patches, "")
    self.assertEquals(patchstr, self.dmp.patch_toText(patches))
    # No side effects with major delete.
    patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
    patchstr = self.dmp.patch_toText(patches)
    self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
    self.assertEquals(patchstr, self.dmp.patch_toText(patches))
    # Edge exact match.
    patches = self.dmp.patch_make("", "test")
    results = self.dmp.patch_apply(patches, "")  # bug fix: capture the result
    self.assertEquals(("test", [True]), results)
    # Near edge exact match.
    patches = self.dmp.patch_make("XY", "XtestY")
    results = self.dmp.patch_apply(patches, "XY")
    self.assertEquals(("XtestY", [True]), results)
    # Edge partial match.
    patches = self.dmp.patch_make("y", "y123")
    results = self.dmp.patch_apply(patches, "x")
    self.assertEquals(("x123", [True]), results)
# Allow the test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
iamang/namebench | nb_third_party/dns/message.py | 215 | 40962 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Messages"""
import cStringIO
import random
import struct
import sys
import time
import dns.exception
import dns.flags
import dns.name
import dns.opcode
import dns.entropy
import dns.rcode
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.renderer
import dns.tsig
class ShortHeader(dns.exception.FormError):
    """Raised when the DNS packet passed to from_wire() is shorter than
    the 12-octet DNS header."""
class TrailingJunk(dns.exception.FormError):
    """Raised when the DNS packet passed to from_wire() has extra junk
    bytes after the end of the message proper."""
class UnknownHeaderField(dns.exception.DNSException):
    """Raised when a header field name is not recognized while converting
    a message from text form."""
class BadEDNS(dns.exception.FormError):
    """Raised when an OPT record occurs anywhere other than the start of
    the additional data section."""
class BadTSIG(dns.exception.FormError):
    """Raised when a TSIG record occurs anywhere other than the end of
    the additional data section."""
class UnknownTSIGKey(dns.exception.DNSException):
    """Raised when a message is TSIG-signed but the key is not in our
    keyring."""
class Message(object):
    """A DNS message.

    @ivar id: The query id; the default is a randomly chosen id.
    @type id: int
    @ivar flags: The DNS flags of the message. @see: RFC 1035 for an
    explanation of these flags.
    @type flags: int
    @ivar question: The question section.
    @type question: list of dns.rrset.RRset objects
    @ivar answer: The answer section.
    @type answer: list of dns.rrset.RRset objects
    @ivar authority: The authority section.
    @type authority: list of dns.rrset.RRset objects
    @ivar additional: The additional data section.
    @type additional: list of dns.rrset.RRset objects
    @ivar edns: The EDNS level to use. The default is -1, no Edns.
    @type edns: int
    @ivar ednsflags: The EDNS flags
    @type ednsflags: long
    @ivar payload: The EDNS payload size. The default is 0.
    @type payload: int
    @ivar options: The EDNS options
    @type options: list of dns.edns.Option objects
    @ivar request_payload: The associated request's EDNS payload size.
    @type request_payload: int
    @ivar keyring: The TSIG keyring to use. The default is None.
    @type keyring: dict
    @ivar keyname: The TSIG keyname to use. The default is None.
    @type keyname: dns.name.Name object
    @ivar keyalgorithm: The TSIG key algorithm to use. The default is
    dns.tsig.default_algorithm.
    @type keyalgorithm: string
    @ivar request_mac: The TSIG MAC of the request message associated with
    this message; used when validating TSIG signatures. @see: RFC 2845 for
    more information on TSIG fields.
    @type request_mac: string
    @ivar fudge: TSIG time fudge; default is 300 seconds.
    @type fudge: int
    @ivar original_id: TSIG original id; defaults to the message's id
    @type original_id: int
    @ivar tsig_error: TSIG error code; default is 0.
    @type tsig_error: int
    @ivar other_data: TSIG other data.
    @type other_data: string
    @ivar mac: The TSIG MAC for this message.
    @type mac: string
    @ivar xfr: Is the message being used to contain the results of a DNS
    zone transfer? The default is False.
    @type xfr: bool
    @ivar origin: The origin of the zone in messages which are used for
    zone transfers or for DNS dynamic updates. The default is None.
    @type origin: dns.name.Name object
    @ivar tsig_ctx: The TSIG signature context associated with this
    message. The default is None.
    @type tsig_ctx: hmac.HMAC object
    @ivar had_tsig: Did the message decoded from wire format have a TSIG
    signature?
    @type had_tsig: bool
    @ivar multi: Is this message part of a multi-message sequence? The
    default is false. This variable is used when validating TSIG signatures
    on messages which are part of a zone transfer.
    @type multi: bool
    @ivar first: Is this message standalone, or the first of a multi
    message sequence? This variable is used when validating TSIG signatures
    on messages which are part of a zone transfer.
    @type first: bool
    @ivar index: An index of rrsets in the message. The index key is
    (section, name, rdclass, rdtype, covers, deleting). Indexing can be
    disabled by setting the index to None.
    @type index: dict
    """

    def __init__(self, id=None):
        # A random query id is drawn from dns.entropy unless the caller
        # supplies one explicitly.
        if id is None:
            self.id = dns.entropy.random_16()
        else:
            self.id = id
        self.flags = 0
        self.question = []
        self.answer = []
        self.authority = []
        self.additional = []
        self.edns = -1
        self.ednsflags = 0
        self.payload = 0
        self.options = []
        self.request_payload = 0
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.request_mac = ''
        self.other_data = ''
        self.tsig_error = 0
        self.fudge = 300
        self.original_id = self.id
        self.mac = ''
        self.xfr = False
        self.origin = None
        self.tsig_ctx = None
        self.had_tsig = False
        self.multi = False
        self.first = True
        self.index = {}

    def __repr__(self):
        # Backticks are the Python 2 repr() operator.
        return '<DNS message, ID ' + `self.id` + '>'

    def __str__(self):
        return self.to_text()

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert the message to text.

        The I{origin}, I{relativize}, and any other keyword
        arguments are passed to the rrset to_wire() method.

        @rtype: string
        """
        s = cStringIO.StringIO()
        print >> s, 'id %d' % self.id
        print >> s, 'opcode %s' % \
            dns.opcode.to_text(dns.opcode.from_flags(self.flags))
        rc = dns.rcode.from_flags(self.flags, self.ednsflags)
        print >> s, 'rcode %s' % dns.rcode.to_text(rc)
        print >> s, 'flags %s' % dns.flags.to_text(self.flags)
        if self.edns >= 0:
            print >> s, 'edns %s' % self.edns
            if self.ednsflags != 0:
                print >> s, 'eflags %s' % \
                    dns.flags.edns_to_text(self.ednsflags)
            print >> s, 'payload', self.payload
        # Dynamic updates reuse the four sections under different names
        # (RFC 2136), so label them accordingly.
        is_update = dns.opcode.is_update(self.flags)
        if is_update:
            print >> s, ';ZONE'
        else:
            print >> s, ';QUESTION'
        for rrset in self.question:
            print >> s, rrset.to_text(origin, relativize, **kw)
        if is_update:
            print >> s, ';PREREQ'
        else:
            print >> s, ';ANSWER'
        for rrset in self.answer:
            print >> s, rrset.to_text(origin, relativize, **kw)
        if is_update:
            print >> s, ';UPDATE'
        else:
            print >> s, ';AUTHORITY'
        for rrset in self.authority:
            print >> s, rrset.to_text(origin, relativize, **kw)
        print >> s, ';ADDITIONAL'
        for rrset in self.additional:
            print >> s, rrset.to_text(origin, relativize, **kw)
        #
        # We strip off the final \n so the caller can print the result without
        # doing weird things to get around eccentricities in Python print
        # formatting
        #
        return s.getvalue()[:-1]

    def __eq__(self, other):
        """Two messages are equal if they have the same content in the
        header, question, answer, and authority sections.
        @rtype: bool"""
        # NOTE: the additional section is deliberately excluded from the
        # comparison (see docstring above).
        if not isinstance(other, Message):
            return False
        if self.id != other.id:
            return False
        if self.flags != other.flags:
            return False
        for n in self.question:
            if n not in other.question:
                return False
        for n in other.question:
            if n not in self.question:
                return False
        for n in self.answer:
            if n not in other.answer:
                return False
        for n in other.answer:
            if n not in self.answer:
                return False
        for n in self.authority:
            if n not in other.authority:
                return False
        for n in other.authority:
            if n not in self.authority:
                return False
        return True

    def __ne__(self, other):
        """Are two messages not equal?
        @rtype: bool"""
        return not self.__eq__(other)

    def is_response(self, other):
        """Is other a response to self?
        @rtype: bool"""
        # A response must have QR set, the same id, and the same opcode.
        if other.flags & dns.flags.QR == 0 or \
           self.id != other.id or \
           dns.opcode.from_flags(self.flags) != \
           dns.opcode.from_flags(other.flags):
            return False
        # Error responses and update responses are accepted without
        # comparing question sections.
        if dns.rcode.from_flags(other.flags, other.ednsflags) != \
           dns.rcode.NOERROR:
            return True
        if dns.opcode.is_update(self.flags):
            return True
        # Otherwise the question sections must match exactly (both ways).
        for n in self.question:
            if n not in other.question:
                return False
        for n in other.question:
            if n not in self.question:
                return False
        return True

    def section_number(self, section):
        # Map a section list (by identity) to its wire-format section number.
        if section is self.question:
            return 0
        elif section is self.answer:
            return 1
        elif section is self.authority:
            return 2
        elif section is self.additional:
            return 3
        else:
            raise ValueError('unknown section')

    def find_rrset(self, section, name, rdclass, rdtype,
                   covers=dns.rdatatype.NONE, deleting=None, create=False,
                   force_unique=False):
        """Find the RRset with the given attributes in the specified section.

        @param section: the section of the message to look in, e.g.
        self.answer.
        @type section: list of dns.rrset.RRset objects
        @param name: the name of the RRset
        @type name: dns.name.Name object
        @param rdclass: the class of the RRset
        @type rdclass: int
        @param rdtype: the type of the RRset
        @type rdtype: int
        @param covers: the covers value of the RRset
        @type covers: int
        @param deleting: the deleting value of the RRset
        @type deleting: int
        @param create: If True, create the RRset if it is not found.
        The created RRset is appended to I{section}.
        @type create: bool
        @param force_unique: If True and create is also True, create a
        new RRset regardless of whether a matching RRset exists already.
        @type force_unique: bool
        @raises KeyError: the RRset was not found and create was False
        @rtype: dns.rrset.RRset object"""

        key = (self.section_number(section),
               name, rdclass, rdtype, covers, deleting)
        if not force_unique:
            if not self.index is None:
                # Fast path: O(1) lookup in the rrset index.
                rrset = self.index.get(key)
                if not rrset is None:
                    return rrset
            else:
                # Indexing disabled; linear scan of the section.
                for rrset in section:
                    if rrset.match(name, rdclass, rdtype, covers, deleting):
                        return rrset
        if not create:
            raise KeyError
        rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
        section.append(rrset)
        if not self.index is None:
            self.index[key] = rrset
        return rrset

    def get_rrset(self, section, name, rdclass, rdtype,
                  covers=dns.rdatatype.NONE, deleting=None, create=False,
                  force_unique=False):
        """Get the RRset with the given attributes in the specified section.

        If the RRset is not found, None is returned.

        @param section: the section of the message to look in, e.g.
        self.answer.
        @type section: list of dns.rrset.RRset objects
        @param name: the name of the RRset
        @type name: dns.name.Name object
        @param rdclass: the class of the RRset
        @type rdclass: int
        @param rdtype: the type of the RRset
        @type rdtype: int
        @param covers: the covers value of the RRset
        @type covers: int
        @param deleting: the deleting value of the RRset
        @type deleting: int
        @param create: If True, create the RRset if it is not found.
        The created RRset is appended to I{section}.
        @type create: bool
        @param force_unique: If True and create is also True, create a
        new RRset regardless of whether a matching RRset exists already.
        @type force_unique: bool
        @rtype: dns.rrset.RRset object or None"""

        # Same as find_rrset() but with a None result instead of KeyError.
        try:
            rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
                                    deleting, create, force_unique)
        except KeyError:
            rrset = None
        return rrset

    def to_wire(self, origin=None, max_size=0, **kw):
        """Return a string containing the message in DNS compressed wire
        format.

        Additional keyword arguments are passed to the rrset to_wire()
        method.

        @param origin: The origin to be appended to any relative names.
        @type origin: dns.name.Name object
        @param max_size: The maximum size of the wire format output; default
        is 0, which means 'the message's request payload, if nonzero, or
        65535'.
        @type max_size: int
        @raises dns.exception.TooBig: max_size was exceeded
        @rtype: string
        """
        if max_size == 0:
            if self.request_payload != 0:
                max_size = self.request_payload
            else:
                max_size = 65535
        # Clamp to the valid DNS message size range [512, 65535].
        if max_size < 512:
            max_size = 512
        elif max_size > 65535:
            max_size = 65535
        r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
        for rrset in self.question:
            r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
        for rrset in self.answer:
            r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
        for rrset in self.authority:
            r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
        # The OPT pseudo-record must precede the other additional records.
        if self.edns >= 0:
            r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
        for rrset in self.additional:
            r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
        r.write_header()
        # A TSIG record, if configured, is appended last and signs the
        # rendered message.
        if not self.keyname is None:
            r.add_tsig(self.keyname, self.keyring[self.keyname],
                       self.fudge, self.original_id, self.tsig_error,
                       self.other_data, self.request_mac,
                       self.keyalgorithm)
            self.mac = r.mac
        return r.get_wire()

    def use_tsig(self, keyring, keyname=None, fudge=300,
                 original_id=None, tsig_error=0, other_data='',
                 algorithm=dns.tsig.default_algorithm):
        """When sending, a TSIG signature using the specified keyring
        and keyname should be added.

        @param keyring: The TSIG keyring to use; defaults to None.
        @type keyring: dict
        @param keyname: The name of the TSIG key to use; defaults to None.
        The key must be defined in the keyring. If a keyring is specified
        but a keyname is not, then the key used will be the first key in the
        keyring. Note that the order of keys in a dictionary is not defined,
        so applications should supply a keyname when a keyring is used, unless
        they know the keyring contains only one key.
        @type keyname: dns.name.Name or string
        @param fudge: TSIG time fudge; default is 300 seconds.
        @type fudge: int
        @param original_id: TSIG original id; defaults to the message's id
        @type original_id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param algorithm: The TSIG algorithm to use; defaults to
        dns.tsig.default_algorithm
        """
        self.keyring = keyring
        if keyname is None:
            # Arbitrary (dict-ordered) choice; see docstring caveat above.
            self.keyname = self.keyring.keys()[0]
        else:
            if isinstance(keyname, (str, unicode)):
                keyname = dns.name.from_text(keyname)
            self.keyname = keyname
        self.keyalgorithm = algorithm
        self.fudge = fudge
        if original_id is None:
            self.original_id = self.id
        else:
            self.original_id = original_id
        self.tsig_error = tsig_error
        self.other_data = other_data

    def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
        """Configure EDNS behavior.
        @param edns: The EDNS level to use. Specifying None, False, or -1
        means 'do not use EDNS', and in this case the other parameters are
        ignored. Specifying True is equivalent to specifying 0, i.e. 'use
        EDNS0'.
        @type edns: int or bool or None
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @type payload: int
        @param request_payload: The EDNS payload size to use when sending
        this message. If not specified, defaults to the value of payload.
        @type request_payload: int or None
        @param options: The EDNS options
        @type options: None or list of dns.edns.Option objects
        @see: RFC 2671
        """
        # Normalize the various 'edns' spellings to an int level.
        if edns is None or edns is False:
            edns = -1
        if edns is True:
            edns = 0
        if request_payload is None:
            request_payload = payload
        if edns < 0:
            ednsflags = 0
            payload = 0
            request_payload = 0
            options = []
        else:
            # make sure the EDNS version in ednsflags agrees with edns
            # (the version lives in bits 16-23 of ednsflags)
            ednsflags &= 0xFF00FFFFL
            ednsflags |= (edns << 16)
            if options is None:
                options = []
        self.edns = edns
        self.ednsflags = ednsflags
        self.payload = payload
        self.options = options
        self.request_payload = request_payload

    def want_dnssec(self, wanted=True):
        """Enable or disable 'DNSSEC desired' flag in requests.
        @param wanted: Is DNSSEC desired? If True, EDNS is enabled if
        required, and then the DO bit is set. If False, the DO bit is
        cleared if EDNS is enabled.
        @type wanted: bool
        """
        if wanted:
            if self.edns < 0:
                # The DO bit lives in ednsflags, so EDNS must be on.
                self.use_edns()
            self.ednsflags |= dns.flags.DO
        elif self.edns >= 0:
            self.ednsflags &= ~dns.flags.DO

    def rcode(self):
        """Return the rcode.
        @rtype: int
        """
        return dns.rcode.from_flags(self.flags, self.ednsflags)

    def set_rcode(self, rcode):
        """Set the rcode.
        @param rcode: the rcode
        @type rcode: int
        """
        # The low 4 bits of the rcode go in the flags word; the extended
        # rcode bits go in the top 8 bits of ednsflags.
        (value, evalue) = dns.rcode.to_flags(rcode)
        self.flags &= 0xFFF0
        self.flags |= value
        self.ednsflags &= 0x00FFFFFFL
        self.ednsflags |= evalue
        if self.ednsflags != 0 and self.edns < 0:
            # An extended rcode implies EDNS.
            self.edns = 0

    def opcode(self):
        """Return the opcode.
        @rtype: int
        """
        return dns.opcode.from_flags(self.flags)

    def set_opcode(self, opcode):
        """Set the opcode.
        @param opcode: the opcode
        @type opcode: int
        """
        # 0x87FF clears the opcode bits (bits 11-14) of the flags word.
        self.flags &= 0x87FF
        self.flags |= dns.opcode.to_flags(opcode)
class _WireReader(object):
    """Wire format reader.

    @ivar wire: the wire-format message.
    @type wire: string
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar current: When building a message object from wire format, this
    variable contains the offset from the beginning of wire of the next octet
    to be read.
    @type current: int
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar one_rr_per_rrset: Put each RR into its own RRset?
    @type one_rr_per_rrset: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    """

    def __init__(self, wire, message, question_only=False,
                 one_rr_per_rrset=False):
        self.wire = wire
        self.message = message
        self.current = 0
        self.updating = False
        self.zone_rdclass = dns.rdataclass.IN
        self.question_only = question_only
        self.one_rr_per_rrset = one_rr_per_rrset

    def _get_question(self, qcount):
        """Read the next I{qcount} records from the wire data and add them to
        the question section.
        @param qcount: the number of questions in the message
        @type qcount: int"""

        # A dynamic update may have at most one record in the zone section.
        if self.updating and qcount > 1:
            raise dns.exception.FormError
        for i in xrange(0, qcount):
            (qname, used) = dns.name.from_wire(self.wire, self.current)
            if not self.message.origin is None:
                qname = qname.relativize(self.message.origin)
            self.current = self.current + used
            (rdtype, rdclass) = \
                struct.unpack('!HH',
                              self.wire[self.current:self.current + 4])
            self.current = self.current + 4
            self.message.find_rrset(self.message.question, qname,
                                    rdclass, rdtype, create=True,
                                    force_unique=True)
            if self.updating:
                # In an update, the zone section's class is the zone class.
                self.zone_rdclass = rdclass

    def _get_section(self, section, count):
        """Read the next I{count} records from the wire data and add them to
        the specified section.
        @param section: the section of the message to which to add records
        @type section: list of dns.rrset.RRset objects
        @param count: the number of records to read
        @type count: int"""

        if self.updating or self.one_rr_per_rrset:
            force_unique = True
        else:
            force_unique = False
        seen_opt = False
        for i in xrange(0, count):
            rr_start = self.current
            (name, used) = dns.name.from_wire(self.wire, self.current)
            # Keep the absolute form too; TSIG key lookup needs it.
            absolute_name = name
            if not self.message.origin is None:
                name = name.relativize(self.message.origin)
            self.current = self.current + used
            (rdtype, rdclass, ttl, rdlen) = \
                struct.unpack('!HHIH',
                              self.wire[self.current:self.current + 10])
            self.current = self.current + 10
            if rdtype == dns.rdatatype.OPT:
                # At most one OPT record, and only in the additional section.
                if not section is self.message.additional or seen_opt:
                    raise BadEDNS
                # For OPT, the class field is the payload size and the TTL
                # field holds the extended rcode/version/flags (RFC 2671).
                self.message.payload = rdclass
                self.message.ednsflags = ttl
                self.message.edns = (ttl & 0xff0000) >> 16
                self.message.options = []
                current = self.current
                optslen = rdlen
                # Walk the option TLVs inside the OPT rdata.
                # NOTE(review): dns.edns is used here but does not appear in
                # the visible import block - confirm it is imported elsewhere.
                while optslen > 0:
                    (otype, olen) = \
                        struct.unpack('!HH',
                                      self.wire[current:current + 4])
                    current = current + 4
                    opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
                    self.message.options.append(opt)
                    current = current + olen
                    optslen = optslen - 4 - olen
                seen_opt = True
            elif rdtype == dns.rdatatype.TSIG:
                # TSIG must be the very last record of the additional section.
                if not (section is self.message.additional and
                        i == (count - 1)):
                    raise BadTSIG
                if self.message.keyring is None:
                    raise UnknownTSIGKey('got signed message without keyring')
                secret = self.message.keyring.get(absolute_name)
                if secret is None:
                    raise UnknownTSIGKey("key '%s' unknown" % name)
                self.message.tsig_ctx = \
                    dns.tsig.validate(self.wire,
                                      absolute_name,
                                      secret,
                                      int(time.time()),
                                      self.message.request_mac,
                                      rr_start,
                                      self.current,
                                      rdlen,
                                      self.message.tsig_ctx,
                                      self.message.multi,
                                      self.message.first)
                self.message.had_tsig = True
            else:
                # NOTE(review): ttl is unpacked as an unsigned 32-bit value
                # ('!HHIH' above), so this guard appears unreachable -
                # presumably defensive; confirm before removing.
                if ttl < 0:
                    ttl = 0
                # In updates, class ANY/NONE encodes a deletion; the real
                # class is the zone's class.
                if self.updating and \
                   (rdclass == dns.rdataclass.ANY or
                    rdclass == dns.rdataclass.NONE):
                    deleting = rdclass
                    rdclass = self.zone_rdclass
                else:
                    deleting = None
                if deleting == dns.rdataclass.ANY or \
                   (deleting == dns.rdataclass.NONE and \
                    section == self.message.answer):
                    # Delete-rrset/delete-name forms carry no rdata.
                    covers = dns.rdatatype.NONE
                    rd = None
                else:
                    rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
                                             self.current, rdlen,
                                             self.message.origin)
                    covers = rd.covers()
                # In a zone transfer, SOA records delimit the transfer, so
                # each one must start a new RRset.
                if self.message.xfr and rdtype == dns.rdatatype.SOA:
                    force_unique = True
                rrset = self.message.find_rrset(section, name,
                                                rdclass, rdtype, covers,
                                                deleting, True, force_unique)
                if not rd is None:
                    rrset.add(rd, ttl)
            self.current = self.current + rdlen

    def read(self):
        """Read a wire format DNS message and build a dns.message.Message
        object."""

        l = len(self.wire)
        if l < 12:
            raise ShortHeader
        # Fixed 12-octet header: id, flags, and the four section counts.
        (self.message.id, self.message.flags, qcount, ancount,
         aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
        self.current = 12
        if dns.opcode.is_update(self.message.flags):
            self.updating = True
        self._get_question(qcount)
        if self.question_only:
            return
        self._get_section(self.message.answer, ancount)
        self._get_section(self.message.authority, aucount)
        self._get_section(self.message.additional, adcount)
        if self.current != l:
            raise TrailingJunk
        # Unsigned messages in a multi-message TSIG sequence still feed the
        # ongoing signature context.
        if self.message.multi and self.message.tsig_ctx and \
           not self.message.had_tsig:
            self.message.tsig_ctx.update(self.wire)
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
              tsig_ctx = None, multi = False, first = True,
              question_only = False, one_rr_per_rrset = False):
    """Build a message object from a DNS wire format message.

    @param keyring: The keyring to use if the message is signed.
    @type keyring: dict
    @param request_mac: If the message is a response to a TSIG-signed request,
    I{request_mac} should be set to the MAC of that request.
    @type request_mac: string
    @param xfr: Is this message part of a zone transfer?
    @type xfr: bool
    @param origin: If the message is part of a zone transfer, I{origin}
    should be the origin name of the zone.
    @type origin: dns.name.Name object
    @param tsig_ctx: The ongoing TSIG context, used when validating zone
    transfers.
    @type tsig_ctx: hmac.HMAC object
    @param multi: Is this message part of a multiple message sequence?
    @type multi: bool
    @param first: Is this message standalone, or the first of a multi
    message sequence?
    @type first: bool
    @param question_only: Read only up to the end of the question section?
    @type question_only: bool
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    @raises ShortHeader: The message is less than 12 octets long.
    @raises TrailingJunk: There were octets in the message past the end
    of the proper DNS message.
    @raises BadEDNS: An OPT record was in the wrong section, or occurred more
    than once.
    @raises BadTSIG: A TSIG record was not the last record of the additional
    data section.
    @rtype: dns.message.Message object"""

    # The real id is filled in by the reader; start from a placeholder.
    message = Message(id=0)
    message.keyring = keyring
    message.request_mac = request_mac
    message.xfr = xfr
    message.origin = origin
    message.tsig_ctx = tsig_ctx
    message.multi = multi
    message.first = first
    _WireReader(wire, message, question_only, one_rr_per_rrset).read()
    return message
class _TextReader(object):
    """Text format reader.

    @ivar tok: the tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    @ivar last_name: The most recently read name when building a message object
    from text format.
    @type last_name: dns.name.Name object
    """

    def __init__(self, text, message):
        # NOTE(review): dns.tokenizer is used here but does not appear in
        # the visible import block - confirm it is imported elsewhere.
        self.message = message
        self.tok = dns.tokenizer.Tokenizer(text)
        self.last_name = None
        self.zone_rdclass = dns.rdataclass.IN
        self.updating = False

    def _header_line(self, section):
        """Process one line from the text format header section."""

        token = self.tok.get()
        what = token.value
        if what == 'id':
            self.message.id = self.tok.get_int()
        elif what == 'flags':
            # Consume flag mnemonics until a non-identifier token appears.
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.flags = self.message.flags | \
                    dns.flags.from_text(token.value)
            if dns.opcode.is_update(self.message.flags):
                self.updating = True
        elif what == 'edns':
            self.message.edns = self.tok.get_int()
            # The EDNS version lives in bits 16-23 of ednsflags.
            self.message.ednsflags = self.message.ednsflags | \
                (self.message.edns << 16)
        elif what == 'eflags':
            if self.message.edns < 0:
                self.message.edns = 0
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.ednsflags = self.message.ednsflags | \
                    dns.flags.edns_from_text(token.value)
        elif what == 'payload':
            self.message.payload = self.tok.get_int()
            # A payload line implies EDNS is in use.
            if self.message.edns < 0:
                self.message.edns = 0
        elif what == 'opcode':
            text = self.tok.get_string()
            self.message.flags = self.message.flags | \
                dns.opcode.to_flags(dns.opcode.from_text(text))
        elif what == 'rcode':
            text = self.tok.get_string()
            self.message.set_rcode(dns.rcode.from_text(text))
        else:
            raise UnknownHeaderField
        self.tok.get_eol()

    def _question_line(self, section):
        """Process one line from the text format question section."""

        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # Class
        # The class token is optional: tokenizer syntax errors are fatal,
        # but any other failure means the token was actually the type, so
        # fall back to class IN.  (The bare 'except' is deliberate here.)
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        self.message.find_rrset(self.message.question, name,
                                rdclass, rdtype, create=True,
                                force_unique=True)
        if self.updating:
            self.zone_rdclass = rdclass
        self.tok.get_eol()

    def _rr_line(self, section):
        """Process one line from the text format answer, authority, or
        additional data sections.
        """

        deleting = None
        # Name
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # TTL
        # Optional field; a non-numeric token falls through to TTL 0.
        try:
            ttl = int(token.value, 0)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            ttl = 0
        # Class
        # Optional field, same backtracking pattern as the TTL above.
        # Class ANY/NONE in an update encodes a deletion.
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
            if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
                deleting = rdclass
                rdclass = self.zone_rdclass
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        token = self.tok.get()
        if not token.is_eol_or_eof():
            self.tok.unget(token)
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
            covers = rd.covers()
        else:
            # No rdata on the line (e.g. delete-rrset forms in updates).
            rd = None
            covers = dns.rdatatype.NONE
        rrset = self.message.find_rrset(section, name,
                                        rdclass, rdtype, covers,
                                        deleting, True, self.updating)
        if not rd is None:
            rrset.add(rd, ttl)

    def read(self):
        """Read a text format DNS message and build a dns.message.Message
        object."""

        line_method = self._header_line
        section = None
        while 1:
            token = self.tok.get(True, True)
            if token.is_eol_or_eof():
                break
            if token.is_comment():
                # Comment tokens like ';QUESTION' switch the current section
                # and the per-line parsing method.
                u = token.value.upper()
                if u == 'HEADER':
                    line_method = self._header_line
                elif u == 'QUESTION' or u == 'ZONE':
                    line_method = self._question_line
                    section = self.message.question
                elif u == 'ANSWER' or u == 'PREREQ':
                    line_method = self._rr_line
                    section = self.message.answer
                elif u == 'AUTHORITY' or u == 'UPDATE':
                    line_method = self._rr_line
                    section = self.message.authority
                elif u == 'ADDITIONAL':
                    line_method = self._rr_line
                    section = self.message.additional
                self.tok.get_eol()
                continue
            self.tok.unget(token)
            line_method(section)
def from_text(text):
    """Build a message object from a text format message.

    @param text: The text format message.
    @type text: string
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""

    # 'text' may also be a file object, but that is an implementation
    # detail we do not publish; from_file() is the official file interface.
    message = Message()
    _TextReader(text, message).read()
    return message
def from_file(f):
    """Read the next text format message from the specified file.
    @param f: file or string.  If I{f} is a string, it is treated
    as the name of a file to open.
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""
    if sys.hexversion >= 0x02030000:
        # allow Unicode filenames; turn on universal newline support
        # (Python 2.3+ only).
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        # A filename was given: open it ourselves, and remember to
        # close it once parsing is done.
        f = file(f, opts)
        want_close = True
    else:
        # Caller passed an already-open file; they own its lifetime.
        want_close = False
    try:
        m = from_text(f)
    finally:
        if want_close:
            f.close()
    return m
def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
               want_dnssec=False):
    """Make a query message.
    The query name, type, and class may all be specified either
    as objects of the appropriate type, or as strings.
    The query will have a randomly choosen query id, and its DNS flags
    will be set to dns.flags.RD.
    @param qname: The query name.
    @type qname: dns.name.Name object or string
    @param rdtype: The desired rdata type.
    @type rdtype: int
    @param rdclass: The desired rdata class; the default is class IN.
    @type rdclass: int
    @param use_edns: The EDNS level to use; the default is None (no EDNS).
    See the description of dns.message.Message.use_edns() for the possible
    values for use_edns and their meanings.
    @type use_edns: int or bool or None
    @param want_dnssec: Should the query indicate that DNSSEC is desired?
    @type want_dnssec: bool
    @rtype: dns.message.Message object"""
    # Coerce any string arguments into their object forms.
    if isinstance(qname, (str, unicode)):
        qname = dns.name.from_text(qname)
    if isinstance(rdtype, str):
        rdtype = dns.rdatatype.from_text(rdtype)
    if isinstance(rdclass, str):
        rdclass = dns.rdataclass.from_text(rdclass)
    m = Message()
    m.flags |= dns.flags.RD
    # force_unique ensures the question is added even if an equal
    # rrset already existed.
    m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
                 force_unique=True)
    m.use_edns(use_edns)
    m.want_dnssec(want_dnssec)
    return m
def make_response(query, recursion_available=False, our_payload=8192):
    """Make a message which is a response for the specified query.
    The message returned is really a response skeleton; it has all
    of the infrastructure required of a response, but none of the
    content.
    The response's question section is a shallow copy of the query's
    question section, so the query's question RRsets should not be
    changed.
    @param query: the query to respond to
    @type query: dns.message.Message object
    @param recursion_available: should RA be set in the response?
    @type recursion_available: bool
    @param our_payload: payload size to advertise in EDNS responses; default
    is 8192.
    @type our_payload: int
    @rtype: dns.message.Message object"""
    # A message with QR set is already a response; refuse to respond to it.
    if query.flags & dns.flags.QR:
        raise dns.exception.FormError('specified query message is not a query')
    response = dns.message.Message(query.id)
    # QR is set; RD is echoed from the query per convention.
    response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
    if recursion_available:
        response.flags |= dns.flags.RA
    response.set_opcode(query.opcode())
    # Shallow copy: the RRsets themselves are shared with the query.
    response.question = list(query.question)
    if query.edns >= 0:
        response.use_edns(0, 0, our_payload, query.payload)
    # Fixed PEP 8 E714: was "if not query.keyname is None".
    if query.keyname is not None:
        # Mirror the query's TSIG parameters so the response can be signed.
        response.keyname = query.keyname
        response.keyring = query.keyring
        response.request_mac = query.mac
    return response
| apache-2.0 |
PythonNut/servo | tests/wpt/web-platform-tests/tools/runner/report.py | 278 | 9660 | import argparse
import json
import sys
from cgi import escape
from collections import defaultdict
import types
def html_escape(item, escape_quote=False):
    """HTML-escape *item* if it is a string; return it unchanged otherwise.

    When escape_quote is true, double quotes are also replaced with
    &quot; so the result is safe inside a double-quoted attribute value.
    """
    if isinstance(item, types.StringTypes):
        rv = escape(item)
        if escape_quote:
            rv = rv.replace('"', "&quot;")
        return rv
    else:
        # Non-string children (e.g. Raw or Node instances) pass through
        # untouched so their own rendering is not double-escaped.
        return item
class Raw(object):
    """Simple wrapper around a string to stop it being escaped by html_escape"""
    def __init__(self, value):
        # The pre-escaped / trusted markup to emit verbatim.
        self.value = value

    def __unicode__(self):
        return unicode(self.value)
class Node(object):
    """Node structure used when building HTML"""
    def __init__(self, name, attrs, children):
        #Need list of void elements
        self.name = name          # tag name, e.g. "td"
        self.attrs = attrs        # dict of attribute name -> value
        self.children = children  # list of child nodes / strings

    def __unicode__(self):
        """Serialise this element (and its subtree) to unicode markup."""
        if self.attrs:
            #Need to escape
            attrs_unicode = " " + " ".join("%s=\"%s\"" % (html_escape(key),
                                                          html_escape(value,
                                                                      escape_quote=True))
                                           for key, value in self.attrs.iteritems())
        else:
            attrs_unicode = ""
        # Children are escaped individually; Raw/Node children render
        # themselves via their own __unicode__.
        return "<%s%s>%s</%s>\n" % (self.name,
                                    attrs_unicode,
                                    "".join(unicode(html_escape(item))
                                            for item in self.children),
                                    self.name)

    def __str__(self):
        return unicode(self).encode("utf8")
class RootNode(object):
    """Special Node representing the document root"""
    def __init__(self, *children):
        # The doctype is always emitted first.
        self.children = ["<!DOCTYPE html>"] + list(children)

    def __unicode__(self):
        # No escaping here: children are Nodes (or the doctype literal)
        # that render themselves.
        return "".join(unicode(item) for item in self.children)

    def __str__(self):
        return unicode(self).encode("utf8")
def flatten(iterable):
    """Flatten a list of lists by one level so that
    [1,["abc"], "def",[2, [3]]]
    becomes
    [1, "abc", "def", 2, [3]]"""
    flattened = []
    for element in iterable:
        # Strings are iterable but must be kept whole, so they are
        # appended rather than extended.
        if hasattr(element, "__iter__") and not isinstance(element, types.StringTypes):
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
class HTML(object):
    """Simple HTML templating system. An instance of this class can create
    element nodes by calling methods with the same name as the element,
    passing in children as positional arguments or as a list, and attributes
    as keyword arguments, with _ replacing - and trailing _ for python keywords
    e.g.
    h = HTML()
    print h.html(
    html.head(),
    html.body([html.h1("Hello World!")], class_="body-class")
    )
    Would give
    <!DOCTYPE html><html><head></head><body class="body-class"><h1>Hello World!</h1></body></html>"""
    def __getattr__(self, name):
        # Any attribute access synthesises an element factory for a tag of
        # that name; __getattr__ is only called for missing attributes, and
        # the factory is cached via setattr below so each tag name is built
        # only once per instance.
        def make_html(self, *content, **attrs):
            for attr_name in attrs.keys():
                # Map python-safe keyword names back to HTML attribute
                # names: "_" -> "-", and strip the trailing "-" that a
                # trailing "_" (e.g. class_) produces.
                if "_" in attr_name:
                    new_name = attr_name.replace("_", "-")
                    if new_name.endswith("-"):
                        new_name = new_name[:-1]
                    attrs[new_name] = attrs.pop(attr_name)
            return Node(name, attrs, flatten(content))

        # Python 2 three-argument MethodType binds make_html to this
        # instance so it behaves like a normal method.
        method = types.MethodType(make_html, self, HTML)
        setattr(self, name, method)
        return method

    def __call__(self, *children):
        # Calling the instance itself produces the document root.
        return RootNode(*flatten(children))
# Module-level HTML builder shared by all the rendering helpers below.
h = HTML()
class TestResult(object):
    """Simple holder for the results of a single test in a single UA.

    Instances hash and compare by test id only, so they can be used as
    set members and dictionary keys.
    """
    def __init__(self, test):
        self.test = test    # test id (string or tuple)
        self.results = {}   # per-UA results, filled in by callers

    # The original class defined __cmp__ returning a boolean, which is
    # wrong for the cmp protocol (it expects negative/zero/positive, so
    # equal objects returned 1, i.e. "greater") and is ignored entirely
    # on Python 3.  __eq__/__ne__ give correct, portable equality that
    # is consistent with __hash__.
    def __eq__(self, other):
        return self.test == other.test

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.test)
def load_data(args):
    """Load data treating args as a list of UA name, filename pairs.

    Returns a dict mapping UA name -> parsed JSON results.  An odd
    number of arguments raises ValueError when the final, incomplete
    pair is unpacked.
    """
    # Group the flat argument list into consecutive (UA, filename) pairs.
    pairs = []
    for i in xrange(0, len(args), 2):
        pairs.append(args[i:i+2])
    rv = {}
    for UA, filename in pairs:
        with open(filename) as f:
            rv[UA] = json.load(f)
    return rv
def test_id(id):
"""Convert a test id in JSON into an immutable object that
can be used as a dictionary key"""
if isinstance(id, list):
return tuple(id)
else:
return id
def all_tests(data):
    """Map each test id to the union of subtest names seen in any UA."""
    tests = defaultdict(set)
    for UA, results in data.iteritems():
        for result in results["results"]:
            id = test_id(result["test"])
            # Union across UAs: a subtest may only exist in some runs.
            tests[id] |= set(subtest["name"] for subtest in result["subtests"])
    return tests
def group_results(data):
    """Produce a list of UAs and a dictionary mapping specific tests to their
    status in all UAs e.g.
    ["UA1", "UA2"], {"test_id":{"harness":{"UA1": (status1, message1),
    "UA2": (status2, message2)},
    "subtests":{"subtest1": "UA1": (status1-1, message1-1),
    "UA2": (status2-1, message2-1)}}}
    Status and message are None if the test didn't run in a particular UA.
    Message is None if the test didn't produce a message"""
    tests = all_tests(data)
    UAs = data.keys()

    def result():
        # defaultdict factory: harness status starts as (None, None) for
        # every UA; subtests are initialised lazily once the test id is
        # known (the name set depends on the test).
        return {
            "harness": dict((UA, (None, None)) for UA in UAs),
            "subtests": None  # init this later
        }

    results_by_test = defaultdict(result)
    for UA, results in data.iteritems():
        for test_data in results["results"]:
            id = test_id(test_data["test"])
            result = results_by_test[id]
            if result["subtests"] is None:
                # First sighting of this test: create (None, None) slots
                # for every known subtest in every UA.
                result["subtests"] = dict(
                    (name, dict((UA, (None, None)) for UA in UAs)) for name in tests[id]
                )
            result["harness"][UA] = (test_data["status"], test_data["message"])
            for subtest in test_data["subtests"]:
                result["subtests"][subtest["name"]][UA] = (subtest["status"],
                                                           subtest["message"])
    return UAs, results_by_test
def status_cell(status, message=None):
    """Produce a table cell showing the status of a test"""
    # A missing result renders as the literal status "NONE".
    if status is None:
        status = "NONE"
    cell_attrs = {"class_": "status " + status}
    if message:
        # The message becomes a hover tooltip.
        cell_attrs["title"] = message
    return h.td(status.title(), **cell_attrs)
def test_link(test_id, subtest=None):
    """Produce an <a> element linking to a test"""
    if isinstance(test_id, types.StringTypes):
        # Plain test: a single link.
        parts = [h.a(test_id, href=test_id)]
    else:
        # Reftest: (test, relation, reference) renders as two links
        # joined by the relation, e.g. "a.html == b.html".
        test_href, relation, ref_href = test_id[0], test_id[1], test_id[2]
        parts = [h.a(test_href, href=test_href),
                 " %s " % relation,
                 h.a(ref_href, href=ref_href)]
    if subtest is not None:
        parts.append(" [%s]" % subtest)
    return parts
def summary(UAs, results_by_test):
    """Render the implementation report summary"""
    # Collect (test, subtest) pairs that passed in no UA at all; subtest
    # is None when the harness-level result itself never passed.
    not_passing = []
    for test, results in results_by_test.iteritems():
        if not any(item[0] in ("PASS", "OK") for item in results["harness"].values()):
            not_passing.append((test, None))
        for subtest_name, subtest_results in results["subtests"].iteritems():
            if not any(item[0] == "PASS" for item in subtest_results.values()):
                not_passing.append((test, subtest_name))
    if not_passing:
        rv = [
            h.p("The following tests failed to pass in all UAs:"),
            h.ul([h.li(test_link(test, subtest))
                  for test, subtest in not_passing])
        ]
    else:
        rv = "All tests passed in at least one UA"
    return rv
def result_rows(UAs, test, result):
    """Render the results for each test run"""
    # Harness-level row: the test name cell spans this row plus one row
    # per subtest; the empty cell keeps the "Subtest" column aligned.
    yield h.tr(
        h.td(
            test_link(test),
            rowspan=(1 + len(result["subtests"]))
        ),
        h.td(),
        [status_cell(status, message)
         for UA, (status, message) in sorted(result["harness"].items())],
        class_="test"
    )
    # One row per subtest, sorted by subtest name for stable output.
    for name, subtest_result in sorted(result["subtests"].iteritems()):
        yield h.tr(
            h.td(name),
            [status_cell(status, message)
             for UA, (status, message) in sorted(subtest_result.items())],
            class_="subtest"
        )
def result_bodies(UAs, results_by_test):
    # One <tbody> per test, in sorted test-id order for stable output.
    return [h.tbody(result_rows(UAs, test, result))
            for test, result in sorted(results_by_test.iteritems())]
def generate_html(UAs, results_by_test):
    """Generate all the HTML output"""
    # Whole report: summary section followed by a full results table with
    # one column per UA (sorted for stable column order).
    doc = h(h.html([
        h.head(h.meta(charset="utf8"),
               h.title("Implementation Report"),
               h.link(href="report.css", rel="stylesheet")),
        h.body(h.h1("Implementation Report"),
               h.h2("Summary"),
               summary(UAs, results_by_test),
               h.h2("Full Results"),
               h.table(
                   h.thead(
                       h.tr(
                           h.th("Test"),
                           h.th("Subtest"),
                           [h.th(UA) for UA in sorted(UAs)]
                       )
                   ),
                   result_bodies(UAs, results_by_test)
               )
               )
    ]))
    return doc
def main(filenames):
    """Build the full HTML report from UA-name/filename argument pairs."""
    data = load_data(filenames)
    UAs, results_by_test = group_results(data)
    return generate_html(UAs, results_by_test)
if __name__ == "__main__":
    # NOTE(review): when no arguments are given, the usage message is
    # printed but main() still runs with an empty list — presumably this
    # yields an empty report rather than exiting; confirm intended.
    if not sys.argv[1:]:
        print """Please supply a list of UA name, filename pairs e.g.
python report.py Firefox firefox.json Chrome chrome.json IE internet_explorer.json"""
    print main(sys.argv[1:])
| mpl-2.0 |
wunderlins/learning | python/zodb/lib/linux64/ZEO/tests/TestThread.py | 2 | 2249 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""A Thread base class for use with unittest."""
import threading
import sys
import six
class TestThread(threading.Thread):
    """Base class for defining threads that run from unittest.

    The subclass should define a testrun() method instead of a run()
    method.

    Call cleanup() when the test is done with the thread, instead of join().
    If the thread exits with an uncaught exception, it's captured and
    re-raised when cleanup() is called.  cleanup() should be called by
    the main thread!  Trying to tell unittest that a test failed from
    another thread creates a nightmare of timing-depending cascading
    failures and missed errors (tracebacks that show up on the screen,
    but don't cause unittest to believe the test failed).

    cleanup() also joins the thread.  If the thread ended without raising
    an uncaught exception, and the join doesn't succeed in the timeout
    period, then the test is made to fail with a "Thread still alive"
    message.
    """

    def __init__(self, testcase):
        threading.Thread.__init__(self)
        # In case this thread hangs, don't stop Python from exiting.
        # (self.daemon replaces setDaemon(), deprecated and removed in
        # modern Python 3; the attribute exists since Python 2.6.)
        self.daemon = True
        self._exc_info = None
        self._testcase = testcase

    def run(self):
        try:
            self.testrun()
        except:
            # Deliberately broad: capture *any* failure so cleanup() can
            # re-raise it in the main thread where unittest can see it.
            self._exc_info = sys.exc_info()

    def cleanup(self, timeout=15):
        """Join the thread and re-raise any exception it captured.

        Must be called from the main thread.  Fails the owning testcase
        if the thread is still alive after *timeout* seconds.
        """
        self.join(timeout)
        if self._exc_info:
            six.reraise(self._exc_info[0], self._exc_info[1], self._exc_info[2])
        # is_alive() replaces isAlive(), which was removed in Python 3.9;
        # is_alive() is available since Python 2.6, so this stays portable.
        if self.is_alive():
            self._testcase.fail("Thread did not finish: %s" % self)
| gpl-2.0 |
Tim---/osmo-tetra | src/demod/python/osmosdr-tetra_demod_fft.py | 1 | 8260 | #!/usr/bin/env python
# Copyright 2012 Dimitri Stolnikov <horiz0n@gmx.net>
# Usage:
# src$ ./demod/python/osmosdr-tetra_demod_fft.py -o /dev/stdout | ./float_to_bits /dev/stdin /dev/stdout | ./tetra-rx /dev/stdin
#
# Adjust the center frequency (-f) and gain (-g) according to your needs.
# Use left click in Wideband Spectrum window to roughly select a TETRA carrier.
# In Wideband Spectrum you can also tune by 1/4 of the bandwidth by clicking on the rightmost/leftmost spectrum side.
# Use left click in Channel Spectrum windows to fine tune the carrier by clicking on the left or right side of the spectrum.
import sys
import math
from gnuradio import gr, gru, eng_notation, blocks, filter, wxgui
from gnuradio.filter import firdes
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import scopesink2
from gnuradio.wxgui import forms
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import osmosdr
import wx
try:
import cqpsk
except:
from tetra_demod import cqpsk
# applies frequency translation, resampling and demodulation
class top_block(grc_wxgui.top_block_gui):
    """GNU Radio flow graph: osmosdr source -> frequency-translating FIR
    tuner -> polyphase resampler -> CQPSK demodulator -> float file sink,
    plus a wx GUI with three FFT/scope pages and tuning/gain controls."""
    def __init__(self):
        grc_wxgui.top_block_gui.__init__(self, title="Top Block")
        options = get_options()
        self.ifreq = options.frequency
        self.rfgain = options.gain
        self.src = osmosdr.source(options.args)
        self.src.set_center_freq(self.ifreq)
        self.src.set_sample_rate(int(options.sample_rate))
        # No gain given on the command line -> enable hardware AGC.
        if self.rfgain is None:
            self.src.set_gain_mode(1)
            self.iagc = 1
            self.rfgain = 0
        else:
            self.iagc = 0
            self.src.set_gain_mode(0)
            self.src.set_gain(self.rfgain)
        # may differ from the requested rate
        sample_rate = self.src.get_sample_rate()
        sys.stderr.write("sample rate: %d\n" % (sample_rate))
        # TETRA symbol rate; 2 samples/symbol gives a 36 kHz output rate.
        symbol_rate = 18000
        sps = 2 # output rate will be 36,000
        out_sample_rate = symbol_rate * sps
        options.low_pass = options.low_pass / 2.0
        if sample_rate == 96000: # FunCube Dongle
            first_decim = 2
        else:
            first_decim = 10
        self.offset = 0
        taps = firdes.low_pass(1.0, sample_rate, options.low_pass, options.low_pass * 0.2, firdes.WIN_HANN)
        self.tuner = filter.freq_xlating_fir_filter_ccf(first_decim, taps, self.offset, sample_rate)
        self.demod = cqpsk.cqpsk_demod(
            samples_per_symbol = sps,
            excess_bw=0.35,
            costas_alpha=0.03,
            gain_mu=0.05,
            mu=0.05,
            omega_relative_limit=0.05,
            log=options.log,
            verbose=options.verbose)
        self.output = blocks.file_sink(gr.sizeof_float, options.output_file)
        # Resampling ratio from the post-decimation rate down to 36 kHz;
        # use the cheaper integer decimator when the ratio is exact.
        rerate = float(sample_rate / float(first_decim)) / float(out_sample_rate)
        sys.stderr.write("resampling factor: %f\n" % rerate)
        if rerate.is_integer():
            sys.stderr.write("using pfb decimator\n")
            self.resamp = filter.pfb.decimator_ccf(int(rerate), None, 0, 110)
            #self.resamp = filter.pfb_decimator_ccf(int(rerate), taps, 0, 110)
        else:
            sys.stderr.write("using pfb resampler\n")
            self.resamp = filter.pfb_arb_resampler_ccf(1 / rerate)
        self.connect(self.src, self.tuner, self.resamp, self.demod, self.output)
        # --- GUI: notebook with three pages -------------------------------
        self.Main = wx.Notebook(self.GetWin(), style=wx.NB_TOP)
        self.Main.AddPage(grc_wxgui.Panel(self.Main), "Wideband Spectrum")
        self.Main.AddPage(grc_wxgui.Panel(self.Main), "Channel Spectrum")
        self.Main.AddPage(grc_wxgui.Panel(self.Main), "Soft Bits")
        # Center-frequency text box retunes the hardware.
        def set_ifreq(ifreq):
            self.ifreq = ifreq
            self._ifreq_text_box.set_value(self.ifreq)
            self.src.set_center_freq(self.ifreq)
        self._ifreq_text_box = forms.text_box(
            parent=self.GetWin(),
            value=self.ifreq,
            callback=set_ifreq,
            label="Center Frequency",
            converter=forms.float_converter(),
        )
        self.Add(self._ifreq_text_box)
        # AGC check box toggles hardware gain mode.
        def set_iagc(iagc):
            self.iagc = iagc
            self._agc_check_box.set_value(self.iagc)
            self.src.set_gain_mode(self.iagc, 0)
            self.src.set_gain(0 if self.iagc == 1 else self.rfgain, 0)
        self._agc_check_box = forms.check_box(
            parent=self.GetWin(),
            value=self.iagc,
            callback=set_iagc,
            label="Automatic Gain",
            true=1,
            false=0,
        )
        self.Add(self._agc_check_box)
        # Manual RF gain slider + text box (ignored while AGC is on).
        def set_rfgain(rfgain):
            self.rfgain = rfgain
            self._rfgain_slider.set_value(self.rfgain)
            self._rfgain_text_box.set_value(self.rfgain)
            self.src.set_gain(0 if self.iagc == 1 else self.rfgain, 0)
        _rfgain_sizer = wx.BoxSizer(wx.VERTICAL)
        self._rfgain_text_box = forms.text_box(
            parent=self.GetWin(),
            sizer=_rfgain_sizer,
            value=self.rfgain,
            callback=set_rfgain,
            label="RF Gain",
            converter=forms.float_converter(),
            proportion=0,
        )
        self._rfgain_slider = forms.slider(
            parent=self.GetWin(),
            sizer=_rfgain_sizer,
            value=self.rfgain,
            callback=set_rfgain,
            minimum=0,
            maximum=50,
            num_steps=200,
            style=wx.SL_HORIZONTAL,
            cast=float,
            proportion=1,
        )
        self.Add(_rfgain_sizer)
        self.Add(self.Main)
        # Clicks near the spectrum edges retune the hardware by a quarter
        # bandwidth; clicks elsewhere coarse-tune the software tuner.
        def fftsink2_callback(x, y):
            if abs(x / (sample_rate / 2)) > 0.9:
                set_ifreq(self.ifreq + x / 2)
            else:
                sys.stderr.write("coarse tuned to: %d Hz\n" % x)
                self.offset = -x
                self.tuner.set_center_freq(self.offset)
        self.scope = fftsink2.fft_sink_c(self.Main.GetPage(0).GetWin(),
            title="Wideband Spectrum (click to coarse tune)",
            fft_size=1024,
            sample_rate=sample_rate,
            ref_scale=2.0,
            ref_level=0,
            y_divs=10,
            fft_rate=10,
            average=False,
            avg_alpha=0.6)
        self.Main.GetPage(0).Add(self.scope.win)
        self.scope.set_callback(fftsink2_callback)
        self.connect(self.src, self.scope)
        # Channel-spectrum clicks fine-tune by a tenth of the click offset.
        def fftsink2_callback2(x, y):
            self.offset = self.offset - (x / 10)
            sys.stderr.write("fine tuned to: %d Hz\n" % self.offset)
            self.tuner.set_center_freq(self.offset)
        self.scope2 = fftsink2.fft_sink_c(self.Main.GetPage(1).GetWin(),
            title="Channel Spectrum (click to fine tune)",
            fft_size=1024,
            sample_rate=out_sample_rate,
            ref_scale=2.0,
            ref_level=-20,
            y_divs=10,
            fft_rate=10,
            average=False,
            avg_alpha=0.6)
        self.Main.GetPage(1).Add(self.scope2.win)
        self.scope2.set_callback(fftsink2_callback2)
        self.connect(self.resamp, self.scope2)
        # Time-domain view of the demodulated soft bits.
        self.scope3 = scopesink2.scope_sink_f(
            self.Main.GetPage(2).GetWin(),
            title="Soft Bits",
            sample_rate=out_sample_rate,
            v_scale=0,
            v_offset=0,
            t_scale=0.001,
            ac_couple=False,
            xy_mode=False,
            num_inputs=1,
            trig_mode=wxgui.TRIG_MODE_AUTO,
            y_axis_label="Counts",
        )
        self.Main.GetPage(2).Add(self.scope3.win)
        self.connect(self.demod, self.scope3)
def get_options():
    """Parse the command line; exits with status 1 on stray arguments."""
    parser = OptionParser(option_class=eng_option)
    parser.add_option("-a", "--args", type="string", default="",
                      help="gr-osmosdr device arguments")
    parser.add_option("-s", "--sample-rate", type="eng_float", default=1800000,
                      help="set receiver sample rate (default 1800000)")
    parser.add_option("-f", "--frequency", type="eng_float", default=394.4e6,
                      help="set receiver center frequency")
    parser.add_option("-g", "--gain", type="eng_float", default=None,
                      help="set receiver gain")
    # demodulator related settings
    parser.add_option("-l", "--log", action="store_true", default=False, help="dump debug .dat files")
    parser.add_option("-L", "--low-pass", type="eng_float", default=25e3, help="low pass cut-off", metavar="Hz")
    parser.add_option("-o", "--output-file", type="string", default="out.float", help="specify the bit output file")
    parser.add_option("-v", "--verbose", action="store_true", default=False, help="dump demodulation data")
    (options, args) = parser.parse_args()
    # This script takes options only; positional arguments are an error.
    if len(args) != 0:
        parser.print_help()
        raise SystemExit, 1
    return (options)
if __name__ == '__main__':
    # Build the flow graph and start the wx GUI main loop.
    tb = top_block()
    tb.Run(True)
| agpl-3.0 |
meng-sun/hil | haas/dev_support.py | 4 | 1979 | # Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
from haas import config
from functools import wraps
def have_dry_run():
    """Detect True if we're executing in dry_run mode, False otherwise."""
    # Presence of the option is what matters; its value is ignored.
    return config.cfg.has_option('devel', 'dry_run')
def no_dry_run(f):
    """A decorator which "disables" a function during a dry run.

    When the `devel` section of `haas.cfg` contains a `dry_run` option
    (regardless of its value), the decorated function is not executed;
    the call is logged at info level and None is returned instead, so
    callers must accept a None result gracefully.

    The intended use case is to disable functions which cannot run
    because, for example, the HaaS is executing on a developer's
    workstation with no configured switch, libvirt, etc.  Without the
    `dry_run` option this decorator has no effect.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not have_dry_run():
            return f(*args, **kwargs)
        # Dry run: record what would have been called, then skip it.
        logging.getLogger(__name__).info(
            'dry run, not executing: %s.%s(*%r,**%r)' %
            (f.__module__, f.__name__, args, kwargs))
        return None
    return wrapper
| apache-2.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/getpass.py | 59 | 5272 | """Utilities to get a password and/or the current user name.
getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
getuser() - Get the user name from the environment or password database.
GetPassWarning - This UserWarning is issued when getpass() cannot prevent
echoing of the password contents while reading.
On Windows, the msvcrt module will be used.
On the Mac EasyDialogs.AskPassword is used, if available.
"""
# Authors: Piers Lauder (original)
# Guido van Rossum (Windows support and cleanup)
# Gregory P. Smith (tty support & GetPassWarning)
import os, sys, warnings
__all__ = ["getpass","getuser","GetPassWarning"]
# Issued when getpass() cannot prevent echoing of the typed password.
class GetPassWarning(UserWarning): pass
def unix_getpass(prompt='Password: ', stream=None):
    """Prompt for a password, with echo turned off.
    Args:
      prompt: Written on stream to ask for the input.  Default: 'Password: '
      stream: A writable file object to display the prompt.  Defaults to
        the tty.  If no tty is available defaults to sys.stderr.
    Returns:
      The seKr3t input.
    Raises:
      EOFError: If our input tty or stdin was closed.
      GetPassWarning: When we were unable to turn echo off on the input.
    Always restores terminal settings before returning.
    """
    fd = None
    tty = None
    try:
        # Always try reading and writing directly on the tty first.
        fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
        tty = os.fdopen(fd, 'w+', 1)
        input = tty
        if not stream:
            stream = tty
    except EnvironmentError, e:
        # If that fails, see if stdin can be controlled.
        try:
            fd = sys.stdin.fileno()
        except:
            # stdin has no fileno (e.g. replaced object): echo cannot be
            # suppressed at all, so fall back immediately.
            passwd = fallback_getpass(prompt, stream)
        input = sys.stdin
        if not stream:
            stream = sys.stderr
    if fd is not None:
        # We have a controllable descriptor: disable echo via termios
        # for the duration of the read, restoring settings afterwards.
        passwd = None
        try:
            old = termios.tcgetattr(fd)     # a copy to save
            new = old[:]
            new[3] &= ~termios.ECHO  # 3 == 'lflags'
            try:
                termios.tcsetattr(fd, termios.TCSADRAIN, new)
                passwd = _raw_input(prompt, stream, input=input)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old)
        except termios.error, e:
            if passwd is not None:
                # _raw_input succeeded.  The final tcsetattr failed.  Reraise
                # instead of leaving the terminal in an unknown state.
                raise
            # We can't control the tty or stdin.  Give up and use normal IO.
            # fallback_getpass() raises an appropriate warning.
            del input, tty  # clean up unused file objects before blocking
            passwd = fallback_getpass(prompt, stream)
    # Echo was off, so the user's newline was never shown; emit one.
    stream.write('\n')
    return passwd
def win_getpass(prompt='Password: ', stream=None):
    """Prompt for password with echo off, using Windows getch()."""
    # getch() only talks to the real console; if stdin was replaced we
    # cannot suppress echo, so fall back.
    if sys.stdin is not sys.__stdin__:
        return fallback_getpass(prompt, stream)
    import msvcrt
    for c in prompt:
        msvcrt.putch(c)
    pw = ""
    while 1:
        c = msvcrt.getch()
        if c == '\r' or c == '\n':
            break
        if c == '\003':
            # Ctrl-C arrives as a raw character from getch().
            raise KeyboardInterrupt
        if c == '\b':
            # Backspace: drop the last collected character.
            pw = pw[:-1]
        else:
            pw = pw + c
    msvcrt.putch('\r')
    msvcrt.putch('\n')
    return pw
def fallback_getpass(prompt='Password: ', stream=None):
    """Last-resort prompt: read normally, warning that echo stays on."""
    # stacklevel=2 points the warning at the getpass() caller.
    warnings.warn("Can not control echo on the terminal.", GetPassWarning,
                  stacklevel=2)
    if not stream:
        stream = sys.stderr
    print >>stream, "Warning: Password input may be echoed."
    return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None, input=None):
# A raw_input() replacement that doesn't save the string in the
# GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
stream.write(prompt)
stream.flush()
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
    """Get the username from the environment or password database.
    First try various environment variables, then the password
    database.  This works on Windows as long as USERNAME is set.
    """
    import os
    env_vars = ('LOGNAME', 'USER', 'LNAME', 'USERNAME')
    for user in (os.environ.get(name) for name in env_vars):
        if user:
            return user
    # If this fails, the exception will "explain" why
    import pwd
    return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate function
try:
    import termios
    # it's possible there is an incompatible termios from the
    # McMillan Installer, make sure we have a UNIX-compatible termios
    termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
    try:
        import msvcrt
    except ImportError:
        try:
            from EasyDialogs import AskPassword
        except ImportError:
            # No platform mechanism available at all: echoing fallback.
            getpass = fallback_getpass
        else:
            # Classic Mac OS: dialog-based password prompt.
            getpass = AskPassword
    else:
        # Windows: console prompt via msvcrt.
        getpass = win_getpass
else:
    # POSIX: tty prompt with echo disabled via termios.
    getpass = unix_getpass
| apache-2.0 |
mahendra-r/edx-platform | common/djangoapps/terrain/stubs/video_source.py | 181 | 1368 | """
Serve HTML5 video sources for acceptance tests
"""
from SimpleHTTPServer import SimpleHTTPRequestHandler
from .http import StubHttpService
from contextlib import contextmanager
import os
from logging import getLogger
LOGGER = getLogger(__name__)
class VideoSourceRequestHandler(SimpleHTTPRequestHandler):
    """
    Request handler for serving video sources locally.
    """
    def translate_path(self, path):
        """
        Map a request path onto the configured root directory, dropping
        any query string.

        For example /gizmo.mp4?1397160769634
        becomes <root_dir>/gizmo.mp4
        """
        full_path = '{}{}'.format(self.server.config.get('root_dir'), path)
        return full_path.split('?')[0]
class VideoSourceHttpService(StubHttpService):
    """
    Simple HTTP server for serving HTML5 Video sources locally for tests
    """
    HANDLER_CLASS = VideoSourceRequestHandler

    def __init__(self, port_num=0):
        @contextmanager
        def _remember_cwd():
            """
            Files are automatically served from the current directory
            so we need to change it, start the server, then set it back.
            """
            curdir = os.getcwd()
            try:
                yield
            finally:
                # Restore the working directory even if startup fails.
                os.chdir(curdir)

        # Start the server with the cwd protected by the context manager.
        with _remember_cwd():
            StubHttpService.__init__(self, port_num=port_num)
| agpl-3.0 |
abadger/ansible | test/lib/ansible_test/_internal/commands/coverage/report.py | 12 | 4797 | """Generate console code coverage reports."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ...io import (
read_json_file,
)
from ...util import (
display,
)
from ...data import (
data_context,
)
from .combine import (
command_coverage_combine,
CoverageCombineConfig,
)
from . import (
run_coverage,
)
def command_coverage_report(args):
    """Generate console code coverage reports.
    :type args: CoverageReportConfig
    """
    for output_file in command_coverage_combine(args):
        if args.group_by or args.stub:
            # The group key is encoded after '=' in the combined file name.
            group_label = ' '.join(os.path.basename(output_file).split('=')[1:])
            display.info('>>> Coverage Group: %s' % group_label)
        if output_file.endswith('-powershell'):
            # PowerShell coverage is rendered by our own formatter.
            display.info(_generate_powershell_output_report(args, output_file))
            continue
        # Python coverage is delegated to the coverage tool itself.
        options = []
        if args.show_missing:
            options.append('--show-missing')
        if args.include:
            options.extend(['--include', args.include])
        if args.omit:
            options.extend(['--omit', args.omit])
        run_coverage(args, output_file, 'report', options)
def _generate_powershell_output_report(args, coverage_file):
    """Render a text coverage table from a combined PowerShell coverage file.

    :type args: CoverageReportConfig
    :type coverage_file: str
    :rtype: str
    """
    coverage_info = read_json_file(coverage_file)
    root_path = data_context().content.root + '/'
    # Minimum column widths; the name column grows to fit the longest path.
    name_padding = 7
    cover_padding = 8
    file_report = []
    total_stmts = 0
    total_miss = 0
    for filename in sorted(coverage_info.keys()):
        hit_info = coverage_info[filename]
        # Report paths relative to the content root.
        if filename.startswith(root_path):
            filename = filename[len(root_path):]
        if args.omit and filename in args.omit:
            continue
        if args.include and filename not in args.include:
            continue
        stmts = len(hit_info)
        miss = len([c for c in hit_info.values() if c == 0])
        name_padding = max(name_padding, len(filename) + 3)
        total_stmts += stmts
        total_miss += miss
        # Guard the per-file percentage against division by zero: a file
        # with no recorded statements is treated as fully covered instead
        # of crashing the whole report.
        if stmts:
            cover = "{0}%".format(int((stmts - miss) / stmts * 100))
        else:
            cover = "100%"
        # Collapse consecutive missed line numbers into "start-end" ranges.
        missing = []
        current_missing = None
        sorted_lines = sorted([int(x) for x in hit_info.keys()])
        for idx, line in enumerate(sorted_lines):
            hit = hit_info[str(line)]
            if hit == 0 and current_missing is None:
                current_missing = line
            elif hit != 0 and current_missing is not None:
                end_line = sorted_lines[idx - 1]
                if current_missing == end_line:
                    missing.append(str(current_missing))
                else:
                    missing.append('%s-%s' % (current_missing, end_line))
                current_missing = None
        if current_missing is not None:
            # Close a run of misses that extends to the last line.
            end_line = sorted_lines[-1]
            if current_missing == end_line:
                missing.append(str(current_missing))
            else:
                missing.append('%s-%s' % (current_missing, end_line))
        file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
    if total_stmts == 0:
        return ''
    total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
    stmts_padding = max(8, len(str(total_stmts)))
    miss_padding = max(7, len(str(total_miss)))
    line_length = name_padding + stmts_padding + miss_padding + cover_padding
    header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
             'Cover'.rjust(cover_padding)
    if args.show_missing:
        header += 'Lines Missing'.rjust(16)
        line_length += 16
    line_break = '-' * line_length
    lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
                             str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
                             ' ' + ', '.join(f['missing']) if args.show_missing else '')
             for f in file_report]
    totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
             str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
    report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
    return report
class CoverageReportConfig(CoverageCombineConfig):
    """Configuration for the coverage report command."""
    def __init__(self, args):
        """
        :type args: any
        """
        super(CoverageReportConfig, self).__init__(args)
        # Options forwarded to report generation (see
        # command_coverage_report / _generate_powershell_output_report).
        self.show_missing = args.show_missing  # type: bool
        self.include = args.include  # type: str
        self.omit = args.omit  # type: str
| gpl-3.0 |
jrha/aquilon | lib/python2.6/aquilon/worker/commands/reset_advertised_status.py | 2 | 2700 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq reset advertised status --hostname`."""
from aquilon.exceptions_ import ArgumentError, IncompleteError
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.templates.domain import TemplateDomain
from aquilon.worker.templates.host import PlenaryHost
from aquilon.worker.locks import lock_queue, CompileKey
class CommandResetAdvertisedStatus(BrokerCommand):
    """ reset advertised status for single host """

    required_parameters = ["hostname"]

    def render(self, session, logger, hostname, **arguments):
        """Clear the advertise_status flag of *hostname* and recompile it.

        Raises ArgumentError when the host is in 'ready' status: the
        advertised status may only be reset while the host is not ready.
        """
        dbhost = hostname_to_host(session, hostname)
        if dbhost.status.name == 'ready':
            raise ArgumentError("{0:l} is in ready status, "
                                "advertised status can be reset only "
                                "when host is in non ready state".format(dbhost))
        dbhost.advertise_status = False
        session.add(dbhost)
        session.flush()
        # Only compileable archetypes have plenary templates to rebuild.
        if dbhost.archetype.is_compileable:
            return self.compile(session, logger, dbhost)
        return

    def compile(self, session, logger, dbhost):
        """ compile plenary templates """
        plenary = PlenaryHost(dbhost, logger=logger)
        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                         logger=logger)
        try:
            lock_queue.acquire(key)
            plenary.write(locked=True)
            td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
            td.compile(session, only=[dbhost.fqdn], locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
        except:
            # Intentionally broad: restore the previous plenary before
            # re-raising whatever went wrong during the compile.
            plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
| apache-2.0 |
jalexvig/tensorflow | tensorflow/python/ops/matmul_benchmark_test.py | 51 | 6272 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for matmul_benchmark.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import matmul_benchmark
from tensorflow.python.platform import test as googletest
from tensorflow.python.platform import tf_logging
def BuildGraphTest(n, m, k, transpose_a, transpose_b, dtype):
  """Return a test method verifying graph construction for one matmul shape."""

  shape = (n, m, k, transpose_a, transpose_b)

  def Test(self):
    if googletest.is_gpu_available():
      tf_logging.info("Testing BuildGraphTest %s", shape)
      self._VerifyBuildGraph(n, m, k, transpose_a, transpose_b, dtype)
    else:
      tf_logging.info("Skipping BuildGraphTest %s", shape)

  return Test
def RunGraphTest(n, m, k, transpose_a, transpose_b, dtype):
  """Return a test method verifying one benchmark run for one matmul shape."""

  shape = (n, m, k, transpose_a, transpose_b)

  def Test(self):
    if googletest.is_gpu_available():
      tf_logging.info("Testing RunGraphTest %s", shape)
      self._VerifyRunGraph(n, m, k, transpose_a, transpose_b, dtype)
    else:
      tf_logging.info("Skipping RunGraphTest %s", shape)

  return Test
class MatmulBenchmarkTest(googletest.TestCase):
  """Checks matmul_benchmark graph construction and a single benchmark run."""

  def _StripNode(self, nd):
    # Keep only name/op/input (and device, if set) so graphs can be compared
    # without being sensitive to node attrs.
    snode = node_def_pb2.NodeDef(name=nd.name, op=nd.op, input=nd.input)
    if nd.device:
      snode.device = nd.device
    return snode

  def _StripGraph(self, gd):
    # Apply _StripNode to every node of the GraphDef.
    return graph_pb2.GraphDef(node=[self._StripNode(nd) for nd in gd.node])

  def _VerifyBuildGraph(self, n, m, k, transpose_a, transpose_b, dtype):
    """Builds the benchmark graph and compares it against a golden proto."""
    graph = ops.Graph()
    with graph.as_default():
      matmul_benchmark.build_graph(googletest.gpu_device_name(), n, m, k,
                                   transpose_a, transpose_b, dtype)
      gd = graph.as_graph_def()
      dev = googletest.gpu_device_name()
      proto_expected = """
    node { name: "random_uniform/shape" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform/min" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform/max" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform/RandomUniform" op: "RandomUniform" input: "random_uniform/shape" device: \"""" + dev + """\" }
    node { name: "random_uniform/sub" op: "Sub" input: "random_uniform/max" input: "random_uniform/min" device: \"""" + dev + """\" }
    node { name: "random_uniform/mul" op: "Mul" input: "random_uniform/RandomUniform" input: "random_uniform/sub" device: \"""" + dev + """\" }
    node { name: "random_uniform" op: "Add" input: "random_uniform/mul" input: "random_uniform/min" device: \"""" + dev + """\" }
    node { name: "Variable" op: "VariableV2" device: \"""" + dev + """\" }
    node { name: "Variable/Assign" op: "Assign" input: "Variable" input: "random_uniform" device: \"""" + dev + """\" }
    node { name: "Variable/read" op: "Identity" input: "Variable" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/shape" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/min" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/max" op: "Const" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/RandomUniform" op: "RandomUniform" input: "random_uniform_1/shape" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/sub" op: "Sub" input: "random_uniform_1/max" input: "random_uniform_1/min" device: \"""" + dev + """\" }
    node { name: "random_uniform_1/mul" op: "Mul" input: "random_uniform_1/RandomUniform" input: "random_uniform_1/sub" device: \"""" + dev + """\" }
    node { name: "random_uniform_1" op: "Add" input: "random_uniform_1/mul" input: "random_uniform_1/min" device: \"""" + dev + """\" }
    node { name: "Variable_1" op: "VariableV2" device: \"""" + dev + """\" }
    node { name: "Variable_1/Assign" op: "Assign" input: "Variable_1" input: "random_uniform_1" device: \"""" + dev + """\" }
    node { name: "Variable_1/read" op: "Identity" input: "Variable_1" device: \"""" + dev + """\" }
    node { name: "MatMul" op: "MatMul" input: "Variable/read" input: "Variable_1/read" device: \"""" + dev + """\" }
    node { name: "group_deps" op: "NoOp" input: "^MatMul" device: \"""" + dev + """\" }
      """
      self.assertProtoEquals(str(proto_expected), self._StripGraph(gd))

  def _VerifyRunGraph(self, n, m, k, transpose_a, transpose_b, dtype):
    """Runs the benchmark graph once and checks a plausible duration comes back."""
    benchmark_instance = matmul_benchmark.MatmulBenchmark()
    duration = benchmark_instance.run_graph(googletest.gpu_device_name(), n, m,
                                            k, transpose_a, transpose_b, 1,
                                            dtype)
    self.assertTrue(duration > 1e-6)
if __name__ == "__main__":
  # Generate one BuildGraph/RunGraph test method per (dtype, n, m, transpose)
  # combination so each shows up as an individual test case.
  dtypes = [np.float32, np.float64]
  index = 0
  for _dtype in dtypes:
    for _n, _m, (_transpose_a, _transpose_b) in itertools.product(
        [512, 1024], [1, 8, 16, 128], [(False, False), (True, False),
                                       (False, True)]):
      _k = _n  # square inner dimension: k is tied to n
      setattr(MatmulBenchmarkTest, "testBuildGraph_" + str(index),
              BuildGraphTest(_n, _m, _k, _transpose_a, _transpose_b, _dtype))
      setattr(MatmulBenchmarkTest, "testRunGraph_" + str(index),
              RunGraphTest(_n, _m, _k, _transpose_a, _transpose_b, _dtype))
      index += 1
  googletest.main()
| apache-2.0 |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/pygments/lexers/tcl.py | 72 | 5398 | # -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
    """
    For Tcl source code.

    .. versionadded:: 0.10
    """

    # Tcl control-flow / core language commands, matched on word boundaries
    # and highlighted as Keyword.
    keyword_cmds_re = words((
        'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
        'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
        'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
        'vwait', 'while'), prefix=r'\b', suffix=r'\b')

    # Built-in library commands, highlighted as Name.Builtin.
    builtin_cmds_re = words((
        'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
        'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
        'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
        'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
        'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
        'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
        'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
        'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
        'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')

    name = 'Tcl'
    aliases = ['tcl']
    filenames = ['*.tcl', '*.rvt']
    mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']

    def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
        # Helper used only at class-definition time: builds the command rules
        # for each bracketing context ("", "-in-brace", "-in-bracket",
        # "-in-paren") so the matching 'params*' state is entered.
        return [
            (keyword_cmds_re, Keyword, 'params' + context),
            (builtin_cmds_re, Name.Builtin, 'params' + context),
            (r'([\w.-]+)', Name.Variable, 'params' + context),
            (r'#', Comment, 'comment'),
        ]

    # State machine: each bracket type gets its own state so closing
    # delimiters pop back to the right place.
    tokens = {
        'root': [
            include('command'),
            include('basic'),
            include('data'),
            (r'\}', Keyword),  # HACK: somehow we miscounted our braces
        ],
        'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
        'command-in-brace': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-brace"),
        'command-in-bracket': _gen_command_rules(keyword_cmds_re,
                                                 builtin_cmds_re,
                                                 "-in-bracket"),
        'command-in-paren': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-paren"),
        'basic': [
            (r'\(', Keyword, 'paren'),
            (r'\[', Keyword, 'bracket'),
            (r'\{', Keyword, 'brace'),
            (r'"', String.Double, 'string'),
            (r'(eq|ne|in|ni)\b', Operator.Word),
            (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
        ],
        'data': [
            (r'\s+', Text),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'0[0-7]+', Number.Oct),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\$([\w.:-]+)', Name.Variable),
            (r'([\w.:-]+)', Text),
        ],
        'params': [
            (r';', Keyword, '#pop'),
            (r'\n', Text, '#pop'),
            (r'(else|elseif|then)\b', Keyword),
            include('basic'),
            include('data'),
        ],
        'params-in-brace': [
            (r'\}', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-paren': [
            (r'\)', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-bracket': [
            (r'\]', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'string': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
            (r'"', String.Double, '#pop')
        ],
        'string-square': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
            (r'\]', String.Double, '#pop')
        ],
        'brace': [
            (r'\}', Keyword, '#pop'),
            include('command-in-brace'),
            include('basic'),
            include('data'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('command-in-paren'),
            include('basic'),
            include('data'),
        ],
        'bracket': [
            (r'\]', Keyword, '#pop'),
            include('command-in-bracket'),
            include('basic'),
            include('data'),
        ],
        'comment': [
            (r'.*[^\\]\n', Comment, '#pop'),
            (r'.*\\\n', Comment),
        ],
    }

    def analyse_text(text):
        # Claim files whose shebang mentions tcl.
        return shebang_matches(text, r'(tcl)')
| mit |
gltn/stdm | stdm/third_party/reportlab/pdfbase/cidfonts.py | 2 | 18838 | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/pdfbase/cidfonts.py
#$Header $
__version__='3.3.0'
__doc__="""CID (Asian multi-byte) font support.
This defines classes to represent CID fonts. They know how to calculate
their own width and how to write themselves into PDF files."""
import os
import marshal
import time
try:
from hashlib import md5
except ImportError:
from md5 import md5
import reportlab
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase._cidfontdata import allowedTypeFaces, allowedEncodings, CIDFontInfo, \
defaultUnicodeEncodings, widthsByUnichar
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
from reportlab.lib.rl_accel import escapePDF
from reportlab.rl_config import CMapSearchPath
from reportlab.lib.utils import isSeq, isBytes
#quick hackery for 2.0 release. Now we always do unicode, and have built in
#the CMAP data, any code to load CMap files is not needed.
DISABLE_CMAP = True
def findCMapFile(name):
    """Return the full path of the CMAP file *name*.

    Searches each directory on ``CMapSearchPath`` in order and returns the
    first existing match.

    :raises IOError: if the file is not found in any search directory.
    """
    for directory in CMapSearchPath:
        # os.path.join instead of manual os.sep concatenation
        cmapfile = os.path.join(directory, name)
        if os.path.isfile(cmapfile):
            return cmapfile
    raise IOError('CMAP file for encodings "%s" not found!' % name)
def structToPDF(structure):
    """Recursively convert a nested Python structure to pdfdoc objects.

    Dicts become PDFDictionary, sequences become PDFArray, and any other
    value is passed through unchanged.
    """
    if isinstance(structure, dict):
        converted = {key: structToPDF(value) for key, value in structure.items()}
        return pdfdoc.PDFDictionary(converted)
    if isSeq(structure):
        return pdfdoc.PDFArray([structToPDF(element) for element in structure])
    return structure
class CIDEncoding(pdfmetrics.Encoding):
    """Multi-byte encoding. These are loaded from CMAP files.

    A CMAP file is like a mini-codec. It defines the correspondence
    between code points in the (multi-byte) input data and Character
    IDs. """
    # aims to do similar things to Brian Hooper's CMap class,
    # but I could not get it working and had to rewrite.
    # also, we should really rearrange our current encoding
    # into a SingleByteEncoding since many of its methods
    # should not apply here.

    def __init__(self, name, useCache=1):
        """Load the named encoding, using the '.fastmap' cache if allowed.

        With DISABLE_CMAP set (the default since the unicode transition),
        no CMAP data is loaded at all and the tables stay empty.
        """
        self.name = name
        self._mapFileHash = None
        self._codeSpaceRanges = []
        self._notDefRanges = []
        self._cmap = {}
        self.source = None
        if not DISABLE_CMAP:
            if useCache:
                from reportlab.lib.utils import get_rl_tempdir
                fontmapdir = get_rl_tempdir('FastCMAPS')
                if os.path.isfile(fontmapdir + os.sep + name + '.fastmap'):
                    self.fastLoad(fontmapdir)
                    self.source = fontmapdir + os.sep + name + '.fastmap'
                else:
                    self.parseCMAPFile(name)
                    self.source = 'CMAP: ' + name
                    self.fastSave(fontmapdir)
            else:
                self.parseCMAPFile(name)

    def _hash(self, text):
        """Return the MD5 digest of *text*; fingerprints the CMAP source."""
        hasher = md5()
        hasher.update(text)
        return hasher.digest()

    def parseCMAPFile(self, name):
        """This is a tricky one as CMAP files are Postscript
        ones. Some refer to others with a 'usecmap'
        command"""
        cmapfile = findCMapFile(name)
        # this will CRAWL with the unicode encodings...
        # BUGFIX: close the file handle instead of leaking it.
        with open(cmapfile, 'r') as f:
            rawdata = f.read()
        self._mapFileHash = self._hash(rawdata)

        # if it contains the token 'usecmap', parse the other
        # cmap file first....
        usecmap_pos = rawdata.find('usecmap')
        if usecmap_pos > -1:
            # They tell us to look in another file for the code space
            # ranges. The file to use is the word preceding 'usecmap'.
            chunk = rawdata[0:usecmap_pos]
            words = chunk.split()
            otherCMAPName = words[-1]
            self.parseCMAPFile(otherCMAPName)
            # now continue parsing this, as it may override some settings

        words = rawdata.split()
        while words != []:
            if words[0] == 'begincodespacerange':
                # pairs of <hexStart> <hexEnd> until the matching end token
                words = words[1:]
                while words[0] != 'endcodespacerange':
                    strStart, strEnd, words = words[0], words[1], words[2:]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    self._codeSpaceRanges.append((start, end),)
            elif words[0] == 'beginnotdefrange':
                # triples of <hexStart> <hexEnd> <decimalCID> for undefined codes
                words = words[1:]
                while words[0] != 'endnotdefrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    self._notDefRanges.append((start, end, value),)
                    words = words[3:]
            elif words[0] == 'begincidrange':
                words = words[1:]
                while words[0] != 'endcidrange':
                    strStart, strEnd, strValue = words[0:3]
                    start = int(strStart[1:-1], 16)
                    end = int(strEnd[1:-1], 16)
                    value = int(strValue)
                    # this means that 'start' corresponds to 'value',
                    # start+1 corresponds to value+1 and so on up
                    # to end
                    offset = 0
                    while start + offset <= end:
                        self._cmap[start + offset] = value + offset
                        offset = offset + 1
                    words = words[3:]
            else:
                words = words[1:]

    def translate(self, text):
        "Convert a string into a list of CIDs"
        output = []
        cmap = self._cmap
        lastChar = ''
        for char in text:
            if lastChar != '':
                # two-byte mode: combine with the pending lead byte
                num = ord(lastChar) * 256 + ord(char)
            else:
                num = ord(char)
            lastChar = char
            found = 0
            for low, high in self._codeSpaceRanges:
                if low < num < high:
                    try:
                        cid = cmap[num]
                    except KeyError:
                        # not defined. Try to find the appropriate
                        # notdef character, or failing that return zero
                        cid = 0
                        for low2, high2, notdef in self._notDefRanges:
                            if low2 < num < high2:
                                cid = notdef
                                break
                    output.append(cid)
                    found = 1
                    break
            if found:
                lastChar = ''
            else:
                lastChar = char
        return output

    def fastSave(self, directory):
        """Marshal the parsed tables to '<name>.fastmap' in *directory*."""
        with open(os.path.join(directory, self.name + '.fastmap'), 'wb') as f:
            marshal.dump(self._mapFileHash, f)
            marshal.dump(self._codeSpaceRanges, f)
            marshal.dump(self._notDefRanges, f)
            marshal.dump(self._cmap, f)

    def fastLoad(self, directory):
        """Restore the parsed tables from '<name>.fastmap' in *directory*.

        BUGFIX: the previous implementation timed this method with
        time.clock(), which was removed in Python 3.8 and made fastLoad
        raise AttributeError there; the timing only fed a commented-out
        debug print, so it has been dropped.
        """
        with open(os.path.join(directory, self.name + '.fastmap'), 'rb') as f:
            self._mapFileHash = marshal.load(f)
            self._codeSpaceRanges = marshal.load(f)
            self._notDefRanges = marshal.load(f)
            self._cmap = marshal.load(f)

    def getData(self):
        """Simple persistence helper. Return a dict with all that matters."""
        return {
            'mapFileHash': self._mapFileHash,
            'codeSpaceRanges': self._codeSpaceRanges,
            'notDefRanges': self._notDefRanges,
            'cmap': self._cmap,
            }
class CIDTypeFace(pdfmetrics.TypeFace):
    """Multi-byte type face.

    Conceptually similar to a single byte typeface,
    but the glyphs are identified by a numeric Character
    ID (CID) and not a glyph name. """

    def __init__(self, name):
        """Initialised from one of the canned dictionaries in allowedEncodings
        Or rather, it will be shortly..."""
        pdfmetrics.TypeFace.__init__(self, name)
        self._extractDictInfo(name)

    def _extractDictInfo(self, name):
        # Pull ascent/descent and the width tables for this face out of the
        # static CIDFontInfo dictionary.
        try:
            fontDict = CIDFontInfo[name]
        except KeyError:
            raise KeyError("Unable to find information on CID typeface '%s'" % name +
                           "Only the following font names work:" + repr(allowedTypeFaces))
        descFont = fontDict['DescendantFonts'][0]
        self.ascent = descFont['FontDescriptor']['Ascent']
        self.descent = descFont['FontDescriptor']['Descent']
        self._defaultWidth = descFont['DW']
        self._explicitWidths = self._expandWidths(descFont['W'])
        # should really support self.glyphWidths, self.glyphNames
        # but not done yet.

    def _expandWidths(self, compactWidthArray):
        """Expands Adobe nested list structure to get a dictionary of widths.

        Here is an example of such a structure.::

            (
            # starting at character ID 1, next n  characters have the widths given.
            1,  (277,305,500,668,668,906,727,305,445,445,508,668,305,379,305,539),
            # all Characters from ID 17 to 26 are 668 em units wide
            17, 26, 668,
            27, (305, 305, 668, 668, 668, 566, 871, 727, 637, 652, 699, 574, 555,
                 676, 687, 242, 492, 664, 582, 789, 707, 734, 582, 734, 605, 605,
                 641, 668, 727, 945, 609, 609, 574, 445, 668, 445, 668, 668, 590,
                 555, 609, 547, 602, 574, 391, 609, 582, 234, 277, 539, 234, 895,
                 582, 605, 602, 602, 387, 508, 441, 582, 562, 781, 531, 570, 555,
                 449, 246, 449, 668),
            # these must be half width katakana and the like.
            231, 632, 500
            )
        """
        data = compactWidthArray[:]
        widths = {}
        while data:
            start, data = data[0], data[1:]
            if isSeq(data[0]):
                # (start, (w0, w1, ...)): consecutive CIDs with listed widths
                items, data = data[0], data[1:]
                for offset in range(len(items)):
                    widths[start + offset] = items[offset]
            else:
                # (start, end, width): a run of CIDs sharing a single width
                end, width, data = data[0], data[1], data[2:]
                for idx in range(start, end+1):
                    widths[idx] = width
        return widths

    def getCharWidth(self, characterId):
        # Fall back to the font's default width for CIDs without an entry.
        return self._explicitWidths.get(characterId, self._defaultWidth)
class CIDFont(pdfmetrics.Font):
    "Represents a built-in multi-byte font"
    # marker used elsewhere in pdfmetrics to distinguish multi-byte fonts
    _multiByte = 1

    def __init__(self, face, encoding):
        """Combine a CID typeface with one of its allowed encodings."""
        assert face in allowedTypeFaces, "TypeFace '%s' not supported! Use any of these instead: %s" % (face, allowedTypeFaces)
        self.faceName = face
        #should cache in registry...
        self.face = CIDTypeFace(face)
        assert encoding in allowedEncodings, "Encoding '%s' not supported! Use any of these instead: %s" % (encoding, allowedEncodings)
        self.encodingName = encoding
        self.encoding = CIDEncoding(encoding)
        #legacy hack doing quick cut and paste.
        self.fontName = self.faceName + '-' + self.encodingName
        self.name = self.fontName
        # need to know if it is vertical or horizontal; encodings ending in
        # 'V' are the vertical variants
        self.isVertical = (self.encodingName[-1] == 'V')
        #no substitutes initially
        self.substitutionFonts = []

    def formatForPdf(self, text):
        # Escape the already multi-byte-encoded text for a PDF string literal.
        encoded = escapePDF(text)
        #print 'encoded CIDFont:', encoded
        return encoded

    def stringWidth(self, text, size, encoding=None):
        """This presumes non-Unicode input.  UnicodeCIDFont wraps it for that context"""
        cidlist = self.encoding.translate(text)
        if self.isVertical:
            #this part is "not checked!" but seems to work.
            #assume each is 1000 ems high
            return len(cidlist) * size
        else:
            w = 0
            for cid in cidlist:
                w = w + self.face.getCharWidth(cid)
            return 0.001 * w * size

    def addObjects(self, doc):
        """The explicit code in addMinchoObjects and addGothicObjects
        will be replaced by something that pulls the data from
        _cidfontdata.py in the next few days."""
        internalName = 'F' + repr(len(doc.fontMapping)+1)
        bigDict = CIDFontInfo[self.face.name]
        bigDict['Name'] = '/' + internalName
        bigDict['Encoding'] = '/' + self.encodingName
        #convert to PDF dictionary/array objects
        cidObj = structToPDF(bigDict)
        # link into document, and add to font map
        r = doc.Reference(cidObj, internalName)
        fontDict = doc.idToObject['BasicFonts'].dict
        fontDict[internalName] = r
        doc.fontMapping[self.name] = '/' + internalName
class UnicodeCIDFont(CIDFont):
    """Wraps up CIDFont to hide explicit encoding choice;
    encodes text for output as UTF16.

    lang should be one of 'jpn',chs','cht','kor' for now.
    if vertical is set, it will select a different widths array
    and possibly glyphs for some punctuation marks.

    halfWidth is only for Japanese.

    >>> dodgy = UnicodeCIDFont('nonexistent')
    Traceback (most recent call last):
    ...
    KeyError: "don't know anything about CID font nonexistent"
    >>> heisei = UnicodeCIDFont('HeiseiMin-W3')
    >>> heisei.name
    'HeiseiMin-W3'
    >>> heisei.language
    'jpn'
    >>> heisei.encoding.name
    'UniJIS-UCS2-H'
    >>> #This is how PDF data gets encoded.
    >>> print(heisei.formatForPdf('hello'))
    \\000h\\000e\\000l\\000l\\000o
    >>> tokyo = u'\u6771\u4AEC'
    >>> print(heisei.formatForPdf(tokyo))
    gqJ\\354
    >>> print(heisei.stringWidth(tokyo,10))
    20.0
    >>> print(heisei.stringWidth('hello world',10))
    45.83
    """

    def __init__(self, face, isVertical=False, isHalfWidth=False):
        #pass
        try:
            lang, defaultEncoding = defaultUnicodeEncodings[face]
        except KeyError:
            raise KeyError("don't know anything about CID font %s" % face)

        #we know the languages now.
        self.language = lang

        # Rebuild the encoding name from the default: strip the trailing
        # H/V, optionally insert the half-width marker, then append the
        # requested writing direction.  These rules work for the 7 fonts
        # provided.
        enc = defaultEncoding[:-1]
        if isHalfWidth:
            enc = enc + 'HW-'
        if isVertical:
            enc = enc + 'V'
        else:
            enc = enc + 'H'

        #now we can do the more general case
        CIDFont.__init__(self, face, enc)
        #self.encName = 'utf_16_le'
        #it's simpler for unicode, just use the face name
        self.name = self.fontName = face
        self.vertical = isVertical
        self.isHalfWidth = isHalfWidth

        self.unicodeWidths = widthsByUnichar[self.name]

    def formatForPdf(self, text):
        # Encode as UTF-16BE without the BOM, then escape for a PDF string.
        from codecs import utf_16_be_encode
        #print 'formatting %s: %s' % (type(text), repr(text))
        if isBytes(text):
            text = text.decode('utf8')
        utfText = utf_16_be_encode(text)[0]
        encoded = escapePDF(utfText)
        #print '  encoded:',encoded
        return encoded
        #
        #result = escapePDF(encoded)
        #print '    -> %s' % repr(result)
        #return result

    def stringWidth(self, text, size, encoding=None):
        "Just ensure we do width test on characters, not bytes..."
        if isBytes(text):
            text = text.decode('utf8')
        # characters without a width entry are assumed to be full-width (1000)
        widths = self.unicodeWidths
        return size * 0.001 * sum([widths.get(uch, 1000) for uch in text])
        #return CIDFont.stringWidth(self, text, size, encoding)
def precalculate(cmapdir):
    """Parse every CMAP file in *cmapdir* and cache each as a '.fastmap'.

    Files that already have a fastmap are left alone; files that fail to
    parse are reported and skipped.
    """
    # NOTE: module-level 'import os' is already in effect; the old local
    # re-import and the builtin-shadowing loop variable 'file' are gone.
    for fn in os.listdir(cmapdir):
        if os.path.isfile(cmapdir + os.sep + fn + '.fastmap'):
            continue
        try:
            enc = CIDEncoding(fn)
        except Exception:
            # BUGFIX: the old message interpolated 'enc', which is unbound
            # when CIDEncoding() itself raised (NameError); report the
            # offending file name instead.
            print('cannot parse %s, skipping' % fn)
            continue
        enc.fastSave(cmapdir)
        print('saved %s.fastmap' % fn)
def test():
    # only works if you have correct encodings on your box!
    # Smoke test: writes a small PDF using the Japanese CID fonts and then
    # exercises CIDEncoding.translate and CIDFont.stringWidth directly.
    c = Canvas('test_japanese.pdf')
    c.setFont('Helvetica', 30)
    c.drawString(100,700, 'Japanese Font Support')

    pdfmetrics.registerFont(CIDFont('HeiseiMin-W3','90ms-RKSJ-H'))
    pdfmetrics.registerFont(CIDFont('HeiseiKakuGo-W5','90ms-RKSJ-H'))

    # the two typefaces
    c.setFont('HeiseiMin-W3-90ms-RKSJ-H', 16)
    # this says "This is HeiseiMincho" in shift-JIS.  Not all our readers
    # have a Japanese PC, so I escaped it.  On a Japanese-capable
    # system, print the string to see Kanji
    message1 = '\202\261\202\352\202\315\225\275\220\254\226\276\222\251\202\305\202\267\201B'
    c.drawString(100, 675, message1)
    c.save()
    print('saved test_japanese.pdf')

##    print 'CMAP_DIR = ', CMAP_DIR
##    tf1 = CIDTypeFace('HeiseiMin-W3')
##    print 'ascent = ',tf1.ascent
##    print 'descent = ',tf1.descent
##    for cid in [1,2,3,4,5,18,19,28,231,1742]:
##        print 'width of cid %d = %d' % (cid, tf1.getCharWidth(cid))

    encName = '90ms-RKSJ-H'
    enc = CIDEncoding(encName)
    print(message1, '->', enc.translate(message1))

    f = CIDFont('HeiseiMin-W3','90ms-RKSJ-H')
    print('width = %0.2f' % f.stringWidth(message1, 10))

    #testing all encodings
##    import time
##    started = time.time()
##    import glob
##    for encName in _cidfontdata.allowedEncodings:
##    #encName = '90ms-RKSJ-H'
##        enc = CIDEncoding(encName)
##        print 'encoding %s:' % encName
##        print '    codeSpaceRanges = %s' % enc._codeSpaceRanges
##        print '    notDefRanges = %s' % enc._notDefRanges
##        print '    mapping size = %d' % len(enc._cmap)
##    finished = time.time()
##    print 'constructed all encodings in %0.2f seconds' % (finished - started)
if __name__=='__main__':
    # Run the doctests embedded in this module (see UnicodeCIDFont); the
    # interactive smoke test is left disabled.
    import doctest
    from reportlab.pdfbase import cidfonts
    doctest.testmod(cidfonts)
    #test()
| gpl-2.0 |
chyeh727/django | django/contrib/gis/geos/prototypes/prepared.py | 288 | 1214 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import (
GEOM_PTR, PREPGEOM_PTR, GEOSFuncFactory,
)
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
# Prepared geometry constructor and destructor.
geos_prepare = GEOSFuncFactory('GEOSPrepare', argtypes=[GEOM_PTR], restype=PREPGEOM_PTR)
# BUGFIX: the keyword was misspelled "argtpes", so the destructor's ctypes
# argument types were never registered.
prepared_destroy = GEOSFuncFactory('GEOSPreparedGeom_destroy', argtypes=[PREPGEOM_PTR])
# Prepared geometry binary predicate support.
class PreparedPredicate(GEOSFuncFactory):
    # All GEOS prepared-geometry predicates share the same C signature:
    # char GEOSPrepared<Op>(prepared geometry, geometry).
    argtypes = [PREPGEOM_PTR, GEOM_PTR]
    restype = c_char
    # check_predicate converts the returned char into a boolean, raising
    # on a GEOS error code.
    errcheck = staticmethod(check_predicate)
# Binary predicates available on prepared geometries.
prepared_contains = PreparedPredicate('GEOSPreparedContains')
prepared_contains_properly = PreparedPredicate('GEOSPreparedContainsProperly')
prepared_covers = PreparedPredicate('GEOSPreparedCovers')
prepared_intersects = PreparedPredicate('GEOSPreparedIntersects')

# Functions added in GEOS 3.3
prepared_crosses = PreparedPredicate('GEOSPreparedCrosses')
prepared_disjoint = PreparedPredicate('GEOSPreparedDisjoint')
prepared_overlaps = PreparedPredicate('GEOSPreparedOverlaps')
prepared_touches = PreparedPredicate('GEOSPreparedTouches')
prepared_within = PreparedPredicate('GEOSPreparedWithin')
| bsd-3-clause |
itskewpie/tempest | tempest/api/identity/admin/test_users.py | 1 | 9827 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools.matchers import Contains
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.test import attr
class UsersTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(UsersTestJSON, cls).setUpClass()
cls.alt_user = data_utils.rand_name('test_user_')
cls.alt_password = data_utils.rand_name('pass_')
cls.alt_email = cls.alt_user + '@testmail.tm'
cls.alt_tenant = data_utils.rand_name('test_tenant_')
cls.alt_description = data_utils.rand_name('desc_')
@attr(type='smoke')
def test_create_user(self):
# Create a user
self.data.setup_test_tenant()
resp, user = self.client.create_user(self.alt_user, self.alt_password,
self.data.tenant['id'],
self.alt_email)
self.data.users.append(user)
self.assertEqual('200', resp['status'])
self.assertEqual(self.alt_user, user['name'])
@attr(type='smoke')
def test_create_user_with_enabled(self):
# Create a user with enabled : False
self.data.setup_test_tenant()
name = data_utils.rand_name('test_user_')
resp, user = self.client.create_user(name, self.alt_password,
self.data.tenant['id'],
self.alt_email, enabled=False)
self.data.users.append(user)
self.assertEqual('200', resp['status'])
self.assertEqual(name, user['name'])
self.assertEqual('false', str(user['enabled']).lower())
self.assertEqual(self.alt_email, user['email'])
@attr(type='smoke')
def test_update_user(self):
# Test case to check if updating of user attributes is successful.
test_user = data_utils.rand_name('test_user_')
self.data.setup_test_tenant()
resp, user = self.client.create_user(test_user, self.alt_password,
self.data.tenant['id'],
self.alt_email)
# Delete the User at the end of this method
self.addCleanup(self.client.delete_user, user['id'])
# Updating user details with new values
u_name2 = data_utils.rand_name('user2-')
u_email2 = u_name2 + '@testmail.tm'
resp, update_user = self.client.update_user(user['id'], name=u_name2,
email=u_email2,
enabled=False)
# Assert response body of update user.
self.assertEqual(200, resp.status)
self.assertEqual(u_name2, update_user['name'])
self.assertEqual(u_email2, update_user['email'])
self.assertEqual('false', str(update_user['enabled']).lower())
# GET by id after updating
resp, updated_user = self.client.get_user(user['id'])
# Assert response body of GET after updating
self.assertEqual(u_name2, updated_user['name'])
self.assertEqual(u_email2, updated_user['email'])
self.assertEqual('false', str(updated_user['enabled']).lower())
@attr(type='smoke')
def test_delete_user(self):
# Delete a user
test_user = data_utils.rand_name('test_user_')
self.data.setup_test_tenant()
resp, user = self.client.create_user(test_user, self.alt_password,
self.data.tenant['id'],
self.alt_email)
self.assertEqual('200', resp['status'])
resp, body = self.client.delete_user(user['id'])
self.assertEqual('204', resp['status'])
@attr(type='smoke')
def test_user_authentication(self):
# Valid user's token is authenticated
self.data.setup_test_user()
# Get a token
self.token_client.auth(self.data.test_user, self.data.test_password,
self.data.test_tenant)
# Re-auth
resp, body = self.token_client.auth(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
self.assertEqual('200', resp['status'])
@attr(type='gate')
def test_authentication_request_without_token(self):
# Request for token authentication with a valid token in header
self.data.setup_test_user()
self.token_client.auth(self.data.test_user, self.data.test_password,
self.data.test_tenant)
# Get the token of the current client
token = self.client.get_auth()
# Delete the token from database
self.client.delete_token(token)
# Re-auth
resp, body = self.token_client.auth(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
self.assertEqual('200', resp['status'])
self.client.clear_auth()
@attr(type='smoke')
def test_get_users(self):
    """The user listing contains the freshly provisioned test user."""
    self.data.setup_test_user()
    resp, users = self.client.get_users()
    listed_names = [entry['name'] for entry in users]
    self.assertThat(listed_names,
                    Contains(self.data.test_user),
                    "Could not find %s" % self.data.test_user)
@attr(type='gate')
def test_list_users_for_tenant(self):
    """Every user created on a tenant shows up in its user listing."""
    self.data.setup_test_tenant()
    tenant_id = self.data.tenant['id']
    created_ids = []
    # Provision two users on the same tenant.
    for index in (1, 2):
        username = data_utils.rand_name('tenant_user%d_' % index)
        resp, new_user = self.client.create_user(
            username, 'password%d' % index, tenant_id, 'user%d@123' % index)
        self.assertEqual('200', resp['status'])
        created_ids.append(new_user['id'])
        self.data.users.append(new_user)
    # List of users for the respective tenant ID
    resp, body = self.client.list_users_for_tenant(tenant_id)
    self.assertIn(resp['status'], ('200', '203'))
    fetched_user_ids = [entry['id'] for entry in body]
    # Verify every created user id is present in the listing.
    missing_users = [uid for uid in created_ids
                     if uid not in fetched_user_ids]
    self.assertEqual(0, len(missing_users),
                     "Failed to find user %s in fetched list" %
                     ', '.join(m_user for m_user in missing_users))
@attr(type='gate')
def test_list_users_with_roles_for_tenant(self):
    """Users remain listed on the tenant after roles are assigned to them."""
    self.data.setup_test_user()
    self.data.setup_test_role()
    user = self.get_user_by_name(self.data.test_user)
    tenant = self.get_tenant_by_name(self.data.test_tenant)
    role = self.get_role_by_name(self.data.test_role)
    expected_ids = [user['id']]
    # Grant the role to the primary test user; keep the returned role
    # body, its id is reused for the second assignment below.
    resp, assigned_role = self.client.assign_user_role(tenant['id'],
                                                       user['id'],
                                                       role['id'])
    self.assertEqual('200', resp['status'])
    # Create a second user on the tenant and give it the same role.
    second_name = data_utils.rand_name('second_user_')
    resp, second_user = self.client.create_user(
        second_name, 'password1', self.data.tenant['id'], 'user2@123')
    self.assertEqual('200', resp['status'])
    expected_ids.append(second_user['id'])
    self.data.users.append(second_user)
    resp, assigned_role = self.client.assign_user_role(tenant['id'],
                                                       second_user['id'],
                                                       assigned_role['id'])
    self.assertEqual('200', resp['status'])
    # Both users must appear in the tenant's user listing.
    resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
    self.assertEqual('200', resp['status'])
    fetched_user_ids = [entry['id'] for entry in body]
    missing_users = [missing_user for missing_user in expected_ids
                     if missing_user not in fetched_user_ids]
    self.assertEqual(0, len(missing_users),
                     "Failed to find user %s in fetched list" %
                     ', '.join(m_user for m_user in missing_users))
class UsersTestXML(UsersTestJSON):
    """Run the same user tests as :class:`UsersTestJSON` via the XML interface."""
    _interface = 'xml'
| apache-2.0 |
ncliam/serverpos | openerp/addons/auth_openid/controllers/main.py | 382 | 10399 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
_logger = logging.getLogger(__name__)
# Route python-openid's internal log output through this module's logger.
oidutil.log = _logger.debug
def get_system_user():
    """Return system user info string, such as USERNAME-EUID"""
    try:
        username = getpass.getuser()
    except ImportError:
        if os.name != 'nt':
            raise
        # When 'USERNAME' is missing from the environment,
        # getpass.getuser() tries to import the unix-only 'pwd' module
        # and fails; on Windows fall back to the native win32 API.
        import win32api
        username = win32api.GetUserName()
    geteuid = getattr(os, 'geteuid', None)  # not available on all platforms
    if geteuid is not None:
        username = '%s-%d' % (username, geteuid())
    return username
# On-disk location of the OpenID file store, namespaced by the OS user
# (see get_system_user) inside the system temp directory.
_storedir = os.path.join(tempfile.gettempdir(),
                         'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
    """OpenID consumer with a workaround for Google Apps hosted domains.

    For responses coming from a ``https://www.google.com/a/...`` endpoint,
    the ``claimed_id``/``identity`` values are rewritten to the
    ``accounts/o8/user-xrds`` proxy URL and the message is re-signed with
    the stored association before standard verification runs.
    """

    def complete(self, message, endpoint, return_to):
        if message.getOpenIDNamespace() == consumer.OPENID2_NS:
            server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
            if server_url.startswith('https://www.google.com/a/'):
                # Look up the association that originally signed this message.
                assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
                assoc = self.store.getAssociation(server_url, assoc_handle)
                if assoc:
                    # update fields: point both identifiers at the user-xrds
                    # proxy so they match what discovery produced
                    for attr in ['claimed_id', 'identity']:
                        value = message.getArg(consumer.OPENID2_NS, attr, '')
                        value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.url_quote_plus(value)
                        message.setArg(consumer.OPENID2_NS, attr, value)
                    # now, resign the message (old signature covers the old
                    # identifier values and would no longer validate)
                    message.delArg(consumer.OPENID2_NS, 'sig')
                    message.delArg(consumer.OPENID2_NS, 'signed')
                    message = assoc.signMessage(message)
        return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
class OpenIDController(http.Controller):
    """HTTP endpoints implementing the OpenID relying-party login flow."""

    # Shared file-backed store for OpenID associations and nonces.
    _store = filestore.FileOpenIDStore(_storedir)

    # SReg/AX attributes requested from the identity provider.
    _REQUIRED_ATTRIBUTES = ['email']
    _OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()

    def _add_extensions(self, oidrequest):
        """Add extensions to the oidrequest"""
        # Request attributes via both SReg and AX so either style of
        # provider can answer.
        sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
                                        optional=self._OPTIONAL_ATTRIBUTES)
        oidrequest.addExtension(sreg_request)

        ax_request = ax.FetchRequest()
        for alias in self._REQUIRED_ATTRIBUTES:
            uri = utils.SREG2AX[alias]
            ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
        for alias in self._OPTIONAL_ATTRIBUTES:
            uri = utils.SREG2AX[alias]
            ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
        oidrequest.addExtension(ax_request)

    def _get_attributes_from_success_response(self, success_response):
        """Collect SReg and AX attribute values from a successful response.

        AX values are read second and therefore override SReg values for
        the same alias.
        """
        attrs = {}
        all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
        sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
        if sreg_resp:
            for attr in all_attrs:
                value = sreg_resp.get(attr)
                if value is not None:
                    attrs[attr] = value
        ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
        if ax_resp:
            for attr in all_attrs:
                value = ax_resp.getSingle(utils.SREG2AX[attr])
                if value is not None:
                    attrs[attr] = value
        return attrs

    def _get_realm(self):
        # The OpenID realm is the root URL of this server.
        return request.httprequest.host_url

    @http.route('/auth_openid/login/verify_direct', type='http', auth='none')
    def verify_direct(self, db, url):
        """HTTP variant of /verify: redirect or return the provider form."""
        result = self._verify(db, url)
        if 'error' in result:
            return werkzeug.exceptions.BadRequest(result['error'])
        if result['action'] == 'redirect':
            return werkzeug.utils.redirect(result['value'])
        return result['value']

    @http.route('/auth_openid/login/verify', type='json', auth='none')
    def verify(self, db, url):
        """JSON-RPC entry point for starting OpenID verification."""
        return self._verify(db, url)

    def _verify(self, db, url):
        """Begin OpenID discovery for *url* and build the provider request.

        Returns a dict with either an 'error' key or an 'action'
        ('redirect' or 'post') plus its 'value'.
        """
        redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
        realm = self._get_realm()

        session = dict(dbname=db, openid_url=url)  # TODO add origin page ?
        oidconsumer = consumer.Consumer(session, self._store)

        try:
            oidrequest = oidconsumer.begin(url)
        except consumer.DiscoveryFailure, exc:
            fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
            return {'error': fetch_error_string, 'title': 'OpenID Error'}

        if oidrequest is None:
            return {'error': 'No OpenID services found', 'title': 'OpenID Error'}

        # Keep the consumer session so /process can complete the handshake.
        request.session.openid_session = session
        self._add_extensions(oidrequest)

        if oidrequest.shouldSendRedirect():
            redirect_url = oidrequest.redirectURL(realm, redirect_to)
            return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
        else:
            form_html = oidrequest.htmlMarkup(realm, redirect_to)
            return {'action': 'post', 'value': form_html, 'session_id': request.session_id}

    @http.route('/auth_openid/login/process', type='http', auth='none')
    def process(self, **kw):
        """Provider callback: complete verification and log the user in.

        On success, looks up an active user matching the OpenID url/email,
        rotates its openid_key and redirects into the database; otherwise
        stores a message in the session and redirects to the login page.
        """
        session = getattr(request.session, 'openid_session', None)
        if not session:
            return set_cookie_and_redirect('/')

        oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)

        query = request.httprequest.args
        info = oidconsumer.complete(query, request.httprequest.base_url)
        display_identifier = info.getDisplayIdentifier()

        session['status'] = info.status

        if info.status == consumer.SUCCESS:
            dbname = session['dbname']
            registry = RegistryManager.get(dbname)
            with registry.cursor() as cr:
                Modules = registry.get('ir.module.module')

                installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
                if installed:
                    Users = registry.get('res.users')
                    #openid_url = info.endpoint.canonicalID or display_identifier
                    openid_url = session['openid_url']
                    attrs = self._get_attributes_from_success_response(info)
                    attrs['openid_url'] = openid_url
                    session['attributes'] = attrs
                    openid_email = attrs.get('email', False)

                    # Match by openid_url, and by openid_email when the
                    # provider supplied one (or when none is stored yet).
                    domain = []
                    if openid_email:
                        domain += ['|', ('openid_email', '=', False)]
                        domain += [('openid_email', '=', openid_email)]

                    domain += [('openid_url', '=', openid_url), ('active', '=', True)]

                    ids = Users.search(cr, SUPERUSER_ID, domain)
                    assert len(ids) < 2
                    if ids:
                        user_id = ids[0]
                        login = Users.browse(cr, SUPERUSER_ID, user_id).login
                        # One-time key used as the password for this login.
                        key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
                        Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
                        # TODO fill empty fields with the ones from sreg/ax
                        cr.commit()
                        return login_and_redirect(dbname, login, key)

            session['message'] = 'This OpenID identifier is not associated to any active users'

        elif info.status == consumer.SETUP_NEEDED:
            session['message'] = info.setup_url
        elif info.status == consumer.FAILURE and display_identifier:
            fmt = "Verification of %s failed: %s"
            session['message'] = fmt % (display_identifier, info.message)
        else:   # FAILURE
            # Either we don't understand the code or there is no
            # openid_url included with the error. Give a generic
            # failure message. The library should supply debug
            # information in a log.
            session['message'] = 'Verification failed.'

        return set_cookie_and_redirect('/#action=login&loginerror=1')

    @http.route('/auth_openid/login/status', type='json', auth='none')
    def status(self):
        """Report the status/message of the last OpenID attempt."""
        session = getattr(request.session, 'openid_session', {})
        return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jeanmask/opps | opps/articles/migrations/0001_initial.py | 5 | 19640 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(SchemaMigration):
    """South initial migration: create the Post, PostRelated, Album and
    Link tables for the articles app."""

    # The containers app's initial migration must run first (FK targets).
    depends_on = (
        ("containers", "0001_initial"),
    )

    def forwards(self, orm):
        """Create the articles tables and the Post<->Album M2M table."""
        # Adding model 'Post'
        db.create_table(u'articles_post', (
            (u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
            ('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
            ('content', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'articles', ['Post'])

        # Adding M2M table for field albums on 'Post'
        m2m_table_name = db.shorten_name(u'articles_post_albums')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'articles.post'], null=False)),
            ('album', models.ForeignKey(orm[u'articles.album'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'album_id'])

        # Adding model 'PostRelated'
        db.create_table(u'articles_postrelated', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('post', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='postrelated_post', null=True, on_delete=models.SET_NULL, to=orm['articles.Post'])),
            ('related', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='postrelated_related', null=True, on_delete=models.SET_NULL, to=orm['containers.Container'])),
            ('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ))
        db.send_create_signal(u'articles', ['PostRelated'])

        # Adding model 'Album'
        db.create_table(u'articles_album', (
            (u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
            ('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
        ))
        db.send_create_signal(u'articles', ['Album'])

        # Adding model 'Link'
        db.create_table(u'articles_link', (
            (u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
            ('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('container', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='link_containers', null=True, to=orm['containers.Container'])),
        ))
        db.send_create_signal(u'articles', ['Link'])

    def backwards(self, orm):
        """Drop everything created in :meth:`forwards`."""
        # Deleting model 'Post'
        db.delete_table(u'articles_post')

        # Removing M2M table for field albums on 'Post'
        db.delete_table(db.shorten_name(u'articles_post_albums'))

        # Deleting model 'PostRelated'
        db.delete_table(u'articles_postrelated')

        # Deleting model 'Album'
        db.delete_table(u'articles_album')

        # Deleting model 'Link'
        db.delete_table(u'articles_link')

    # Frozen ORM snapshot used by South to rebuild the model state for this
    # migration. Generated data -- do not edit by hand.
    models = {
        u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
            'Meta': {'object_name': User.__name__},
        },
        u'articles.album': {
            'Meta': {'object_name': 'Album'},
            u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
            'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
        },
        u'articles.link': {
            'Meta': {'object_name': 'Link'},
            'container': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'link_containers'", 'null': 'True', 'to': u"orm['containers.Container']"}),
            u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
            'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'articles.post': {
            'Meta': {'object_name': 'Post'},
            'albums': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'post_albums'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['articles.Album']"}),
            u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'related_posts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'post_relatedposts'", 'to': u"orm['containers.Container']", 'through': u"orm['articles.PostRelated']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
        },
        u'articles.postrelated': {
            'Meta': {'ordering': "('order',)", 'object_name': 'PostRelated'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_post'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['articles.Post']"}),
            'related': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_related'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['containers.Container']"})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'channels.channel': {
            'Meta': {'ordering': "['name', 'parent__id', 'published']", 'unique_together': "(('site', 'long_slug', 'slug', 'parent'),)", 'object_name': 'Channel'},
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'layout': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '250', 'db_index': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'containers.container': {
            'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'child_class', 'channel_long_slug', 'slug'),)", 'object_name': 'Container'},
            'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
            'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
            'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
            'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'containers.containerimage': {
            'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'images.image': {
            'Meta': {'object_name': 'Image'},
            'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
            'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)}),
            'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
complete_apps = ['articles'] | mit |
ptrendx/mxnet | tests/nightly/model_backwards_compatibility_check/model_backwards_compat_inference.py | 9 | 6664 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from common import *
def test_module_checkpoint_api():
    """Backwards-compatibility check for the Module checkpoint API.

    For every MXNet version folder found in the S3 model bucket, download
    the saved checkpoint and reference data, run inference through the
    Module API, and compare against the stored inference results.
    """
    model_name = 'module_checkpoint_api'
    # Use the module logger like every other check in this file
    # (this one previously used a bare print).
    logging.info('Performing inference for model/API %s' % model_name)

    # For each MXNet version that has the saved models
    for folder in get_top_level_folders_in_bucket(s3, model_bucket_name):
        logging.info('Fetching files for MXNet version : %s and model %s' % (folder, model_name))
        model_files = download_model_files_from_s3(model_name, folder)
        if len(model_files) == 0:
            logging.warn('No training files found for %s for MXNet version : %s' % (model_name, folder))
            continue

        data = mx.nd.load(''.join([model_name, '-data']))
        data_iter = mx.io.NDArrayIter(data['data'], data['labels'], batch_size=10)
        # Load the model and perform inference
        loaded_model = get_module_api_model_definition()

        sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, 1)
        loaded_model.bind(data_shapes=data_iter.provide_data, label_shapes=data_iter.provide_label)
        loaded_model.set_params(arg_params, aux_params)

        old_inference_results = load_inference_results(model_name)
        inference_results = loaded_model.predict(data_iter)
        # Check whether they are equal or not ?
        assert_almost_equal(inference_results.asnumpy(), old_inference_results.asnumpy(), rtol=rtol_default, atol=atol_default)
        clean_model_files(model_files, model_name)
        logging.info('=================================')
        logging.info('Assertion passed for model : %s' % model_name)
def test_lenet_gluon_load_params_api():
    """Backwards-compatibility check for Gluon save_params/load_params."""
    model_name = 'lenet_gluon_save_params_api'
    logging.info('Performing inference for model/API %s' % model_name)

    for folder in get_top_level_folders_in_bucket(s3, model_bucket_name):
        logging.info('Fetching files for MXNet version : %s and model %s' % (folder, model_name))
        model_files = download_model_files_from_s3(model_name, folder)
        if len(model_files) == 0:
            logging.warn('No training files found for %s for MXNet version : %s' % (model_name, folder))
            continue

        saved = mx.nd.load(''.join([model_name, '-data']))
        input_data = saved['data']
        # Restore the weights into a fresh network and run a forward pass.
        net = Net()
        net.load_params(model_name + '-params')
        prediction = net(input_data)
        # Compare against the inference results stored alongside the model.
        expected = mx.nd.load(model_name + '-inference')['inference']
        assert_almost_equal(expected.asnumpy(), prediction.asnumpy(), rtol=rtol_default, atol=atol_default)
        clean_model_files(model_files, model_name)
        logging.info('=================================')
        logging.info('Assertion passed for model : %s' % model_name)
def test_lenet_gluon_hybrid_imports_api():
    """Check backwards compatibility of ``HybridBlock.export`` / ``SymbolBlock.imports``.

    Downloads the exported symbol/params for each archived MXNet version,
    re-imports them with the current MXNet, and compares inference output
    against the stored reference results.
    """
    model_name = 'lenet_gluon_hybrid_export_api'
    logging.info('Performing inference for model/API %s', model_name)
    for folder in get_top_level_folders_in_bucket(s3, model_bucket_name):
        logging.info('Fetching files for MXNet version : %s and model %s', folder, model_name)
        model_files = download_model_files_from_s3(model_name, folder)
        if not model_files:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning('No training files found for %s for MXNet version : %s', model_name, folder)
            continue
        # Load the model and perform inference
        data = mx.nd.load(''.join([model_name, '-data']))
        test_data = data['data']
        # Removed the dead `loaded_model = HybridNet()` assignment that was
        # immediately overwritten by SymbolBlock.imports below.
        loaded_model = gluon.SymbolBlock.imports(model_name + '-symbol.json', ['data'], model_name + '-0000.params')
        output = loaded_model(test_data)
        old_inference_results = mx.nd.load(model_name + '-inference')['inference']
        assert_almost_equal(old_inference_results.asnumpy(), output.asnumpy(), rtol=rtol_default, atol=atol_default)
        clean_model_files(model_files, model_name)
        logging.info('=================================')
        logging.info('Assertion passed for model : %s', model_name)
def test_lstm_gluon_load_parameters_api():
    """Check backwards compatibility of ``save_parameters``/``load_parameters`` for an LSTM.

    Skipped entirely on MXNet builds older than 1.2.1, which do not provide
    those APIs.
    """
    # save_parameters/load_parameters were introduced in MXNet 1.2.1; skip on
    # older versions.  (Comment previously claimed ">= 1.2.0", which did not
    # match the 1.2.1 check below.)
    if compare_versions(str(mxnet_version), '1.2.1') < 0:
        logging.warning('Found MXNet version %s and exiting because this version does not contain save_parameters'
                        ' and load_parameters functions', str(mxnet_version))
        return

    model_name = 'lstm_gluon_save_parameters_api'
    # Dropped the dangling " and model" from the original message.
    logging.info('Performing inference for model/API %s', model_name)
    for folder in get_top_level_folders_in_bucket(s3, model_bucket_name):
        logging.info('Fetching files for MXNet version : %s', folder)
        model_files = download_model_files_from_s3(model_name, folder)
        if not model_files:
            logging.warning('No training files found for %s for MXNet version : %s', model_name, folder)
            continue
        data = mx.nd.load(''.join([model_name, '-data']))
        test_data = data['data']
        # Load the model and perform inference
        loaded_model = SimpleLSTMModel()
        loaded_model.load_parameters(model_name + '-params')
        output = loaded_model(test_data)
        old_inference_results = mx.nd.load(model_name + '-inference')['inference']
        assert_almost_equal(old_inference_results.asnumpy(), output.asnumpy(), rtol=rtol_default, atol=atol_default)
        clean_model_files(model_files, model_name)
        logging.info('=================================')
        logging.info('Assertion passed for model : %s', model_name)
if __name__ == '__main__':
    # Run every backwards-compatibility inference check when executed as a
    # script (one check per saved model/API combination).
    test_module_checkpoint_api()
    test_lenet_gluon_load_params_api()
    test_lenet_gluon_hybrid_imports_api()
    test_lstm_gluon_load_parameters_api()
| apache-2.0 |
airbnb/superset | superset/migrations/versions/7e3ddad2a00b_results_key_to_query.py | 5 | 1233 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""results_key to query
Revision ID: 7e3ddad2a00b
Revises: b46fa1b0b39e
Create Date: 2016-10-14 11:17:54.995156
"""
# revision identifiers, used by Alembic.
revision = "7e3ddad2a00b"
down_revision = "b46fa1b0b39e"
import sqlalchemy as sa
from alembic import op
def upgrade():
    """Add the nullable ``results_key`` column to the ``query`` table."""
    results_key = sa.Column("results_key", sa.String(length=64), nullable=True)
    op.add_column("query", results_key)
def downgrade():
    """Drop the ``results_key`` column added by :func:`upgrade`."""
    op.drop_column("query", "results_key")
| apache-2.0 |
rAntonioh/re2 | re2/make_unicode_casefold.py | 22 | 3609 | #!/usr/bin/python
# coding=utf-8
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# See unicode_casefold.h for description of case folding tables.
"""Generate C++ table for Unicode case folding."""
import sys
import unicode
_header = """
// GENERATED BY make_unicode_casefold.py; DO NOT EDIT.
// make_unicode_casefold.py >unicode_casefold.cc
#include "re2/unicode_casefold.h"
namespace re2 {
"""
_trailer = """
} // namespace re2
"""
def _Delta(a, b):
"""Compute the delta for b - a. Even/odd and odd/even
are handled specially, as described above."""
if a+1 == b:
if a%2 == 0:
return 'EvenOdd'
else:
return 'OddEven'
if a == b+1:
if a%2 == 0:
return 'OddEven'
else:
return 'EvenOdd'
return b - a
def _AddDelta(a, delta):
"""Return a + delta, handling EvenOdd and OddEven specially."""
if type(delta) == int:
return a+delta
if delta == 'EvenOdd':
if a%2 == 0:
return a+1
else:
return a-1
if delta == 'OddEven':
if a%2 == 1:
return a+1
else:
return a-1
print >>sys.stderr, "Bad Delta: ", delta
raise "Bad Delta"
def _MakeRanges(pairs):
  """Turn a list like [(65,97), (66, 98), ..., (90,122)]
  into [(65, 90, +32)].

  Each output range is a mutable [lo, hi, delta] triple; delta is either an
  integer offset or one of the symbolic deltas from _Delta ('EvenOdd',
  'OddEven'), possibly with a 'Skip' suffix added here when the pairs
  advance by 2 codepoints instead of 1.
  """
  ranges = []
  last = -100

  def evenodd(last, a, b, r):
    # Extend range r by one codepoint if (a, b) continues r's delta pattern
    # from the previous codepoint `last`.
    if a != last+1 or b != _AddDelta(a, r[2]):
      return False
    r[1] = a
    return True

  def evenoddpair(last, a, b, r):
    # Extend range r when the input steps by 2 (every other codepoint);
    # this upgrades the symbolic delta to its 'Skip' variant.
    if a != last+2:
      return False
    delta = r[2]
    d = delta
    if type(delta) is not str:
      return False
    if delta.endswith('Skip'):
      # Already a skip range; strip the suffix to get the base delta.
      d = delta[:-4]
    else:
      delta = d + 'Skip'
    if b != _AddDelta(a, d):
      return False
    r[1] = a
    r[2] = delta
    return True

  for a, b in pairs:
    if ranges and evenodd(last, a, b, ranges[-1]):
      pass
    elif ranges and evenoddpair(last, a, b, ranges[-1]):
      pass
    else:
      # Neither extension applied: start a new single-codepoint range.
      ranges.append([a, a, _Delta(a, b)])
    last = a
  return ranges
# The maximum size of a case-folding group.
# Case folding is implemented in parse.cc by a recursive process
# with a recursion depth equal to the size of the largest
# case-folding group, so it is important that this bound be small.
# The current tables have no group bigger than 4.
# If there are ever groups bigger than 10 or so, it will be
# time to rework the code in parse.cc.
MaxCasefoldGroup = 4
def main():
  """Emit the generated C++ case-folding tables on stdout."""
  # NOTE(review): this generator is Python 2 only (print statements,
  # dict.iteritems); run it with a Python 2 interpreter.
  lowergroups, casegroups = unicode.CaseGroups()
  foldpairs = []
  seen = {}
  for c in casegroups:
    if len(c) > MaxCasefoldGroup:
      raise unicode.Error("casefold group too long: %s" % (c,))
    for i in range(len(c)):
      # c[i-1] wraps to the last element when i == 0, closing the
      # case-folding orbit c[0] -> c[1] -> ... -> c[-1] -> c[0].
      if c[i-1] in seen:
        raise unicode.Error("bad casegroups %d -> %d" % (c[i-1], c[i]))
      seen[c[i-1]] = True
      foldpairs.append([c[i-1], c[i]])

  # Map every non-lowercase member of each group to its lowercase form.
  lowerpairs = []
  for lower, group in lowergroups.iteritems():
    for g in group:
      if g != lower:
        lowerpairs.append([g, lower])

  def printpairs(name, foldpairs):
    # Print one CaseFold table plus its length constant.
    foldpairs.sort()
    foldranges = _MakeRanges(foldpairs)
    print "// %d groups, %d pairs, %d ranges" % (len(casegroups), len(foldpairs), len(foldranges))
    print "const CaseFold unicode_%s[] = {" % (name,)
    for lo, hi, delta in foldranges:
      print "\t{ %d, %d, %s }," % (lo, hi, delta)
    print "};"
    print "const int num_unicode_%s = %d;" % (name, len(foldranges),)
    print ""

  print _header
  printpairs("casefold", foldpairs)
  printpairs("tolower", lowerpairs)
  print _trailer
if __name__ == '__main__':
  # Writes unicode_casefold.cc content to stdout; redirect to a file.
  main()
| bsd-3-clause |
atris/gpdb | gpMgmt/bin/gppylib/system/configurationImplGpdb.py | 20 | 17852 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
# Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
#
"""
This file defines the interface that can be used to fetch and update system
configuration information.
"""
import os, copy
from gppylib.gplog import *
from gppylib.utils import checkNotNone
from gppylib.system.configurationInterface import *
from gppylib.system.ComputeCatalogUpdate import ComputeCatalogUpdate
from gppylib.gparray import GpArray, GpDB, InvalidSegmentConfiguration
from gppylib import gparray
from gppylib.db import dbconn
from gppylib.commands.gp import get_local_db_mode
logger = get_default_logger()
class GpConfigurationProviderUsingGpdbCatalog(GpConfigurationProvider) :
"""
An implementation of GpConfigurationProvider will provide functionality to
fetch and update gpdb system configuration information (as stored in the
database)
Note that the client of this is assuming that the database data is not
changed by another party between the time segment data is loaded and when it
is updated
"""
def __init__(self):
    # Master DB connection URL; populated by initializeProvider().
    self.__masterDbUrl = None
def initializeProvider(self, masterPort):
    """
    Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database

    returns self
    """
    checkNotNone("masterPort", masterPort)
    # Connect to the master's template1 database for catalog access.
    self.__masterDbUrl = dbconn.DbURL(port=masterPort, dbname='template1')
    return self
def loadSystemConfig(self, useUtilityMode):
    """
    Load all segment information from the configuration source.

    Returns a new GpArray object
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.info("Obtaining Segment details from master...")

    array = GpArray.initFromCatalog(self.__masterDbUrl, useUtilityMode)

    # Skip array validation when the master is running in utility mode,
    # where a partial/unbalanced configuration is expected.
    if get_local_db_mode(array.master.getSegmentDataDirectory()) != 'UTILITY':
        logger.debug("Validating configuration...")
        if not array.is_array_valid():
            raise InvalidSegmentConfiguration(array)

    return array
def sendPgElogFromMaster(self, msg, sendAlerts):
    """
    Send a message from the master database using select pg_elog ...
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    alertsLiteral = "true" if sendAlerts else "false"
    sql = "SELECT GP_ELOG(" + self.__toSqlCharValue(msg) + "," + alertsLiteral + ")"

    conn = None
    try:
        conn = dbconn.connect(self.__masterDbUrl, utility=True)
        dbconn.execSQL(conn, sql)
    finally:
        if conn:
            conn.close()
def updateSystemConfig(self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary):
    """
    Update the configuration for the given segments in the underlying
    configuration store to match the current values

    Also resets any dirty bits on saved/updated objects

    @param textForConfigTable label to be used when adding to segment configuration history
    @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror
    @param useUtilityMode True if the operations we're doing are expected to run via utility mode
    @param allowPrimary True if caller authorizes add/remove primary operations (e.g. gpexpand)
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.debug("Validating configuration changes...")
    if not gpArray.is_array_valid():
        logger.critical("Configuration is invalid")
        raise InvalidSegmentConfiguration(gpArray)

    # All catalog changes below run in one transaction so a failure leaves
    # gp_segment_configuration untouched.
    conn = dbconn.connect(self.__masterDbUrl, useUtilityMode, allowSystemTableMods='dml')
    dbconn.execSQL(conn, "BEGIN")

    # compute what needs to be updated
    update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary)
    update.validate()

    # put the mirrors in a map by content id so we can update them later
    mirror_map = {}
    for seg in update.mirror_to_add:
        mirror_map[seg.getSegmentContentId()] = seg

    # reset dbId of new primary and mirror segments to -1
    # before invoking the operations which will assign them new ids
    for seg in update.primary_to_add:
        seg.setSegmentDbId(-1)
    for seg in update.mirror_to_add:
        seg.setSegmentDbId(-1)

    # remove mirror segments (e.g. for gpexpand rollback)
    for seg in update.mirror_to_remove:
        self.__updateSystemConfigRemoveMirror(conn, seg, textForConfigTable)

    # remove primary segments (e.g for gpexpand rollback)
    for seg in update.primary_to_remove:
        self.__updateSystemConfigRemovePrimary(conn, seg, textForConfigTable)

    # add new primary segments
    for seg in update.primary_to_add:
        self.__updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map)

    # add new mirror segments
    for seg in update.mirror_to_add:
        self.__updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable)

    # remove and add mirror segments necessitated by catalog attribute update
    for seg in update.mirror_to_remove_and_add:
        self.__updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable)

    # apply updates to existing segments
    for seg in update.segment_to_update:
        originalSeg = update.dbsegmap.get(seg.getSegmentDbId())
        self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable)

    # apply update to fault strategy
    if gpArray.getStrategyAsLoadedFromDb() != gpArray.getFaultStrategy():
        self.__updateSystemConfigFaultStrategy(conn, gpArray)

    # commit changes
    logger.debug("Committing configuration table changes")
    dbconn.execSQL(conn, "COMMIT")
    conn.close()

    # NOTE(review): a one-element list is passed here -- confirm the setter
    # expects a list rather than the scalar strategy value.
    gpArray.setStrategyAsLoadedFromDb([gpArray.getFaultStrategy()])
    gpArray.setSegmentsAsLoadedFromDb([seg.copy() for seg in gpArray.getDbList()])
def __updateSystemConfigRemoveMirror(self, conn, seg, textForConfigTable):
    """
    Remove a mirror segment currently in gp_segment_configuration
    but not present in the goal configuration and record our action
    in gp_configuration_history.
    """
    removedDbId = seg.getSegmentDbId()
    self.__callSegmentRemoveMirror(conn, seg)
    historyMessage = "%s: removed mirror segment configuration" % textForConfigTable
    self.__insertConfigHistory(conn, removedDbId, historyMessage)
def __updateSystemConfigRemovePrimary(self, conn, seg, textForConfigTable):
    """
    Remove a primary segment currently in gp_segment_configuration
    but not present in the goal configuration and record our action
    in gp_configuration_history.
    """
    removedDbId = seg.getSegmentDbId()
    self.__callSegmentRemove(conn, seg)
    historyMessage = "%s: removed primary segment configuration" % textForConfigTable
    self.__insertConfigHistory(conn, removedDbId, historyMessage)
def __updateSystemConfigAddPrimary(self, conn, gpArray, seg, textForConfigTable, mirror_map):
    """
    Add a primary segment specified in our goal configuration but
    which is missing from the current gp_segment_configuration table
    and record our action in gp_configuration_history.
    """
    # lookup the mirror (if any) so that we may correct its content id
    mirrorseg = mirror_map.get(seg.getSegmentContentId())

    # add the new segment (assigns seg a new dbid as a side effect)
    dbId = self.__callSegmentAdd(conn, gpArray, seg)

    # update the segment mode, status and replication port
    self.__updateSegmentModeStatus(conn, seg)
    if gpArray.get_mirroring_enabled() == True:
        self.__updateSegmentReplicationPort(conn, seg)

    # get the newly added segment's content id
    # MPP-12393 et al WARNING: there is an unusual side effect going on here.
    # Although gp_add_segment() executed by __callSegmentAdd() above returns
    # the dbId of the new row in gp_segment_configuration, the following
    # select from gp_segment_configuration can return 0 rows if the updates
    # done by __updateSegmentModeStatus() and/or __updateSegmentReplicationPort()
    # are not done first. Don't change the order of these operations unless you
    # understand why gp_add_segment() behaves as it does.
    sql = "select content from pg_catalog.gp_segment_configuration where dbId = %s" % self.__toSqlIntValue(seg.getSegmentDbId())
    logger.debug(sql)
    sqlResult = self.__fetchSingleOutputRow(conn, sql)
    contentId = int(sqlResult[0])

    # Set the new content id for the primary as well the mirror if present.
    seg.setSegmentContentId(contentId)
    if mirrorseg is not None:
        mirrorseg.setSegmentContentId(contentId)

    self.__insertConfigHistory(conn, dbId, "%s: inserted primary segment configuration with contentid %s" % (textForConfigTable, contentId))
def __updateSystemConfigAddMirror(self, conn, gpArray, seg, textForConfigTable):
    """
    Add a mirror segment specified in our goal configuration but
    which is missing from the current gp_segment_configuration table
    and record our action in gp_configuration_history.
    """
    newDbId = self.__callSegmentAddMirror(conn, gpArray, seg)
    # gp_add_segment_mirror does not set mode/status; do it explicitly.
    self.__updateSegmentModeStatus(conn, seg)
    historyMessage = "%s: inserted mirror segment configuration" % textForConfigTable
    self.__insertConfigHistory(conn, newDbId, historyMessage)
def __updateSystemConfigRemoveAddMirror(self, conn, gpArray, seg, textForConfigTable):
    """
    We've been asked to update the mirror in a manner that requires
    it to be removed and then re-added.  Perform the tasks
    and record our action in gp_configuration_history.
    """
    origDbId = seg.getSegmentDbId()
    self.__callSegmentRemoveMirror(conn, seg)
    # NOTE(review): the returned dbId equals seg.getSegmentDbId() afterwards
    # (the call stores it on seg); the local is effectively unused.
    dbId = self.__callSegmentAddMirror(conn, gpArray, seg)

    # now update mode/status since this is not done by gp_add_segment_mirror
    self.__updateSegmentModeStatus(conn, seg)

    self.__insertConfigHistory(conn, seg.getSegmentDbId(),
                               "%s: inserted segment configuration for full recovery or original dbid %s"
                               % (textForConfigTable, origDbId))
def __updateSystemConfigUpdateSegment(self, conn, gpArray, seg, originalSeg, textForConfigTable):
    # Always refresh mode and status; refresh the replication port too when
    # it differs from the catalog value (adding a mirror may change it).
    self.__updateSegmentModeStatus(conn, seg)

    portChanged = seg.getSegmentReplicationPort() != originalSeg.getSegmentReplicationPort()
    if portChanged:
        self.__updateSegmentReplicationPort(conn, seg)

    what = ("%s: segment mode, status, and replication port" if portChanged
            else "%s: segment mode and status")
    self.__insertConfigHistory(conn, seg.getSegmentDbId(), what % textForConfigTable)
def __updateSystemConfigFaultStrategy(self, conn, gpArray):
    """
    Update the fault strategy.
    """
    strategyLiteral = self.__toSqlCharValue(gpArray.getFaultStrategy())
    sql = "UPDATE gp_fault_strategy\n SET fault_strategy = %s\n" % strategyLiteral
    logger.debug(sql)
    dbconn.executeUpdateOrInsert(conn, sql, 1)
def __callSegmentRemoveMirror(self, conn, seg):
    """
    Call gp_remove_segment_mirror() to remove the mirror.
    """
    contentLiteral = self.__toSqlIntValue(seg.getSegmentContentId())
    sql = "SELECT gp_remove_segment_mirror(%s::int2)" % contentLiteral
    logger.debug(sql)
    result = self.__fetchSingleOutputRow(conn, sql)
    assert result[0]  # gp_remove_segment_mirror() must return True
def __callSegmentRemove(self, conn, seg):
    """
    Call gp_remove_segment() to remove the primary.
    """
    dbIdLiteral = self.__toSqlIntValue(seg.getSegmentDbId())
    sql = "SELECT gp_remove_segment(%s::int2)" % dbIdLiteral
    logger.debug(sql)
    result = self.__fetchSingleOutputRow(conn, sql)
    assert result[0]  # gp_remove_segment() must return True
def __callSegmentAdd(self, conn, gpArray, seg):
    """
    Call gp_add_segment() to add the primary.
    Return the new segment's dbid; also stores the dbid on seg.
    """
    logger.debug('callSegmentAdd %s' % repr(seg))
    filespaceMapStr = self.__toSqlFilespaceMapStr(gpArray, seg)

    sql = "SELECT gp_add_segment(%s, %s, %s, %s)" \
        % (
            self.__toSqlTextValue(seg.getSegmentHostName()),
            self.__toSqlTextValue(seg.getSegmentAddress()),
            self.__toSqlIntValue(seg.getSegmentPort()),
            self.__toSqlTextValue(filespaceMapStr)
        )
    logger.debug(sql)
    sqlResult = self.__fetchSingleOutputRow(conn, sql)
    dbId = int(sqlResult[0])
    # remember the catalog-assigned dbid on the segment object
    seg.setSegmentDbId(dbId)
    return dbId
def __callSegmentAddMirror(self, conn, gpArray, seg):
    """
    Call gp_add_segment_mirror() to add the mirror.
    Return the new segment's dbid; also stores the dbid on seg.
    """
    logger.debug('callSegmentAddMirror %s' % repr(seg))
    filespaceMapStr = self.__toSqlFilespaceMapStr(gpArray, seg)

    sql = "SELECT gp_add_segment_mirror(%s::int2, %s, %s, %s, %s, %s)" \
        % (
            self.__toSqlIntValue(seg.getSegmentContentId()),
            self.__toSqlTextValue(seg.getSegmentHostName()),
            self.__toSqlTextValue(seg.getSegmentAddress()),
            self.__toSqlIntValue(seg.getSegmentPort()),
            self.__toSqlIntValue(seg.getSegmentReplicationPort()),
            self.__toSqlTextValue(filespaceMapStr)
        )
    logger.debug(sql)
    sqlResult = self.__fetchSingleOutputRow(conn, sql)
    dbId = int(sqlResult[0])
    # remember the catalog-assigned dbid on the segment object
    seg.setSegmentDbId(dbId)
    return dbId
def __updateSegmentReplicationPort(self, conn, seg):
    # Persist the segment's replication port into the catalog row.
    sql = (
        "UPDATE pg_catalog.gp_segment_configuration\n"
        " SET\n"
        " replication_port = %s\n"
        "WHERE dbid = %s"
    ) % (
        self.__toSqlIntValue(seg.getSegmentReplicationPort()),
        self.__toSqlIntValue(seg.getSegmentDbId()),
    )
    logger.debug(sql)
    dbconn.executeUpdateOrInsert(conn, sql, 1)
def __updateSegmentModeStatus(self, conn, seg):
    # Persist the segment's mode and status into the catalog row.
    sql = (
        "UPDATE pg_catalog.gp_segment_configuration\n"
        " SET\n"
        " mode = %s,\n"
        " status = %s\n"
        "WHERE dbid = %s"
    ) % (
        self.__toSqlCharValue(seg.getSegmentMode()),
        self.__toSqlCharValue(seg.getSegmentStatus()),
        self.__toSqlIntValue(seg.getSegmentDbId()),
    )
    logger.debug(sql)
    dbconn.executeUpdateOrInsert(conn, sql, 1)
def __fetchSingleOutputRow(self, conn, sql, retry=False):
    """
    Execute specified SQL command and return what we expect to be a single row.
    Raise an exception when more or fewer than one row is seen and when more
    than one row is seen display up to 10 rows as logger warnings.

    NOTE(review): the ``retry`` parameter is currently unused.
    """
    cursor = dbconn.execSQL(conn, sql)
    numrows = cursor.rowcount
    numshown = 0
    res = None
    for row in cursor:
        if numrows != 1:
            #
            # if we got back more than one row
            # we print a few of the rows first
            # instead of immediately raising an exception
            #
            numshown += 1
            if numshown > 10:
                break
            logger.warning('>>> %s' % row)
        else:
            # exactly one row expected -- capture it
            assert res is None
            res = row
    # NOTE(review): when numrows == 0 this assert fires before the more
    # descriptive Exception below is reached.
    assert res is not None
    cursor.close()
    if numrows != 1:
        raise Exception("SQL returned %d rows, not 1 as expected:\n%s" % (numrows, sql))
    return res
def __insertConfigHistory(self, conn, dbId, msg):
    # Record a change-history row for the given dbid.
    sql = (
        "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n"
        "now(),\n %s,\n %s\n)"
    ) % (
        self.__toSqlIntValue(dbId),
        self.__toSqlCharValue(msg),
    )
    logger.debug(sql)
    dbconn.executeUpdateOrInsert(conn, sql, 1)
def __toSqlFilespaceMapStr(self, gpArray, seg):
    """
    Return a string representation of the filespace map suitable
    for inclusion into the call to gp_add_segment_mirror().
    """
    segPaths = seg.getSegmentFilespaces()
    entries = [
        "{%s,%s}" % (
            self.__toSqlArrayStringValue(fs.getName()),
            self.__toSqlArrayStringValue(segPaths[fs.getOid()]),
        )
        for fs in gpArray.getFilespaces()
    ]
    return "{" + ",".join(entries) + "}"
def __toSqlIntValue(self, val):
    # Render an integer (or None) as a SQL literal.
    return "null" if val is None else str(val)
def __toSqlArrayStringValue(self, val):
    """
    Render val as a double-quoted element of a SQL array literal.

    Bug fix: backslashes must be escaped BEFORE quotes.  The previous
    order ('"' -> '\\"' first, then '\\' -> '\\\\') re-escaped the
    backslash it had just inserted, turning '"' into '\\\\"' (an escaped
    backslash followed by an unescaped, string-terminating quote).
    """
    if val is None:
        return "null"
    return '"' + val.replace('\\', '\\\\').replace('"', '\\"') + '"'
def __toSqlCharValue(self, val):
    # Alias of __toSqlTextValue; kept separate for call-site readability.
    return self.__toSqlTextValue(val)
def __toSqlTextValue(self, val):
    # Quote val as a SQL text literal; embedded single quotes are doubled.
    # NOTE(review): backslashes are doubled too, which assumes the server
    # treats backslash as an escape character (pre-standard_conforming_strings
    # behavior) -- confirm against the target server configuration.
    if val is None:
        return "null"
    return "'" + val.replace("'", "''").replace('\\', '\\\\') + "'"
| apache-2.0 |
AsgerPetersen/QGIS | python/plugins/processing/algs/exampleprovider/ExampleAlgorithmProvider.py | 20 | 3721 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import Setting, ProcessingConfig
from exampleprovider.ExampleAlgorithm import ExampleAlgorithm
class ExampleAlgorithmProvider(AlgorithmProvider):
    """Example Processing provider exposing a single example algorithm.

    Deactivated by default; users enable it from the Processing settings.
    """

    MY_DUMMY_SETTING = 'MY_DUMMY_SETTING'

    def __init__(self):
        AlgorithmProvider.__init__(self)

        # Deactivate provider by default
        self.activate = False

        # Load algorithms
        self.alglist = [ExampleAlgorithm()]
        for alg in self.alglist:
            alg.provider = self

    def initializeSettings(self):
        """In this method we add settings needed to configure our
        provider.

        Do not forget to call the parent method, since it takes care
        of automatically adding a setting for activating or
        deactivating the algorithms in the provider.
        """
        AlgorithmProvider.initializeSettings(self)
        ProcessingConfig.addSetting(Setting('Example algorithms',
                                            ExampleAlgorithmProvider.MY_DUMMY_SETTING,
                                            'Example setting', 'Default value'))

    def unload(self):
        """Setting should be removed here, so they do not appear anymore
        when the plugin is unloaded.
        """
        AlgorithmProvider.unload(self)
        ProcessingConfig.removeSetting(
            ExampleAlgorithmProvider.MY_DUMMY_SETTING)

    def getName(self):
        """This is the name that will appear on the toolbox group.

        It is also used to create the command line name of all the
        algorithms from this provider.
        """
        return 'Example provider'

    def getDescription(self):
        """This is the provider's full name.
        """
        return 'Example algorithms'

    def getIcon(self):
        """We return the default icon.
        """
        return AlgorithmProvider.getIcon(self)

    def _loadAlgorithms(self):
        """Here we fill the list of algorithms in self.algs.

        This method is called whenever the list of algorithms should
        be updated. If the list of algorithms can change (for instance,
        if it contains algorithms from user-defined scripts and a new
        script might have been added), you should create the list again
        here.

        In this case, since the list is always the same, we assign from
        the pre-made list. This assignment has to be done in this method
        even if the list does not change, since the self.algs list is
        cleared before calling this method.
        """
        self.algs = self.alglist
| gpl-2.0 |
Endika/edx-platform | lms/djangoapps/ccx/tests/test_views.py | 1 | 39258 | """
test views
"""
import datetime
import json
import re
import pytz
import ddt
import urlparse
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.courses import get_course_by_id
from courseware.tests.factories import StudentModuleFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tabs import get_course_tab_list
from instructor.access import list_with_level, allow_access
from django.conf import settings
from django.core.urlresolvers import reverse, resolve
from django.utils.timezone import UTC
from django.test.utils import override_settings
from django.test import RequestFactory
from edxmako.shortcuts import render_to_response
from request_cache.middleware import RequestCache
from opaque_keys.edx.keys import CourseKey
from student.roles import (
CourseCcxCoachRole,
CourseInstructorRole,
CourseStaffRole,
)
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
)
from student.tests.factories import (
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from ccx_keys.locator import CCXLocator
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import get_override_for_ccx, override_field_for_ccx
from lms.djangoapps.ccx.views import ccx_course
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.ccx.tests.utils import (
CcxTestCase,
flatten,
)
from lms.djangoapps.ccx.utils import is_email
from lms.djangoapps.ccx.views import get_date
def intercept_renderer(path, context):
    """
    Intercept calls to `render_to_response` and attach the context dict to the
    response for examination in unit tests.
    """
    # I think Django already does this for you in their TestClient, except
    # we're bypassing that by using edxmako. Probably edxmako should be
    # integrated better with Django's rendering and event system.
    rendered = render_to_response(path, context)
    rendered.mako_template = path
    rendered.mako_context = context
    return rendered
def ccx_dummy_request():
    """
    Returns dummy request object for CCX coach tab test
    """
    request = RequestFactory().get('ccx_coach_dashboard')
    request.user = MagicMock()
    return request
def setup_students_and_grades(context):
    """
    Create two enrolled students on ``context`` and populate their grades.

    Student 1 scores on problems where the section index is less than the
    problem index; student 2 on the opposite, giving complementary grade
    distributions for the CCX grade-report tests.

    :param context: class reference
    """
    # Guard clause instead of wrapping the whole body in `if context.course:`.
    if not context.course:
        return

    context.student = student = UserFactory.create()
    CourseEnrollmentFactory.create(user=student, course_id=context.course.id)

    context.student2 = student2 = UserFactory.create()
    CourseEnrollmentFactory.create(user=student2, course_id=context.course.id)

    # create grades for self.student as if they'd submitted the ccx
    for chapter in context.course.get_children():
        for i, section in enumerate(chapter.get_children()):
            for j, problem in enumerate(section.get_children()):
                StudentModuleFactory.create(
                    grade=1 if i < j else 0,
                    max_grade=1,
                    student=context.student,
                    course_id=context.course.id,
                    module_state_key=problem.location
                )
                StudentModuleFactory.create(
                    grade=1 if i > j else 0,
                    max_grade=1,
                    student=context.student2,
                    course_id=context.course.id,
                    module_state_key=problem.location
                )
@attr('shard_1')
@ddt.ddt
class TestCoachDashboard(CcxTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
@classmethod
def setUpClass(cls):
    # No extra class-level fixtures; defer entirely to CcxTestCase.
    super(TestCoachDashboard, cls).setUpClass()
def setUp(self):
    """
    Set up tests: log in as the coach and grant staff/instructor roles
    on the master course so CCX creation can propagate them.
    """
    super(TestCoachDashboard, self).setUp()
    # Login with the instructor account
    self.client.login(username=self.coach.username, password="test")

    # adding staff to master course.
    staff = UserFactory()
    allow_access(self.course, staff, 'staff')
    self.assertTrue(CourseStaffRole(self.course.id).has_user(staff))

    # adding instructor to master course.
    instructor = UserFactory()
    allow_access(self.course, instructor, 'instructor')
    self.assertTrue(CourseInstructorRole(self.course.id).has_user(instructor))
def assert_elements_in_schedule(self, url, n_chapters=2, n_sequentials=4, n_verticals=8):
    """
    Helper function to count visible elements in the schedule.

    Asserts the expected number of chapters/sequentials/verticals and
    returns the list of locations of all visible nodes (nodes without a
    'location' key are skipped).
    """
    response = self.client.get(url)
    # the schedule contains chapters
    chapters = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    sequentials = flatten([chapter.get('children', []) for chapter in chapters])
    verticals = flatten([sequential.get('children', []) for sequential in sequentials])
    # check that the numbers of nodes at different level are the expected ones
    self.assertEqual(n_chapters, len(chapters))
    self.assertEqual(n_sequentials, len(sequentials))
    self.assertEqual(n_verticals, len(verticals))
    # extract the locations of all the nodes
    all_elements = chapters + sequentials + verticals
    return [elem['location'] for elem in all_elements if 'location' in elem]
def hide_node(self, node):
    """
    Helper function to set the node `visible_to_staff_only` property
    to True and save the change
    """
    node.visible_to_staff_only = True
    # persist via the modulestore so subsequent reads see the change
    self.mstore.update_item(node, self.coach.id)
def test_not_a_coach(self):
    """
    User is not a coach, should get Forbidden response.
    """
    ccx = self.make_ccx()
    ccx_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    dashboard_url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_key})
    self.assertEqual(self.client.get(dashboard_url).status_code, 403)
def test_no_ccx_created(self):
    """
    No CCX is created, coach should see form to add a CCX.
    """
    self.make_coach()
    dashboard_url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': unicode(self.course.id)},
    )
    response = self.client.get(dashboard_url)
    self.assertEqual(response.status_code, 200)
    # The dashboard should render the "create CCX" form.
    self.assertTrue(
        re.search('<form action=".+create_ccx"', response.content))
def test_create_ccx(self):
    """
    Create CCX. Follow redirect to coach dashboard, confirm we see
    the coach dashboard for the new CCX.

    Also verifies enrollment-limit override, coach role assignment, and
    that staff/instructor roles propagate from the master course.
    """
    self.make_coach()
    url = reverse(
        'create_ccx',
        kwargs={'course_id': unicode(self.course.id)})

    response = self.client.post(url, {'name': 'New CCX'})
    self.assertEqual(response.status_code, 302)
    url = response.get('location')  # pylint: disable=no-member
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)

    # Get the ccx_key from the redirect target's path
    path = urlparse.urlparse(url).path
    resolver = resolve(path)
    ccx_key = resolver.kwargs['course_id']

    course_key = CourseKey.from_string(ccx_key)

    self.assertTrue(CourseEnrollment.is_enrolled(self.coach, course_key))
    self.assertTrue(re.search('id="ccx-schedule"', response.content))

    # check if the max amount of student that can be enrolled has been overridden
    ccx = CustomCourseForEdX.objects.get()
    course_enrollments = get_override_for_ccx(ccx, self.course, 'max_student_enrollments_allowed')
    self.assertEqual(course_enrollments, settings.CCX_MAX_STUDENTS_ALLOWED)

    # assert ccx creator has role=ccx_coach
    role = CourseCcxCoachRole(course_key)
    self.assertTrue(role.has_user(self.coach))

    # assert that staff and instructors of master course has staff and instructor roles on ccx
    list_staff_master_course = list_with_level(self.course, 'staff')
    list_instructor_master_course = list_with_level(self.course, 'instructor')

    with ccx_course(course_key) as course_ccx:
        list_staff_ccx_course = list_with_level(course_ccx, 'staff')
        self.assertEqual(len(list_staff_master_course), len(list_staff_ccx_course))
        self.assertEqual(list_staff_master_course[0].email, list_staff_ccx_course[0].email)

        list_instructor_ccx_course = list_with_level(course_ccx, 'instructor')
        self.assertEqual(len(list_instructor_ccx_course), len(list_instructor_master_course))
        self.assertEqual(list_instructor_ccx_course[0].email, list_instructor_master_course[0].email)
def test_get_date(self):
    """
    Assert that get_date returns valid date.

    With no CCX overrides in place, every node at every depth should fall
    back to the master course's dates (self.mooc_start / self.mooc_due).
    """
    ccx = self.make_ccx()
    # walk the full outline: chapter -> subsection -> unit
    for section in self.course.get_children():
        self.assertEqual(get_date(ccx, section, 'start'), self.mooc_start)
        # chapters have no due date of their own
        self.assertEqual(get_date(ccx, section, 'due'), None)
        for subsection in section.get_children():
            self.assertEqual(get_date(ccx, subsection, 'start'), self.mooc_start)
            self.assertEqual(get_date(ccx, subsection, 'due'), self.mooc_due)
            for unit in subsection.get_children():
                # units inherit dates from their parent subsection
                self.assertEqual(get_date(ccx, unit, 'start', parent_node=subsection), self.mooc_start)
                self.assertEqual(get_date(ccx, unit, 'due', parent_node=subsection), self.mooc_due)
@SharedModuleStoreTestCase.modifies_courseware
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_get_ccx_schedule(self, today):
    """
    Gets CCX schedule and checks number of blocks in it.
    Hides nodes at a different depth and checks that these nodes
    are not in the schedule.
    """
    # pin "today" so schedule date calculations are deterministic
    today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
    self.make_coach()
    ccx = self.make_ccx()
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={
            'course_id': CCXLocator.from_course_locator(
                self.course.id, ccx.id)
        }
    )
    # all the elements are visible
    self.assert_elements_in_schedule(url)
    # hide a vertical
    vertical = self.verticals[0]
    self.hide_node(vertical)
    locations = self.assert_elements_in_schedule(url, n_verticals=7)
    self.assertNotIn(unicode(vertical.location), locations)
    # hide a sequential
    sequential = self.sequentials[0]
    self.hide_node(sequential)
    locations = self.assert_elements_in_schedule(url, n_sequentials=3, n_verticals=6)
    self.assertNotIn(unicode(sequential.location), locations)
    # hide a chapter
    chapter = self.chapters[0]
    self.hide_node(chapter)
    locations = self.assert_elements_in_schedule(url, n_chapters=1, n_sequentials=2, n_verticals=4)
    self.assertNotIn(unicode(chapter.location), locations)
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_edit_schedule(self, today):
    """
    Get CCX schedule, modify it, save it.

    Confirms that overridden dates are echoed back by the save view, that
    the CCX start date tracks the earliest scheduled chapter, and that the
    grading policy was adjusted during save.
    """
    # pin "today" so date handling is deterministic
    today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
    self.make_coach()
    ccx = self.make_ccx()
    url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
    response = self.client.get(url)
    schedule = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    self.assertEqual(len(schedule), 2)
    self.assertEqual(schedule[0]['hidden'], False)
    # If a coach does not override dates, then dates will be imported from master course.
    self.assertEqual(
        schedule[0]['start'],
        self.chapters[0].start.strftime('%Y-%m-%d %H:%M')
    )
    self.assertEqual(
        schedule[0]['children'][0]['start'],
        self.sequentials[0].start.strftime('%Y-%m-%d %H:%M')
    )
    if self.sequentials[0].due:
        expected_due = self.sequentials[0].due.strftime('%Y-%m-%d %H:%M')
    else:
        expected_due = None
    self.assertEqual(schedule[0]['children'][0]['due'], expected_due)
    url = reverse(
        'save_ccx',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})

    def unhide(unit):
        """
        Recursively unhide a unit and all of its children in the CCX
        schedule.
        """
        unit['hidden'] = False
        for child in unit.get('children', ()):
            unhide(child)

    unhide(schedule[0])
    # override a handful of dates at different depths, then POST the schedule
    schedule[0]['start'] = u'2014-11-20 00:00'
    schedule[0]['children'][0]['due'] = u'2014-12-25 00:00'  # what a jerk!
    schedule[0]['children'][0]['children'][0]['start'] = u'2014-12-20 00:00'
    schedule[0]['children'][0]['children'][0]['due'] = u'2014-12-25 00:00'
    response = self.client.post(
        url, json.dumps(schedule), content_type='application/json'
    )
    # the save view echoes the (overridden) schedule back as JSON
    schedule = json.loads(response.content)['schedule']
    self.assertEqual(schedule[0]['hidden'], False)
    self.assertEqual(schedule[0]['start'], u'2014-11-20 00:00')
    self.assertEqual(
        schedule[0]['children'][0]['due'], u'2014-12-25 00:00'
    )
    self.assertEqual(
        schedule[0]['children'][0]['children'][0]['due'], u'2014-12-25 00:00'
    )
    self.assertEqual(
        schedule[0]['children'][0]['children'][0]['start'], u'2014-12-20 00:00'
    )
    # Make sure start date set on course, follows start date of earliest
    # scheduled chapter
    ccx = CustomCourseForEdX.objects.get()
    course_start = get_override_for_ccx(ccx, self.course, 'start')
    # [:-9] strips seconds/timezone so only '%Y-%m-%d %H:%M' is compared
    self.assertEqual(str(course_start)[:-9], self.chapters[0].start.strftime('%Y-%m-%d %H:%M'))
    # Make sure grading policy adjusted
    policy = get_override_for_ccx(ccx, self.course, 'grading_policy',
                                  self.course.grading_policy)
    self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
    self.assertEqual(policy['GRADER'][0]['min_count'], 8)
    self.assertEqual(policy['GRADER'][1]['type'], 'Lab')
    self.assertEqual(policy['GRADER'][1]['min_count'], 0)
    self.assertEqual(policy['GRADER'][2]['type'], 'Midterm Exam')
    self.assertEqual(policy['GRADER'][2]['min_count'], 0)
    self.assertEqual(policy['GRADER'][3]['type'], 'Final Exam')
    self.assertEqual(policy['GRADER'][3]['min_count'], 0)
@patch('ccx.views.render_to_response', intercept_renderer)
def test_save_without_min_count(self):
    """
    POST grading policy without min_count field.

    The policy should be stored as-is (no min_count injected), and a
    subsequent schedule save must still succeed with that policy in place.
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_id = CCXLocator.from_course_locator(self.course.id, ccx.id)
    save_policy_url = reverse(
        'ccx_set_grading_policy', kwargs={'course_id': course_id})
    # This policy doesn't include a min_count field
    policy = {
        "GRADE_CUTOFFS": {
            "Pass": 0.5
        },
        "GRADER": [
            {
                "weight": 0.15,
                "type": "Homework",
                "drop_count": 2,
                "short_label": "HW"
            }
        ]
    }
    response = self.client.post(
        save_policy_url, {"policy": json.dumps(policy)}
    )
    # saving the policy redirects back to the dashboard
    self.assertEqual(response.status_code, 302)
    ccx = CustomCourseForEdX.objects.get()
    # Make sure grading policy adjusted
    policy = get_override_for_ccx(
        ccx, self.course, 'grading_policy', self.course.grading_policy
    )
    self.assertEqual(len(policy['GRADER']), 1)
    self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
    # min_count must not have been silently added
    self.assertNotIn('min_count', policy['GRADER'][0])
    # saving the schedule must still work with the min_count-less policy
    save_ccx_url = reverse('save_ccx', kwargs={'course_id': course_id})
    coach_dashboard_url = reverse(
        'ccx_coach_dashboard',
        kwargs={'course_id': course_id}
    )
    response = self.client.get(coach_dashboard_url)
    schedule = json.loads(response.mako_context['schedule'])  # pylint: disable=no-member
    response = self.client.post(
        save_ccx_url, json.dumps(schedule), content_type='application/json'
    )
    self.assertEqual(response.status_code, 200)
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll')),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll')),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add')),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add')),
)
@ddt.unpack
def test_enroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
    """
    Tests the enrollment of a list of students who are members
    of the class.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.
    """
    self.make_coach()
    ccx = self.make_ccx()
    # the student already has an account and is enrolled in the master course
    enrollment = CourseEnrollmentFactory(course_id=self.course.id)
    student = enrollment.user
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([student.email, ]),  # pylint: disable=no-member
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    # redirect_chain entries are (url, status_code) tuples
    self.assertIn(302, response.redirect_chain[0])
    # an email went out only when 'email-students' was requested
    self.assertEqual(len(outbox), outbox_count)
    if send_email:
        self.assertIn(student.email, outbox[0].recipients())  # pylint: disable=no-member
    # a CcxMembership exists for this student
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=self.course.id, user=student).exists()
    )
def test_ccx_invite_enroll_up_to_limit(self):
    """
    Enrolls a list of students up to the enrollment limit.
    This test is specific to one of the enrollment views: the reason is because
    the view used in this test can perform bulk enrollments.
    """
    self.make_coach()
    # create ccx and limit the maximum amount of students that can be enrolled to 2
    ccx = self.make_ccx(max_students_allowed=2)
    ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    # create some users
    students = [
        UserFactory.create(is_staff=False) for _ in range(3)
    ]
    url = reverse(
        'ccx_invite',
        kwargs={'course_id': ccx_course_key}
    )
    # submit all three emails in a single bulk request
    data = {
        'enrollment-button': 'Enroll',
        'student-ids': u','.join([student.email for student in students]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership exists for the first two students but not the third
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
    )
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
    )
    self.assertFalse(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[2]).exists()
    )
def test_manage_student_enrollment_limit(self):
    """
    Enroll students up to the enrollment limit.
    This test is specific to one of the enrollment views: the reason is because
    the view used in this test cannot perform bulk enrollments.
    """
    students_limit = 1
    self.make_coach()
    ccx = self.make_ccx(max_students_allowed=students_limit)
    ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    students = [
        UserFactory.create(is_staff=False) for _ in range(2)
    ]
    url = reverse(
        'ccx_manage_student',
        kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
    )
    # enroll the first student
    data = {
        'student-action': 'add',
        'student-id': u','.join([students[0].email, ]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership exists for this student
    self.assertTrue(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
    )
    # try to enroll the second student without success
    # (the CCX is already at its one-student limit)
    data = {
        'student-action': 'add',
        'student-id': u','.join([students[1].email, ]),
    }
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # a CcxMembership does not exist for this student
    self.assertFalse(
        CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
    )
    # the view reports the limit back to the coach
    error_message = 'The course is full: the limit is {students_limit}'.format(
        students_limit=students_limit
    )
    self.assertContains(response, error_message, status_code=200)
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Unenroll')),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll')),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'revoke')),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'revoke')),
)
@ddt.unpack
def test_unenroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
    """
    Tests the unenrollment of a list of students who are members of the class.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    # the student is enrolled in the CCX course (course_key), not the master course
    enrollment = CourseEnrollmentFactory(course_id=course_key)
    student = enrollment.user
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([student.email, ]),  # pylint: disable=no-member
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    # redirect_chain entries are (url, status_code) tuples
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    if send_email:
        self.assertIn(student.email, outbox[0].recipients())  # pylint: disable=no-member
    # the student no longer has an active enrollment in the CCX.
    # Bug fix: the original assertion filtered CourseEnrollment on the master
    # course id (self.course.id), where this student was never enrolled, so it
    # passed vacuously whether or not the view actually unenrolled anyone.
    # Check active enrollment against the CCX key the student was enrolled with.
    self.assertFalse(CourseEnrollment.is_enrolled(student, course_key))
@ddt.data(
    ('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
    ('ccx_manage_student', True, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
    ('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
    ('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
)
@ddt.unpack
def test_enroll_non_user_student(
        self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
    """
    Tests the enrollment of a list of students who are not users yet.
    It tests 2 different views that use slightly different parameters,
    but that perform the same task.

    `identifier` is either an email (invite should be recorded as a
    CourseEnrollmentAllowed) or a plain username (invite should fail).
    """
    self.make_coach()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
    outbox = self.get_outbox()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([identifier, ]),
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    # redirect_chain entries are (url, status_code) tuples
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    # some error messages are returned for one of the views only
    if view_name == 'ccx_manage_student' and not is_email(identifier):
        error_message = 'Could not find a user with name or email "{identifier}" '.format(
            identifier=identifier
        )
        self.assertContains(response, error_message, status_code=200)
    if is_email(identifier):
        if send_email:
            # the invited address got the notification email...
            self.assertIn(identifier, outbox[0].recipients())
            # ...and a pending-enrollment record was created
            self.assertTrue(
                CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
            )
        else:
            self.assertFalse(
                CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
            )
@ddt.data(
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
    ('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
    ('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
)
@ddt.unpack
def test_unenroll_non_user_student(
        self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
    """
    Unenroll a list of students who are not users yet.

    A pending CourseEnrollmentAllowed record is created up front; after the
    POST it must be gone.
    """
    self.make_coach()
    course = CourseFactory.create()
    ccx = self.make_ccx()
    course_key = CCXLocator.from_course_locator(course.id, ccx.id)
    outbox = self.get_outbox()
    # Bug fix: the original built the CourseEnrollmentAllowed instance in
    # memory but never called save(), so no row was ever written and the
    # final assertFalse passed vacuously. Persist the record so this test
    # actually verifies the view removes it.
    CourseEnrollmentAllowed(course_id=course_key, email=identifier).save()
    self.assertEqual(outbox, [])
    url = reverse(
        view_name,
        kwargs={'course_id': course_key}
    )
    data = {
        button_tuple[0]: button_tuple[1],
        student_form_input_name: u','.join([identifier, ]),
    }
    if send_email:
        data['email-students'] = 'Notify-students-by-email'
    response = self.client.post(url, data=data, follow=True)
    self.assertEqual(response.status_code, 200)
    # we were redirected to our current location
    self.assertEqual(len(response.redirect_chain), 1)
    # redirect_chain entries are (url, status_code) tuples
    self.assertIn(302, response.redirect_chain[0])
    self.assertEqual(len(outbox), outbox_count)
    # the pending-enrollment record has been removed
    self.assertFalse(
        CourseEnrollmentAllowed.objects.filter(
            course_id=course_key, email=identifier
        ).exists()
    )
# Keep a reference to the unpatched method so the patch below can delegate to it.
GET_CHILDREN = XModuleMixin.get_children


def patched_get_children(self, usage_key_filter=None):
    """
    Replacement for XModuleMixin.get_children that emulates system tools
    which hide courseware not visible to students.

    Delegates to the original get_children, resets each child's field-data
    cache, and drops any child flagged visible_to_staff_only.
    """
    visible_children = []
    for child in GET_CHILDREN(self, usage_key_filter=usage_key_filter):
        child._field_data_cache = {}  # pylint: disable=protected-access
        if not child.visible_to_staff_only:
            visible_children.append(child)
    return visible_children
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
    'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
@patch('xmodule.x_module.XModuleMixin.get_children', patched_get_children, spec=True)
class TestCCXGrades(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Tests for Custom Courses views.

    The class-level patch replaces get_children with patched_get_children so
    the course tree is filtered the way a student would see it; the last
    section is hidden in setUp, so only 3 of the 4 homework sections count.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    @classmethod
    def setUpClass(cls):
        super(TestCCXGrades, cls).setUpClass()
        cls._course = course = CourseFactory.create(enable_ccx=True)
        # Create a course outline
        cls.mooc_start = start = datetime.datetime(
            2010, 5, 12, 2, 42, tzinfo=pytz.UTC
        )
        chapter = ItemFactory.create(
            start=start, parent=course, category='sequential'
        )
        # four graded homework sections
        cls.sections = sections = [
            ItemFactory.create(
                parent=chapter,
                category="sequential",
                metadata={'graded': True, 'format': 'Homework'})
            for _ in xrange(4)
        ]
        # making problems available at class level for possible future use in tests
        cls.problems = [
            [
                ItemFactory.create(
                    parent=section,
                    category="problem",
                    data=StringResponseXMLFactory().build_xml(answer='foo'),
                    metadata={'rerandomize': 'always'}
                ) for _ in xrange(4)
            ] for section in sections
        ]

    def setUp(self):
        """
        Set up tests
        """
        super(TestCCXGrades, self).setUp()
        # Create instructor account
        self.coach = coach = AdminFactory.create()
        self.client.login(username=coach.username, password="test")
        # Create CCX
        role = CourseCcxCoachRole(self._course.id)
        role.add_users(coach)
        ccx = CcxFactory(course_id=self._course.id, coach=self.coach)
        # override course grading policy and make last section invisible to students
        override_field_for_ccx(ccx, self._course, 'grading_policy', {
            'GRADER': [
                {'drop_count': 0,
                 'min_count': 2,
                 'short_label': 'HW',
                 'type': 'Homework',
                 'weight': 1}
            ],
            'GRADE_CUTOFFS': {'Pass': 0.75},
        })
        override_field_for_ccx(
            ccx, self.sections[-1], 'visible_to_staff_only', True
        )
        # create a ccx locator and retrieve the course structure using that key
        # which emulates how a student would get access.
        self.ccx_key = CCXLocator.from_course_locator(self._course.id, ccx.id)
        self.course = get_course_by_id(self.ccx_key, depth=None)
        setup_students_and_grades(self)
        self.client.login(username=coach.username, password="test")
        self.addCleanup(RequestCache.clear_request_cache)

    @patch('ccx.views.render_to_response', intercept_renderer)
    @patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
    def test_gradebook(self):
        """The CCX gradebook view paginates students and reports grades."""
        self.course.enable_ccx = True
        RequestCache.clear_request_cache()
        url = reverse(
            'ccx_gradebook',
            kwargs={'course_id': self.ccx_key}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Max number of student per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
        self.assertEqual(len(response.mako_context['students']), 1)  # pylint: disable=no-member
        student_info = response.mako_context['students'][0]  # pylint: disable=no-member
        self.assertEqual(student_info['grade_summary']['percent'], 0.5)
        self.assertEqual(
            student_info['grade_summary']['grade_breakdown'][0]['percent'],
            0.5)
        self.assertEqual(
            len(student_info['grade_summary']['section_breakdown']), 4)

    def test_grades_csv(self):
        """The CSV export excludes the hidden section and has correct grades."""
        self.course.enable_ccx = True
        RequestCache.clear_request_cache()
        url = reverse(
            'ccx_grades_csv',
            kwargs={'course_id': self.ccx_key}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Are the grades downloaded as an attachment?
        self.assertEqual(
            response['content-disposition'],
            'attachment'
        )
        rows = response.content.strip().split('\r')
        headers = rows[0]
        # picking first student records
        data = dict(zip(headers.strip().split(','), rows[1].strip().split(',')))
        # the section hidden in setUp must not appear in the export
        self.assertNotIn('HW 04', data)
        self.assertEqual(data['HW 01'], '0.75')
        self.assertEqual(data['HW 02'], '0.5')
        self.assertEqual(data['HW 03'], '0.25')
        self.assertEqual(data['HW Avg'], '0.5')

    @patch('courseware.views.render_to_response', intercept_renderer)
    def test_student_progress(self):
        """A student's progress page shows the CCX-overridden grade summary."""
        self.course.enable_ccx = True
        patch_context = patch('courseware.views.get_course_with_access')
        get_course = patch_context.start()
        get_course.return_value = self.course
        self.addCleanup(patch_context.stop)
        self.client.login(username=self.student.username, password="test")
        url = reverse(
            'progress',
            kwargs={'course_id': self.ccx_key}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        grades = response.mako_context['grade_summary']  # pylint: disable=no-member
        self.assertEqual(grades['percent'], 0.5)
        self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
        self.assertEqual(len(grades['section_breakdown']), 4)
@ddt.ddt
class CCXCoachTabTestCase(SharedModuleStoreTestCase):
    """
    Test case for CCX coach tab.
    """
    @classmethod
    def setUpClass(cls):
        super(CCXCoachTabTestCase, cls).setUpClass()
        # one course with CCX enabled, one with it disabled
        cls.ccx_enabled_course = CourseFactory.create(enable_ccx=True)
        cls.ccx_disabled_course = CourseFactory.create(enable_ccx=False)

    def setUp(self):
        super(CCXCoachTabTestCase, self).setUp()
        self.user = UserFactory.create()
        # enroll the user and grant the coach role on both courses
        for course in [self.ccx_enabled_course, self.ccx_disabled_course]:
            CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
            role = CourseCcxCoachRole(course.id)
            role.add_users(self.user)

    def check_ccx_tab(self, course):
        """Helper function for verifying the ccx tab."""
        request = RequestFactory().request()
        request.user = self.user
        all_tabs = get_course_tab_list(request, course)
        return any(tab.type == 'ccx_coach' for tab in all_tabs)

    @ddt.data(
        (True, True, True),
        (True, False, False),
        (False, True, False),
        (False, False, False),
        (True, None, False)
    )
    @ddt.unpack
    def test_coach_tab_for_ccx_advance_settings(self, ccx_feature_flag, enable_ccx, expected_result):
        """
        Test ccx coach tab state (visible or hidden) depending on the value of enable_ccx flag, ccx feature flag.

        The tab is shown only when both the platform feature flag and the
        course's enable_ccx setting are on.
        """
        with self.settings(FEATURES={'CUSTOM_COURSES_EDX': ccx_feature_flag}):
            # a falsy enable_ccx (False or None) selects the disabled course
            course = self.ccx_enabled_course if enable_ccx else self.ccx_disabled_course
            self.assertEquals(
                expected_result,
                self.check_ccx_tab(course)
            )
class TestStudentDashboardWithCCX(ModuleStoreTestCase):
    """
    Test to ensure that the student dashboard works for users enrolled in CCX
    courses.
    """

    def setUp(self):
        """
        Set up courses and enrollments.
        """
        super(TestStudentDashboardWithCCX, self).setUp()
        # Create a Draft Mongo and a Split Mongo course and enroll a student user in them.
        self.student_password = "foobar"
        self.student = UserFactory.create(username="test", password=self.student_password, is_staff=False)
        self.draft_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
        self.split_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
        CourseEnrollment.enroll(self.student, self.draft_course.id)
        CourseEnrollment.enroll(self.student, self.split_course.id)
        # Create a CCX coach.
        self.coach = AdminFactory.create()
        role = CourseCcxCoachRole(self.split_course.id)
        role.add_users(self.coach)
        # Create a CCX course and enroll the user in it.
        self.ccx = CcxFactory(course_id=self.split_course.id, coach=self.coach)
        last_week = datetime.datetime.now(UTC()) - datetime.timedelta(days=7)
        override_field_for_ccx(self.ccx, self.split_course, 'start', last_week)  # Required by self.ccx.has_started().
        course_key = CCXLocator.from_course_locator(self.split_course.id, self.ccx.id)
        CourseEnrollment.enroll(self.student, course_key)

    def test_load_student_dashboard(self):
        """The dashboard renders successfully and lists the CCX course."""
        self.client.login(username=self.student.username, password=self.student_password)
        response = self.client.get(reverse('dashboard'))
        self.assertEqual(response.status_code, 200)
        # 'Test CCX' is the CcxFactory default display name
        self.assertTrue(re.search('Test CCX', response.content))
| agpl-3.0 |
brain-tec/sale-workflow | sale_service_project/models/account_invoice.py | 9 | 1085 | # -*- coding: utf-8 -*-
# © 2015 Sergio Teruel <sergio.teruel@tecnativa.com>
# © 2015 Carlos Dauden <carlos.dauden@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import fields, models
class AccountInvoice(models.Model):
    """Extend invoices with a flag controlling the printed report content."""
    _inherit = 'account.invoice'

    # When set, the invoice report also prints related materials and works.
    print_works = fields.Boolean(
        string='Print materials and works', default=True)
class AccountInvoiceLine(models.Model):
    """Link invoice lines back to their sale order lines, works and materials."""
    _inherit = "account.invoice.line"

    # Sale order lines this invoice line was generated from (read-only link).
    sale_order_lines = fields.Many2many(
        comodel_name='sale.order.line',
        relation='sale_order_line_invoice_rel',
        column1='invoice_id', column2='order_line_id',
        readonly=True, string='Sale Order Lines')
    # Task work entries billed through this line.
    task_work_ids = fields.Many2many(
        comodel_name='project.task.work',
        column1='invoice_line_id', column2='work_line_id',
        readonly=True, string='Works')
    # Task materials billed through this line.
    task_materials_ids = fields.Many2many(
        comodel_name='project.task.materials',
        column1='invoice_line_id', column2='material_line_id',
        readonly=True, string='Materials')
| agpl-3.0 |
h3llrais3r/SickRage | tests/notifier_tests.py | 11 | 10390 | # coding=UTF-8
# URL: https://github.com/SickRage/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
###
# As a test case, there are instances in which it is necessary to call protected members of
# classes in order to test those classes. Therefore we will be pylint disable protected-access
###
# pylint: disable=line-too-long
"""
Test notifiers
"""
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard import db
from sickbeard.tv import TVEpisode, TVShow
from sickbeard.webserve import Home
from sickbeard.notifiers.emailnotify import Notifier as EmailNotifier
from sickbeard.notifiers.prowl import Notifier as ProwlNotifier
from sickrage.helper.encoding import ss
import tests.test_lib as test
class NotifierTests(test.SickbeardTestDBCase):  # pylint: disable=too-many-public-methods
    """
    Test notifiers
    """

    @classmethod
    def setUpClass(cls):
        # Build a small fixture library of shows/episodes once for the class.
        num_legacy_shows = 3
        num_shows = 3
        num_episodes_per_show = 5
        cls.mydb = db.DBConnection()
        cls.legacy_shows = []
        cls.shows = []
        # Per-show-notifications were originally added for email notifications only. To add
        # this feature to other notifiers, it was necessary to alter the way text is stored in
        # one of the DB columns. Therefore, to test properly, we must create some shows that
        # store emails in the old method (legacy method) and then other shows that will use
        # the new method.
        for show_counter in range(100, 100 + num_legacy_shows):
            show = TVShow(1, show_counter)
            show.name = "Show " + str(show_counter)
            show.episodes = []
            for episode_counter in range(0, num_episodes_per_show):
                episode = TVEpisode(show, test.SEASON, episode_counter)
                episode.name = "Episode " + str(episode_counter + 1)
                episode.quality = "SDTV"
                show.episodes.append(episode)
            show.saveToDB()
            cls.legacy_shows.append(show)
        for show_counter in range(200, 200 + num_shows):
            show = TVShow(1, show_counter)
            show.name = "Show " + str(show_counter)
            show.episodes = []
            for episode_counter in range(0, num_episodes_per_show):
                episode = TVEpisode(show, test.SEASON, episode_counter)
                episode.name = "Episode " + str(episode_counter + 1)
                episode.quality = "SDTV"
                show.episodes.append(episode)
            show.saveToDB()
            cls.shows.append(show)

    def setUp(self):
        """
        Set up tests
        """
        self._debug_spew("\n\r")

    @unittest.skip('Not yet implemented')
    def test_boxcar(self):
        """
        Test boxcar notifications
        """
        pass

    def test_email(self):
        """
        Test email notifications
        """
        email_notifier = EmailNotifier()
        # Per-show-email notifications were added early on and utilized a different format than the other notifiers.
        # Therefore, to test properly (and ensure backwards compatibility), this routine will test shows that use
        # both the old and the new storage methodology
        legacy_test_emails = "email-1@address.com,email2@address.org,email_3@address.tv"
        test_emails = "email-4@address.com,email5@address.org,email_6@address.tv"
        # legacy shows: raw comma-separated list written straight into the DB column
        for show in self.legacy_shows:
            showid = self._get_showid_by_showname(show.name)
            self.mydb.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [legacy_test_emails, showid])
        # new-style shows: stored through the webserve helper
        for show in self.shows:
            showid = self._get_showid_by_showname(show.name)
            Home.saveShowNotifyList(show=showid, emails=test_emails)
        # Now, iterate through all shows using the email list generation routines that are used in the notifier proper
        shows = self.legacy_shows + self.shows
        for show in shows:
            for episode in show.episodes:
                ep_name = ss(episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality)  # pylint: disable=protected-access
                show_name = email_notifier._parseEp(ep_name)  # pylint: disable=protected-access
                recipients = email_notifier._generate_recipients(show_name)  # pylint: disable=protected-access
                self._debug_spew("- Email Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
                for email in recipients:
                    self._debug_spew("-- " + email.strip())
                self._debug_spew("\n\r")
        return True

    @unittest.skip('Not yet implemented')
    def test_emby(self):
        """
        Test emby notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_freemobile(self):
        """
        Test freemobile notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_growl(self):
        """
        Test growl notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_kodi(self):
        """
        Test kodi notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_libnotify(self):
        """
        Test libnotify notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_nma(self):
        """
        Test nma notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_nmj(self):
        """
        Test nmj notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_nmjv2(self):
        """
        Test nmjv2 notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_plex(self):
        """
        Test plex notifications
        """
        pass

    def test_prowl(self):
        """
        Test prowl notifications
        """
        prowl_notifier = ProwlNotifier()
        # Prowl per-show-notifications only utilize the new methodology for storage; therefore, the list of legacy_shows
        # will not be altered (to preserve backwards compatibility testing)
        test_prowl_apis = "11111111111111111111,22222222222222222222"
        for show in self.shows:
            showid = self._get_showid_by_showname(show.name)
            Home.saveShowNotifyList(show=showid, prowlAPIs=test_prowl_apis)
        # Now, iterate through all shows using the Prowl API generation routines that are used in the notifier proper
        for show in self.shows:
            for episode in show.episodes:
                ep_name = ss(episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality)  # pylint: disable=protected-access
                show_name = prowl_notifier._parse_episode(ep_name)  # pylint: disable=protected-access
                recipients = prowl_notifier._generate_recipients(show_name)  # pylint: disable=protected-access
                self._debug_spew("- Prowl Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
                for api in recipients:
                    self._debug_spew("-- " + api.strip())
                self._debug_spew("\n\r")
        return True

    @unittest.skip('Not yet implemented')
    def test_pushalot(self):
        """
        Test pushalot notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_pushbullet(self):
        """
        Test pushbullet notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_pushover(self):
        """
        Test pushover notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_pytivo(self):
        """
        Test pytivo notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_synoindex(self):
        """
        Test synoindex notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_synologynotifier(self):
        """
        Test synologynotifier notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_trakt(self):
        """
        Test trakt notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_tweet(self):
        """
        Test tweet notifications
        """
        pass

    @unittest.skip('Not yet implemented')
    def test_twilio(self):
        """
        Test twilio notifications
        """
        pass

    @staticmethod
    def _debug_spew(text):
        """
        Spew text notifications

        Only prints when the module is run directly (not under a test runner).

        :param text: to spew
        :return:
        """
        if __name__ == '__main__' and text is not None:
            print(text)

    def _get_showid_by_showname(self, showname):
        """
        Get show ID by show name

        :param showname: exact show name to look up
        :return: the show_id, or -1 if not found (or name was None)
        """
        if showname is not None:
            rows = self.mydb.select("SELECT show_id FROM tv_shows WHERE show_name = ?", [showname])
            if len(rows) == 1:
                return rows[0][b'show_id']
        return -1
if __name__ == '__main__':
print("==================")
print("STARTING - NOTIFIER TESTS")
print("==================")
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(NotifierTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 |
jaanos/crypto-portal | githook.py | 2 | 1653 | # -*- coding: utf-8 -*-
from flask import *
from auth import gitkey
import json
import hmac
import ast
import sys
import os
from hashlib import sha1
app = Blueprint('githook', __name__)
paths = {'refs/heads/master': '/var/crypto-portal/',
'refs/heads/devel': '/var/crypto-devel/'}
@app.route("/githook", methods=['POST'])
def githook():
if request.headers.get('X-GitHub-Event') == "ping":
return json.dumps({'msg': 'Hi!'})
if request.headers.get('X-GitHub-Event') != "push":
return json.dumps({'msg': "wrong event type"})
payload = json.loads(request.data.decode("utf-8"))
signature = request.headers.get('X-Hub-Signature').split('=')[1]
mac = hmac.new(gitkey, msg=request.data, digestmod=sha1)
if not compare_digest(mac.hexdigest(), signature):
abort(403)
if "ref" in payload and payload["ref"] in paths:
path = paths[payload["ref"]]
os.system("%s/hooks/githook.sh %s" % (path, path))
return json.dumps({'msg': "OK"})
# Check if python version is less than 2.7.7
if sys.version_info < (2, 7, 7):
# http://blog.turret.io/hmac-in-go-python-ruby-php-and-nodejs/
def compare_digest(a, b):
"""
** From Django source **
Run a constant time comparison against two strings
Returns true if a and b are equal.
a and b must both be the same length, or False is
returned immediately
"""
if len(a) != len(b):
return False
result = 0
for ch_a, ch_b in zip(a, b):
result |= ord(ch_a) ^ ord(ch_b)
return result == 0
else:
compare_digest = hmac.compare_digest
| mit |
zhulin2609/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py | 122 | 2840 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
class UpdateChangeLogsWithReviewerTest(unittest.TestCase):
def test_guess_reviewer_from_bug(self):
capture = OutputCapture()
step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
expected_logs = "No reviewed patches on bug 50001, cannot infer reviewer.\n"
capture.assert_outputs(self, step._guess_reviewer_from_bug, [50001], expected_logs=expected_logs)
def test_guess_reviewer_from_multipatch_bug(self):
capture = OutputCapture()
step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
expected_logs = "Guessing \"Reviewer2\" as reviewer from attachment 10001 on bug 50000.\n"
capture.assert_outputs(self, step._guess_reviewer_from_bug, [50000], expected_logs=expected_logs)
def test_empty_state(self):
capture = OutputCapture()
options = MockOptions()
options.reviewer = 'MOCK reviewer'
options.git_commit = 'MOCK git commit'
step = UpdateChangeLogsWithReviewer(MockTool(), options)
capture.assert_outputs(self, step.run, [{}])
| bsd-3-clause |
lizardsystem/lizard-registration | lizard_registration/manager_forms.py | 1 | 4046 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt.
from django import forms
from django.utils import html
from lizard_security.models import UserGroup
from lizard_registration.models import Organisation
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.models import User
from django.core.validators import validate_email
def organisations():
"""Returns list of organisations."""
organisations = Organisation.objects.all()
choices = []
for organisation in organisations:
choices.append((organisation.id, organisation.name))
return choices
class UserSetPasswordForm(SetPasswordForm):
"""Form contains a hidden fields to check a user."""
token = forms.CharField(widget=forms.HiddenInput())
uid_base = forms.CharField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
self.token = kwargs['token']
self.uid_base = kwargs['uid_base']
kwargs.pop('token')
kwargs.pop('uid_base')
super(UserSetPasswordForm, self).__init__(*args, **kwargs)
self.fields["token"].initial = self.token
self.fields["uid_base"].initial = self.uid_base
class CreateUserForm(forms.Form):
"""Form to create and activate a user."""
def __init__(self, *args, **kwargs):
self.groups_queryset = kwargs['groups_queryset']
kwargs.pop('groups_queryset')
super(CreateUserForm, self).__init__(*args, **kwargs)
self.fields["groups"].widget = forms.CheckboxSelectMultiple()
self.fields['groups'].queryset = self.groups_queryset
username = forms.CharField(max_length=30,
label='Gebruikersnaam')
first_name = forms.CharField(max_length=30,
label='Voornaam',
required=False)
last_name = forms.CharField(max_length=30,
label='Achternaam',
required=False)
email = forms.EmailField(max_length=200, label='E-mail', required=True)
groups = forms.ModelMultipleChoiceField(label='GebruikersGroepen',
required=False,
queryset=UserGroup.objects.none(),
widget=forms.CheckboxSelectMultiple())
def clean_username(self):
username = self.cleaned_data.get('username')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(u'%s already exists' % username)
def clean_email(self):
email = self.cleaned_data.get('email')
users = User.objects.filter(email=email)
if users.exists():
raise forms.ValidationError(u'%s already exists' % email)
return email
class UpdateUserForm(CreateUserForm):
"""Form contains the fields to manage a user."""
def __init__(self, *args, **kwargs):
self.user_id = kwargs['user_id']
kwargs.pop('user_id')
super(UpdateUserForm, self).__init__(*args, **kwargs)
is_active = forms.BooleanField(required=False, label='Actief')
def clean_username(self):
username = self.cleaned_data.get('username')
try:
user = User.objects.get(username=username)
if self.user_id == unicode(user.id):
return username
except User.DoesNotExist:
return username
raise forms.ValidationError(u'%s already exists' % username )
def clean_email(self):
email = self.cleaned_data.get('email')
try:
users = User.objects.get(email=email)
if self.user_id == unicode(users.id):
return email
except User.DoesNotExist:
return email
except User.MultipleObjectsReturned:
raise forms.ValidationError(u'%s already exists' % email)
raise forms.ValidationError(u'%s already exists' % email)
| gpl-3.0 |
Lyrositor/moul-scripts | Python/system/genericpath.py | 246 | 3015 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except os.error:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path ono systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, ''
| gpl-3.0 |
junneyang/taskflow | taskflow/engines/action_engine/executor.py | 1 | 22196 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from multiprocessing import managers
import os
import pickle
import threading
import futurist
from oslo_utils import excutils
from oslo_utils import reflection
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import queue as compat_queue
from taskflow import logging
from taskflow import task as task_atom
from taskflow.types import failure
from taskflow.types import notifier
from taskflow.utils import threading_utils
# Execution and reversion events.
EXECUTED = 'executed'
REVERTED = 'reverted'
# See http://bugs.python.org/issue1457119 for why this is so complex...
_PICKLE_ERRORS = [pickle.PickleError, TypeError]
try:
import cPickle as _cPickle
_PICKLE_ERRORS.append(_cPickle.PickleError)
except ImportError:
pass
_PICKLE_ERRORS = tuple(_PICKLE_ERRORS)
_SEND_ERRORS = (IOError, EOFError)
_UPDATE_PROGRESS = task_atom.EVENT_UPDATE_PROGRESS
# Message types/kind sent from worker/child processes...
_KIND_COMPLETE_ME = 'complete_me'
_KIND_EVENT = 'event'
LOG = logging.getLogger(__name__)
def _execute_retry(retry, arguments):
try:
result = retry.execute(**arguments)
except Exception:
result = failure.Failure()
return (EXECUTED, result)
def _revert_retry(retry, arguments):
try:
result = retry.revert(**arguments)
except Exception:
result = failure.Failure()
return (REVERTED, result)
def _execute_task(task, arguments, progress_callback=None):
with notifier.register_deregister(task.notifier,
_UPDATE_PROGRESS,
callback=progress_callback):
try:
task.pre_execute()
result = task.execute(**arguments)
except Exception:
# NOTE(imelnikov): wrap current exception with Failure
# object and return it.
result = failure.Failure()
finally:
task.post_execute()
return (EXECUTED, result)
def _revert_task(task, arguments, result, failures, progress_callback=None):
arguments = arguments.copy()
arguments[task_atom.REVERT_RESULT] = result
arguments[task_atom.REVERT_FLOW_FAILURES] = failures
with notifier.register_deregister(task.notifier,
_UPDATE_PROGRESS,
callback=progress_callback):
try:
task.pre_revert()
result = task.revert(**arguments)
except Exception:
# NOTE(imelnikov): wrap current exception with Failure
# object and return it.
result = failure.Failure()
finally:
task.post_revert()
return (REVERTED, result)
class _ViewableSyncManager(managers.SyncManager):
"""Manager that exposes its state as methods."""
def is_shutdown(self):
return self._state.value == managers.State.SHUTDOWN
def is_running(self):
return self._state.value == managers.State.STARTED
class _Channel(object):
"""Helper wrapper around a multiprocessing queue used by a worker."""
def __init__(self, queue, identity):
self._queue = queue
self._identity = identity
self._sent_messages = collections.defaultdict(int)
self._pid = None
@property
def sent_messages(self):
return self._sent_messages
def put(self, message):
# NOTE(harlowja): this is done in late in execution to ensure that this
# happens in the child process and not the parent process (where the
# constructor is called).
if self._pid is None:
self._pid = os.getpid()
message.update({
'sent_on': timeutils.utcnow(),
'sender': {
'pid': self._pid,
'id': self._identity,
},
})
if 'body' not in message:
message['body'] = {}
try:
self._queue.put(message)
except _PICKLE_ERRORS:
LOG.warn("Failed serializing message %s", message, exc_info=True)
return False
except _SEND_ERRORS:
LOG.warn("Failed sending message %s", message, exc_info=True)
return False
else:
self._sent_messages[message['kind']] += 1
return True
class _WaitWorkItem(object):
"""The piece of work that will executed by a process executor.
This will call the target function, then wait until the tasks emitted
events/items have been depleted before offically being finished.
NOTE(harlowja): this is done so that the task function will *not* return
until all of its notifications have been proxied back to its originating
task. If we didn't do this then the executor would see this task as done
and then potentially start tasks that are successors of the task that just
finished even though notifications are still left to be sent from the
previously finished task...
"""
def __init__(self, channel, barrier,
func, task, *args, **kwargs):
self._channel = channel
self._barrier = barrier
self._func = func
self._task = task
self._args = args
self._kwargs = kwargs
def _on_finish(self):
sent_events = self._channel.sent_messages.get(_KIND_EVENT, 0)
if sent_events:
message = {
'created_on': timeutils.utcnow(),
'kind': _KIND_COMPLETE_ME,
}
if self._channel.put(message):
watch = timeutils.StopWatch()
watch.start()
self._barrier.wait()
LOG.blather("Waited %s seconds until task '%s' %s emitted"
" notifications were depleted", watch.elapsed(),
self._task, sent_events)
def __call__(self):
args = self._args
kwargs = self._kwargs
try:
return self._func(self._task, *args, **kwargs)
finally:
self._on_finish()
class _EventSender(object):
"""Sends event information from a child worker process to its creator."""
def __init__(self, channel):
self._channel = channel
def __call__(self, event_type, details):
message = {
'created_on': timeutils.utcnow(),
'kind': _KIND_EVENT,
'body': {
'event_type': event_type,
'details': details,
},
}
self._channel.put(message)
class _Target(object):
"""An immutable helper object that represents a target of a message."""
def __init__(self, task, barrier, identity):
self.task = task
self.barrier = barrier
self.identity = identity
# Counters used to track how many message 'kinds' were proxied...
self.dispatched = collections.defaultdict(int)
def __repr__(self):
return "<%s at 0x%x targeting '%s' with identity '%s'>" % (
reflection.get_class_name(self), id(self),
self.task, self.identity)
class _Dispatcher(object):
"""Dispatches messages received from child worker processes."""
# When the run() method is busy (typically in a thread) we want to set
# these so that the thread can know how long to sleep when there is no
# active work to dispatch.
_SPIN_PERIODICITY = 0.01
def __init__(self, dispatch_periodicity=None):
if dispatch_periodicity is None:
dispatch_periodicity = self._SPIN_PERIODICITY
if dispatch_periodicity <= 0:
raise ValueError("Provided dispatch periodicity must be greater"
" than zero and not '%s'" % dispatch_periodicity)
self._targets = {}
self._dead = threading.Event()
self._dispatch_periodicity = dispatch_periodicity
self._stop_when_empty = False
def register(self, identity, target):
self._targets[identity] = target
def deregister(self, identity):
try:
target = self._targets.pop(identity)
except KeyError:
pass
else:
# Just incase set the barrier to unblock any worker...
target.barrier.set()
if LOG.isEnabledFor(logging.BLATHER):
LOG.blather("Dispatched %s messages %s to target '%s' during"
" the lifetime of its existence in the dispatcher",
sum(six.itervalues(target.dispatched)),
dict(target.dispatched), target)
def reset(self):
self._stop_when_empty = False
self._dead.clear()
if self._targets:
leftover = set(six.iterkeys(self._targets))
while leftover:
self.deregister(leftover.pop())
def interrupt(self):
self._stop_when_empty = True
self._dead.set()
def _dispatch(self, message):
if LOG.isEnabledFor(logging.BLATHER):
LOG.blather("Dispatching message %s (it took %s seconds"
" for it to arrive for processing after being"
" sent)", message,
timeutils.delta_seconds(message['sent_on'],
timeutils.utcnow()))
try:
kind = message['kind']
sender = message['sender']
body = message['body']
except (KeyError, ValueError, TypeError):
LOG.warn("Badly formatted message %s received", message,
exc_info=True)
return
target = self._targets.get(sender['id'])
if target is None:
# Must of been removed...
return
if kind == _KIND_COMPLETE_ME:
target.dispatched[kind] += 1
target.barrier.set()
elif kind == _KIND_EVENT:
task = target.task
target.dispatched[kind] += 1
task.notifier.notify(body['event_type'], body['details'])
else:
LOG.warn("Unknown message '%s' found in message from sender"
" %s to target '%s'", kind, sender, target)
def run(self, queue):
watch = timeutils.StopWatch(duration=self._dispatch_periodicity)
while (not self._dead.is_set() or
(self._stop_when_empty and self._targets)):
watch.restart()
leftover = watch.leftover()
while leftover:
try:
message = queue.get(timeout=leftover)
except compat_queue.Empty:
break
else:
self._dispatch(message)
leftover = watch.leftover()
leftover = watch.leftover()
if leftover:
self._dead.wait(leftover)
class SerialRetryExecutor(object):
"""Executes and reverts retries."""
def __init__(self):
self._executor = futurist.SynchronousExecutor()
def start(self):
"""Prepare to execute retries."""
self._executor.restart()
def stop(self):
"""Finalize retry executor."""
self._executor.shutdown()
def execute_retry(self, retry, arguments):
"""Schedules retry execution."""
fut = self._executor.submit(_execute_retry, retry, arguments)
fut.atom = retry
return fut
def revert_retry(self, retry, arguments):
"""Schedules retry reversion."""
fut = self._executor.submit(_revert_retry, retry, arguments)
fut.atom = retry
return fut
@six.add_metaclass(abc.ABCMeta)
class TaskExecutor(object):
"""Executes and reverts tasks.
This class takes task and its arguments and executes or reverts it.
It encapsulates knowledge on how task should be executed or reverted:
right now, on separate thread, on another machine, etc.
"""
@abc.abstractmethod
def execute_task(self, task, task_uuid, arguments,
progress_callback=None):
"""Schedules task execution."""
@abc.abstractmethod
def revert_task(self, task, task_uuid, arguments, result, failures,
progress_callback=None):
"""Schedules task reversion."""
def start(self):
"""Prepare to execute tasks."""
def stop(self):
"""Finalize task executor."""
class SerialTaskExecutor(TaskExecutor):
"""Executes tasks one after another."""
def __init__(self):
self._executor = futurist.SynchronousExecutor()
def start(self):
self._executor.restart()
def stop(self):
self._executor.shutdown()
def execute_task(self, task, task_uuid, arguments, progress_callback=None):
fut = self._executor.submit(_execute_task,
task, arguments,
progress_callback=progress_callback)
fut.atom = task
return fut
def revert_task(self, task, task_uuid, arguments, result, failures,
progress_callback=None):
fut = self._executor.submit(_revert_task,
task, arguments, result, failures,
progress_callback=progress_callback)
fut.atom = task
return fut
class ParallelTaskExecutor(TaskExecutor):
"""Executes tasks in parallel.
Submits tasks to an executor which should provide an interface similar
to concurrent.Futures.Executor.
"""
#: Options this executor supports (passed in from engine options).
OPTIONS = frozenset(['max_workers'])
def __init__(self, executor=None, max_workers=None):
self._executor = executor
self._max_workers = max_workers
self._own_executor = executor is None
@abc.abstractmethod
def _create_executor(self, max_workers=None):
"""Called when an executor has not been provided to make one."""
def _submit_task(self, func, task, *args, **kwargs):
fut = self._executor.submit(func, task, *args, **kwargs)
fut.atom = task
return fut
def execute_task(self, task, task_uuid, arguments, progress_callback=None):
return self._submit_task(_execute_task, task, arguments,
progress_callback=progress_callback)
def revert_task(self, task, task_uuid, arguments, result, failures,
progress_callback=None):
return self._submit_task(_revert_task, task, arguments, result,
failures, progress_callback=progress_callback)
def start(self):
if self._own_executor:
self._executor = self._create_executor(
max_workers=self._max_workers)
def stop(self):
if self._own_executor:
self._executor.shutdown(wait=True)
self._executor = None
class ParallelThreadTaskExecutor(ParallelTaskExecutor):
"""Executes tasks in parallel using a thread pool executor."""
def _create_executor(self, max_workers=None):
return futurist.ThreadPoolExecutor(max_workers=max_workers)
class ParallelProcessTaskExecutor(ParallelTaskExecutor):
"""Executes tasks in parallel using a process pool executor.
NOTE(harlowja): this executor executes tasks in external processes, so that
implies that tasks that are sent to that external process are pickleable
since this is how the multiprocessing works (sending pickled objects back
and forth) and that the bound handlers (for progress updating in
particular) are proxied correctly from that external process to the one
that is alive in the parent process to ensure that callbacks registered in
the parent are executed on events in the child.
"""
#: Options this executor supports (passed in from engine options).
OPTIONS = frozenset(['max_workers', 'dispatch_periodicity'])
def __init__(self, executor=None, max_workers=None,
dispatch_periodicity=None):
super(ParallelProcessTaskExecutor, self).__init__(
executor=executor, max_workers=max_workers)
self._manager = _ViewableSyncManager()
self._dispatcher = _Dispatcher(
dispatch_periodicity=dispatch_periodicity)
# Only created after starting...
self._worker = None
self._queue = None
def _create_executor(self, max_workers=None):
return futurist.ProcessPoolExecutor(max_workers=max_workers)
def start(self):
if threading_utils.is_alive(self._worker):
raise RuntimeError("Worker thread must be stopped via stop()"
" before starting/restarting")
super(ParallelProcessTaskExecutor, self).start()
# These don't seem restartable; make a new one...
if self._manager.is_shutdown():
self._manager = _ViewableSyncManager()
if not self._manager.is_running():
self._manager.start()
self._dispatcher.reset()
self._queue = self._manager.Queue()
self._worker = threading_utils.daemon_thread(self._dispatcher.run,
self._queue)
self._worker.start()
def stop(self):
self._dispatcher.interrupt()
super(ParallelProcessTaskExecutor, self).stop()
if threading_utils.is_alive(self._worker):
self._worker.join()
self._worker = None
self._queue = None
self._dispatcher.reset()
self._manager.shutdown()
self._manager.join()
def _rebind_task(self, task, clone, channel, progress_callback=None):
# Creates and binds proxies for all events the task could receive
# so that when the clone runs in another process that this task
# can recieve the same notifications (thus making it look like the
# the notifications are transparently happening in this process).
needed = set()
for (event_type, listeners) in task.notifier.listeners_iter():
if listeners:
needed.add(event_type)
if progress_callback is not None:
needed.add(_UPDATE_PROGRESS)
if needed:
sender = _EventSender(channel)
for event_type in needed:
clone.notifier.register(event_type, sender)
def _submit_task(self, func, task, *args, **kwargs):
"""Submit a function to run the given task (with given args/kwargs).
NOTE(harlowja): Adjust all events to be proxies instead since we want
those callbacks to be activated in this process, not in the child,
also since typically callbacks are functors (or callables) we can
not pickle those in the first place...
To make sure people understand how this works, the following is a
lengthy description of what is going on here, read at will:
So to ensure that we are proxying task triggered events that occur
in the executed subprocess (which will be created and used by the
thing using the multiprocessing based executor) we need to establish
a link between that process and this process that ensures that when a
event is triggered in that task in that process that a corresponding
event is triggered on the original task that was requested to be ran
in this process.
To accomplish this we have to create a copy of the task (without
any listeners) and then reattach a new set of listeners that will
now instead of calling the desired listeners just place messages
for this process (a dispatcher thread that is created in this class)
to dispatch to the original task (using a common queue + per task
sender identity/target that is used and associated to know which task
to proxy back too, since it is possible that there many be *many*
subprocess running at the same time, each running a different task
and using the same common queue to submit messages back to).
Once the subprocess task has finished execution, the executor will
then trigger a callback that will remove the task + target from the
dispatcher (which will stop any further proxying back to the original
task).
"""
progress_callback = kwargs.pop('progress_callback', None)
clone = task.copy(retain_listeners=False)
identity = uuidutils.generate_uuid()
target = _Target(task, self._manager.Event(), identity)
channel = _Channel(self._queue, identity)
self._rebind_task(task, clone, channel,
progress_callback=progress_callback)
def register():
if progress_callback is not None:
task.notifier.register(_UPDATE_PROGRESS, progress_callback)
self._dispatcher.register(identity, target)
def deregister():
if progress_callback is not None:
task.notifier.deregister(_UPDATE_PROGRESS, progress_callback)
self._dispatcher.deregister(identity)
register()
work = _WaitWorkItem(channel, target.barrier,
func, clone, *args, **kwargs)
try:
fut = self._executor.submit(work)
except RuntimeError:
with excutils.save_and_reraise_exception():
deregister()
fut.atom = task
fut.add_done_callback(lambda fut: deregister())
return fut
| apache-2.0 |
Shengliang/mbed | workspace_tools/dev/syms.py | 120 | 2186 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Utility to find which libraries could define a given symbol
"""
from argparse import ArgumentParser
from os.path import join, splitext
from os import walk
from subprocess import Popen, PIPE
OBJ_EXT = ['.o', '.a', '.ar']
def find_sym_in_lib(sym, obj_path):
contain_symbol = False
out = Popen(["nm", "-C", obj_path], stdout=PIPE, stderr=PIPE).communicate()[0]
for line in out.splitlines():
tokens = line.split()
n = len(tokens)
if n == 2:
sym_type = tokens[0]
sym_name = tokens[1]
elif n == 3:
sym_type = tokens[1]
sym_name = tokens[2]
else:
continue
if sym_type == "U":
# This object is using this symbol, not defining it
continue
if sym_name == sym:
contain_symbol = True
return contain_symbol
def find_sym_in_path(sym, dir_path):
for root, _, files in walk(dir_path):
for file in files:
_, ext = splitext(file)
if ext not in OBJ_EXT: continue
path = join(root, file)
if find_sym_in_lib(sym, path):
print path
if __name__ == '__main__':
parser = ArgumentParser(description='Find Symbol')
parser.add_argument('-s', '--sym', required=True,
help='The symbol to be searched')
parser.add_argument('-p', '--path', required=True,
help='The path where to search')
args = parser.parse_args()
find_sym_in_path(args.sym, args.path)
| apache-2.0 |
ratchet7/ci-slider | cgi-bin/example6.py | 2 | 6542 | #!/usr/bin/env python
"""example"""
import os
import cgi
import PubGlobals
import cgitb; #cgitb.enable()
import random
import MySQLdb
PubGlobals.header()
print"""
<script type="text/javascript">
function alert1(text1, text2)
{
alert("Ideology: "+text1+ " Uncertainty: "+text2);
}
</script>
</head>
<body>
<form method="post" action="example6a.py">
<p>Here's what your answer would look like if your <b> best
guess was that a different politician was very conservative and you were 95 percent certain that the politician was close to very conservative.</b>
<br/>
<br/>
<p> This means that you believe there is only a <b>5 percent chance that the politician is more liberal than very conservative.
</b>
<br/>
<br/>
<p> Click Next.
<div id="box" class="jxgbox" style="width:750px; height:225px;"></div>
<script type="text/javascript">
/* Create board */
var b = JXG.JSXGraph.initBoard('box', {boundingbox: [-60, 10, 75, -12], axis:false, grid:false, showCopyright:false, showNavigation:false});
/* Create ideology placement line */
var iline = b.create('line',[[-50,0],[50,0]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:3, highlightstrokeWidth:3, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
/* Create vertical markers at ends and midpoint */
var ilineleft = b.create('line',[[-50,1],[-50,-1]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:1, highlightstrokeWidth:1, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
var ilineright = b.create('line',[[50,1],[50,-1]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:1, highlightstrokeWidth:1, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
var ilinemiddle = b.create('line',[[0,1],[0,-1]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:1, highlightstrokeWidth:1, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
var ilinemiddle = b.create('line',[[-25,1],[-25,-1]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:1, highlightstrokeWidth:1, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
var ilinemiddle = b.create('line',[[25,1],[25,-1]], {fixed:true, straightFirst:false, straightLast:false, strokeWidth:1, highlightstrokeWidth:1, fillColor : '#000000', strokeColor : '#000000', highlightFillColor : '#000000', highlightStrokeColor : '#000000'});
/* Label endpoints and midpoint */
ilinelefttxt1 = b.create('text',[-52,-2, 'Very'], {fixed:true, fontSize:12});
ilinelefttxt2 = b.create('text',[-53,-3.5, 'Liberal'], {fixed:true, fontSize:12});
ilinerighttxt1 = b.create('text',[48,-2, 'Very'], {fixed:true, fontSize:12});
ilinerighttxt2 = b.create('text',[45,-3.5, 'Conservative'], {fixed:true, fontSize:12});
ilinemiddletxt1 = b.create('text',[-4,-2, 'Moderate'], {fixed:true, fontSize:12});
ilinerighttxt1 = b.create('text',[21,-2, 'Somewhat'], {fixed:true, fontSize:12});
ilinerighttxt2 = b.create('text',[20,-3.5, 'Conservative'], {fixed:true, fontSize:12});
ilinerighttxt1 = b.create('text',[-29,-2, 'Somewhat'], {fixed:true, fontSize:12});
ilinerighttxt2 = b.create('text',[-28,-3.5, 'Liberal'], {fixed:true, fontSize:12});
/* Create self placement slider */
var selfplaceslider = b.create('slider',[[-50,1.5],[50,1.5],[-50, 50,50]],
{
withLabel:false, withTicks:false, face:'v', strokeColor:'#000000', highlightStrokeColor:'#000000', fillColor:'#000000', highlightFillColor:'#000000', size:12, fixed:true,
baseline:{strokeWidth:0,highlightstrokeWidth:0,strokeColor:'#000000',highlightStrokeColor:'#000000', visible:false },
highline: {strokeWidth:0,highlightstrokeWidth:0,strokeColor:'#000000',highlightStrokeColor:'#000000', visible:false }
});
var selfplaceline = b.create('line',[[function(x){ return selfplaceslider.Value();},3],
[function(x){ return selfplaceslider.Value();},2]], {straightFirst:false, straightLast:false, strokeWidth:3, highlightstrokeWidth:3, fillColor:'#000000', strokeColor:'#000000', highlightFillColor:'#000000', highlightStrokeColor:'#000000'});
var selfplacelabel = b.create('text', [function(x){ return selfplaceslider.Value()-1.6;}, 4,'Politician'],
{fontSize:14});
/* Variance slider */
var varianceslider = b.create('slider',[[63,-5],[63,5],[21,0,1]],
{
withLabel:false, withTicks: false, face:'o', strokeColor : '#000000', highlightStrokeColor : '#000000', fillColor : '#FF0000', highlightFillColor : '#FF0000', size: 6, fixed: true,
baseline: {strokeWidth: 3, highlightstrokeWidth: 3, strokeColor : '#000000', highlightStrokeColor : '#000000', visible: true },
highline: {strokeWidth: 3, highlightstrokeWidth: 3, strokeColor : '#000000', highlightStrokeColor : '#000000', visible: true }
});
var poweradj=.25
/* normal distribution approximated using logistic
var temp=b.create('functiongraph', [function(x){return 12*(1/Math.pow(varianceslider.Value()*2.506628274631000502415765284811,poweradj))*Math.pow(2.718281828,-(((x-selfplaceslider.Value())*(x-selfplaceslider.Value()))/(varianceslider.Value()*varianceslider.Value()*2)));},-60,60]); */
/* This approximation reduces the variance adjustment (the part out front, 1/...) to decreae more slowly so the curve is still visible for higher values Current this value is set at poweradj*/
/* Label 95 percent percentiles */
var nfline = b.create('line',[[function(x){ return selfplaceslider.Value()-2*varianceslider.Value();},-.25],[function(x){ return selfplaceslider.Value()+2*varianceslider.Value();},-.25]],{straightFirst:false, straightLast:false, strokeWidth:10, highlightstrokeWidth:10, fillColor : '#FF0000', strokeColor : '#FF0000', highlightFillColor : '#FF0000', highlightStrokeColor : '#FF0000'});
/* var nflabel1 = b.create('text', [function(x){ return selfplaceslider.Value()-10;}, -5,'95 times out a 100, the true'], {fontSize:12});
var nflabel2 = b.create('text', [function(x){ return selfplaceslider.Value()-10;}, -7,'value will be in the red range'], {fontSize:12});
*/
</script>
<form method="post" />
<p><input type="submit" value="Next">
<input type="hidden" name="user_id" value="%s">
</form>
</body>
</html>
"""
| gpl-3.0 |
ShyamSS-95/Bolt | example_problems/nonrelativistic_boltzmann/advected_gaussian_pulse_in_p_space/1V/with_only_E1/main.py | 3 | 1524 | import arrayfire as af
import numpy as np
import h5py
from bolt.lib.physical_system import physical_system
from bolt.lib.nonlinear_solver.nonlinear_solver import nonlinear_solver
import domain
import boundary_conditions
import params
import initialize
import bolt.src.nonrelativistic_boltzmann.advection_terms as advection_terms
import bolt.src.nonrelativistic_boltzmann.collision_operator as collision_operator
import bolt.src.nonrelativistic_boltzmann.moment_defs as moment_defs
# Defining the physical system to be solved:
# bundles the domain, BCs, params, initial conditions, advection terms,
# BGK collision operator and moment definitions into one object.
system = physical_system(domain,
                         boundary_conditions,
                         params,
                         initialize,
                         advection_terms,
                         collision_operator.BGK,
                         moment_defs
                        )
# NOTE(review): N_g_q is assigned but never used below in this script.
N_g_q = system.N_ghost_q
# Declaring a linear system object which will evolve the defined physical system:
nls = nonlinear_solver(system)
# Time parameters:
dt = 0.001
t_final = 0.4
# Includes both endpoints: 0 and t_final.
time_array = np.arange(0, t_final + dt, dt)
# Storing data at time t = 0:
h5f = h5py.File('dump/0000.h5', 'w')
h5f.create_dataset('distribution_function', data = nls.f)
h5f.create_dataset('p1', data = nls.p1_center)
h5f.close()
# Main time loop: one Strang-split step per dt, dumping f and p1 after each
# step to dump/NNNN.h5 (file index is 1-based; 0000.h5 is the initial state).
for time_index, t0 in enumerate(time_array[1:]):
    nls.strang_timestep(dt)
    h5f = h5py.File('dump/%04d'%(time_index+1) + '.h5', 'w')
    h5f.create_dataset('distribution_function', data = nls.f)
    h5f.create_dataset('p1', data = nls.p1_center)
    h5f.close()
| gpl-3.0 |
andresailer/DIRAC | Interfaces/scripts/dirac-admin-ban-site.py | 4 | 3043 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-ban-site
# Author : Stuart Paterson
########################################################################
"""
Remove Site from Active mask for current Setup
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.PromptUser import promptUser
# Register the -E/--email switch and the usage text, then parse the command
# line before importing the heavyweight DIRAC API modules (imports after
# parseCommandLine follow the usual DIRAC script pattern seen here).
Script.registerSwitch( "E:", "email=", "Boolean True/False (True by default)" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     ' %s [option|cfgfile] ... Site Comment' % Script.scriptName,
                                     'Arguments:',
                                     ' Site: Name of the Site',
                                     ' Comment: Reason of the action' ] ) )
Script.parseCommandLine( ignoreErrors = True )
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC import exit as DIRACExit, gConfig, gLogger
import time
def getBoolean( value ):
  """Parse a 'True'/'False' string (case-insensitive) into a bool.

  Any other value is treated as a usage error and shows the help screen.
  """
  lowered = value.lower()
  if lowered == 'true':
    return True
  if lowered == 'false':
    return False
  Script.showHelp()
# Notification is on by default; -E/--email False disables it.
email = True
for switch in Script.getUnprocessedSwitches():
  if switch[0] == "email":
    email = getBoolean( switch[1] )
# Positional arguments: Site and Comment are both mandatory.
args = Script.getPositionalArgs()
if len( args ) < 2:
  Script.showHelp()
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
# The current setup is needed for the notification mail subject/body.
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
  print 'ERROR: Could not contact Configuration Service'
  exitCode = 2
  DIRACExit( exitCode )
# Confirmation prompt deliberately disabled.
#result = promptUser( 'All the elements that are associated with this site will be banned, are you sure about this action?' )
#if not result['OK'] or result['Value'] is 'n':
# print 'Script stopped'
# DIRACExit( 0 )
site = args[0]
comment = args[1]
# Ban the site; on success optionally notify the production mailing address.
result = diracAdmin.banSite( site, comment, printOutput = True )
if not result['OK']:
  errorList.append( ( site, result['Message'] ) )
  exitCode = 2
else:
  if email:
    # The mail is attributed to the proxy owner running the script.
    userName = diracAdmin._getCurrentUser()
    if not userName['OK']:
      print 'ERROR: Could not obtain current username from proxy'
      exitCode = 2
      DIRACExit( exitCode )
    userName = userName['Value']
    subject = '%s is banned for %s setup' % ( site, setup )
    body = 'Site %s is removed from site mask for %s setup by %s on %s.\n\n' % ( site, setup, userName, time.asctime() )
    body += 'Comment:\n%s' % comment
    # Destination address comes from Operations config; if missing, log the
    # would-be mail body instead of sending.
    addressPath = 'EMail/Production'
    address = Operations().getValue( addressPath, '' )
    if not address:
      gLogger.notice( "'%s' not defined in Operations, can not send Mail\n" % addressPath, body )
    else:
      result = diracAdmin.sendMail( address, subject, body )
  else:
    print 'Automatic email disabled by flag.'
for error in errorList:
  print "ERROR %s: %s" % error
DIRACExit( exitCode )
| gpl-3.0 |
florian-dacosta/OpenUpgrade | addons/resource/resource.py | 33 | 42181 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
class resource_calendar(osv.osv):
""" Calendar model for a resource. It has
- attendance_ids: list of resource.calendar.attendance that are a working
interval in a given weekday.
- leave_ids: list of leaves linked to this calendar. A leave can be general
or linked to a specific resource, depending on its resource_id.
All methods in this class use intervals. An interval is a tuple holding
(begin_datetime, end_datetime). A list of intervals is therefore a list of
tuples, holding several intervals of work or leaves. """
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name': fields.char("Name", size=64, required=True),
'company_id': fields.many2one('res.company', 'Company', required=False),
'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time'),
'manager': fields.many2one('res.users', 'Workgroup Manager'),
'leave_ids': fields.one2many(
'resource.calendar.leaves', 'calendar_id', 'Leaves',
help=''
),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
# --------------------------------------------------
# Utility methods
# --------------------------------------------------
def interval_clean(self, intervals):
""" Utility method that sorts and removes overlapping inside datetime
intervals. The intervals are sorted based on increasing starting datetime.
Overlapping intervals are merged into a single one.
:param list intervals: list of intervals; each interval is a tuple
(datetime_from, datetime_to)
:return list cleaned: list of sorted intervals without overlap """
intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime
cleaned = []
working_interval = None
while intervals:
current_interval = intervals.pop(0)
if not working_interval: # init
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[0]: # interval is disjoint
cleaned.append(tuple(working_interval))
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[1]: # union of greater intervals
working_interval[1] = current_interval[1]
if working_interval: # handle void lists
cleaned.append(tuple(working_interval))
return cleaned
    def interval_remove_leaves(self, interval, leave_intervals):
        """ Utility method that remove leave intervals from a base interval:
        - clean the leave intervals, to have an ordered list of not-overlapping
        intervals
        - initiate the current interval to be the base interval
        - for each leave interval:
        - finishing before the current interval: skip, go to next
        - beginning after the current interval: skip and get out of the loop
        because we are outside range (leaves are ordered)
        - beginning within the current interval: close the current interval
        and begin a new current interval that begins at the end of the leave
        interval
        - ending within the current interval: update the current interval begin
        to match the leave interval ending
        :param tuple interval: a tuple (beginning datetime, ending datetime) that
        is the base interval from which the leave intervals
        will be removed
        :param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
        that are intervals to remove from the base interval
        :return list intervals: a list of tuples (begin datetime, end datetime)
        that are the remaining valid intervals """
        if not interval:
            return interval
        if leave_intervals is None:
            leave_intervals = []
        intervals = []
        # Normalize the leaves first: sorted and non-overlapping, so one
        # left-to-right sweep over them is enough.
        leave_intervals = self.interval_clean(leave_intervals)
        current_interval = [interval[0], interval[1]]
        for leave in leave_intervals:
            # Leave ends before the remaining interval: irrelevant.
            if leave[1] <= current_interval[0]:
                continue
            # Leave starts after the remaining interval: leaves are ordered,
            # so no later leave can intersect either.
            if leave[0] >= current_interval[1]:
                break
            # Leave starts strictly inside: emit the part before the leave,
            # then restart the current interval after the leave.
            if current_interval[0] < leave[0] < current_interval[1]:
                current_interval[1] = leave[0]
                intervals.append((current_interval[0], current_interval[1]))
                current_interval = [leave[1], interval[1]]
            # Leave covers the beginning of the current interval: push the
            # interval start to the leave's end.
            # (simplified from: if current_interval[0] <= leave[1] <= current_interval[1])
            if current_interval[0] <= leave[1]:
                current_interval[0] = leave[1]
        if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves
            intervals.append((current_interval[0], current_interval[1]))
        return intervals
    def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
        """ Schedule hours in intervals. The last matching interval is truncated
        to match the specified hours.
        It is possible to truncate the last interval at its beginning or ending.
        However this does nothing on the given interval order that should be
        submitted accordingly.
        :param list intervals: a list of tuples (beginning datetime, ending datetime)
        :param int/float hour: number of hours to schedule. It will be converted
        into a timedelta, but should be submitted as an
        int or float.
        :param boolean remove_at_end: remove extra hours at the end of the last
        matching interval. Otherwise, do it at the
        beginning.
        :return list results: a list of intervals. If the number of hours to schedule
        is greater than the possible scheduling in the intervals, no extra-scheduling
        is done, and results == intervals. """
        results = []
        # Running total of scheduled time vs. the requested budget.
        res = datetime.timedelta()
        limit = datetime.timedelta(hours=hour)
        for interval in intervals:
            res += interval[1] - interval[0]
            # NOTE(review): `seconds` is not defined in this chunk; it is
            # presumably a module-level helper converting a timedelta to
            # float seconds -- confirm it exists further down the file.
            if res > limit and remove_at_end:
                # Overshoot: truncate this last interval at its end
                # (limit - res is negative, so this shortens the interval).
                interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
            elif res > limit:
                # Overshoot with remove_at_end=False: truncate at the start.
                interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
            results.append(interval)
            if res > limit:
                break
        return results
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
""" Given a list of weekdays, return matching resource.calendar.attendance"""
calendar = self.browse(cr, uid, id, context=None)
return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
""" Return the list of weekdays that contain at least one working interval.
If no id is given (no calendar), return default weekdays. """
if id is None:
return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
calendar = self.browse(cr, uid, id, context=None)
weekdays = set()
for attendance in calendar.attendance_ids:
weekdays.add(int(attendance.dayofweek))
return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
""" Get following date of day_date, based on resource.calendar. If no
calendar is provided, just return the next day.
:param int id: id of a resource.calendar. If not given, simply add one day
to the submitted date.
:param date day_date: current day as a date
:return date: next day of calendar, or just next day """
if not id:
return day_date + relativedelta(days=1)
weekdays = self.get_weekdays(cr, uid, id, context)
base_index = -1
for weekday in weekdays:
if weekday > day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days < 0:
days = 7 + days
return day_date + relativedelta(days=days)
def get_previous_day(self, cr, uid, id, day_date, context=None):
""" Get previous date of day_date, based on resource.calendar. If no
calendar is provided, just return the previous day.
:param int id: id of a resource.calendar. If not given, simply remove
one day from the submitted date.
:param date day_date: current day as a date
:return date: previous day of calendar, or just previous day """
if not id:
return day_date + relativedelta(days=-1)
weekdays = self.get_weekdays(cr, uid, id, context)
weekdays.reverse()
base_index = -1
for weekday in weekdays:
if weekday < day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days > 0:
days = days - 7
return day_date + relativedelta(days=days)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
start_datetime=None, end_datetime=None,
context=None):
"""Get the leaves of the calendar. Leaves can be filtered on the resource,
the start datetime or the end datetime.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param datetime start_datetime: if provided, do not take into account leaves
ending before this date.
:param datetime end_datetime: if provided, do not take into account leaves
beginning after this date.
:return list leaves: list of tuples (start_datetime, end_datetime) of
leave intervals
"""
resource_calendar = self.browse(cr, uid, id, context=context)
leaves = []
for leave in resource_calendar.leave_ids:
if leave.resource_id and not resource_id == leave.resource_id.id:
continue
date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if end_datetime and date_from > end_datetime:
continue
date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if start_datetime and date_to < start_datetime:
continue
leaves.append((date_from, date_to))
return leaves
    def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
                                     leaves=None, compute_leaves=False, resource_id=None,
                                     default_interval=None, context=None):
        """ Get the working intervals of the day based on calendar. This method
        handle leaves that come directly from the leaves parameter or can be computed.
        :param int id: resource.calendar id; take the first one if is a list
        :param datetime start_dt: datetime object that is the beginning hours
        for the working intervals computation; any
        working interval beginning before start_dt
        will be truncated. If not set, set to end_dt
        or today() if no end_dt at 00.00.00.
        :param datetime end_dt: datetime object that is the ending hour
        for the working intervals computation; any
        working interval ending after end_dt
        will be truncated. If not set, set to start_dt()
        at 23.59.59.
        :param list leaves: a list of tuples(start_datetime, end_datetime) that
        represent leaves.
        :param boolean compute_leaves: if set and if leaves is None, compute the
        leaves based on calendar and resource.
        If leaves is None and compute_leaves false
        no leaves are taken into account.
        :param int resource_id: the id of the resource to take into account when
        computing the leaves. If not set, only general
        leaves are computed. If set, generic and
        specific leaves are computed.
        :param tuple default_interval: if no id, try to return a default working
        day using default_interval[0] as beginning
        hour, and default_interval[1] as ending hour.
        Example: default_interval = (8, 16).
        Otherwise, a void list of working intervals
        is returned when id is None.
        :return list intervals: a list of tuples (start_datetime, end_datetime)
        of work intervals """
        if isinstance(id, (list, tuple)):
            id = id[0]
        # Computes start_dt, end_dt (with default values if not set) + off-interval work limits
        # work_limits holds the parts of the day *outside* [start_dt, end_dt];
        # they are removed from the attendance intervals like leaves below.
        work_limits = []
        if start_dt is None and end_dt is not None:
            start_dt = end_dt.replace(hour=0, minute=0, second=0)
        elif start_dt is None:
            start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
        else:
            work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
        if end_dt is None:
            end_dt = start_dt.replace(hour=23, minute=59, second=59)
        else:
            work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
        assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'
        intervals = []
        work_dt = start_dt.replace(hour=0, minute=0, second=0)
        # no calendar: try to use the default_interval, then return directly
        if id is None:
            if default_interval:
                intervals.append((start_dt.replace(hour=default_interval[0]), start_dt.replace(hour=default_interval[1])))
            return intervals
        # Build the raw attendance intervals for this weekday, truncated to
        # the [start_dt, end_dt] window via work_limits.
        working_intervals = []
        for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
            working_interval = (
                work_dt.replace(hour=int(calendar_working_day.hour_from)),
                work_dt.replace(hour=int(calendar_working_day.hour_to))
            )
            working_intervals += self.interval_remove_leaves(working_interval, work_limits)
        # find leave intervals
        # NOTE(review): context is not forwarded here (context=None) --
        # confirm whether that is intentional.
        if leaves is None and compute_leaves:
            leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=None)
        # filter according to leaves
        for interval in working_intervals:
            work_intervals = self.interval_remove_leaves(interval, leaves)
            intervals += work_intervals
        return intervals
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = datetime.timedelta()
intervals = self.get_working_intervals_of_day(
cr, uid, id,
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval, context)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
    def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
                          resource_id=None, default_interval=None, context=None):
        """ Sum the working hours of each calendar working day between start_dt
        and end_dt (end date inclusive), delegating each day to
        get_working_hours_of_date.

        :return float: total number of working hours """
        hours = 0.0
        # rrule enumerates only the calendar's working weekdays; the extra
        # day added to `until` makes the end date inclusive.
        for day in rrule.rrule(rrule.DAILY, dtstart=start_dt,
                               until=end_dt + datetime.timedelta(days=1),
                               byweekday=self.get_weekdays(cr, uid, id, context=context)):
            hours += self.get_working_hours_of_date(
                cr, uid, id, start_dt=day,
                compute_leaves=compute_leaves, resource_id=resource_id,
                default_interval=default_interval,
                context=context)
        return hours
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
    def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
                        compute_leaves=False, resource_id=None,
                        default_interval=None, context=None):
        """ Schedule hours of work, using a calendar and an optional resource to
        compute working and leave days. This method can be used backwards, i.e.
        scheduling days before a deadline.
        :param int hours: number of hours to schedule. Use a negative number to
        compute a backwards scheduling.
        :param datetime day_dt: reference date to compute working days. If days is
        > 0 date is the starting date. If days is < 0
        date is the ending date.
        :param boolean compute_leaves: if set, compute the leaves based on calendar
        and resource. Otherwise no leaves are taken
        into account.
        :param int resource_id: the id of the resource to take into account when
        computing the leaves. If not set, only general
        leaves are computed. If set, generic and
        specific leaves are computed.
        :param tuple default_interval: if no id, try to return a default working
        day using default_interval[0] as beginning
        hour, and default_interval[1] as ending hour.
        Example: default_interval = (8, 16).
        Otherwise, a void list of working intervals
        is returned when id is None.
        :return list intervals: the working intervals of the scheduling.
        NOTE(review): despite the original docstring mentioning a
        (datetime, intervals) tuple, only the interval list is returned.
        Note: Why not using rrule.rrule ? Because rrule does not seem to allow
        getting back in time.
        """
        if day_dt is None:
            day_dt = datetime.datetime.now()
        backwards = (hours < 0)
        hours = abs(hours)
        intervals = []
        remaining_hours = hours * 1.0
        iterations = 0
        current_datetime = day_dt
        # call_args is mutated in the loop: only one of start_dt/end_dt is
        # (re)set per iteration depending on the scheduling direction.
        call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
        # Loop while hours remain (float_compare >= 0), hard-capped at 1000
        # days to avoid looping forever on empty calendars.
        while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
            if backwards:
                call_args['end_dt'] = current_datetime
            else:
                call_args['start_dt'] = current_datetime
            working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
            if id is None and not working_intervals: # no calendar -> consider working 8 hours
                remaining_hours -= 8.0
            elif working_intervals:
                if backwards:
                    working_intervals.reverse()
                new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
                if backwards:
                    new_working_intervals.reverse()
                # Subtract the *full* day's capacity (working_intervals, not
                # the truncated new_working_intervals): overshooting makes
                # remaining_hours negative, which ends the loop.
                res = datetime.timedelta()
                for interval in working_intervals:
                    res += interval[1] - interval[0]
                # `seconds` is the module-level timedelta -> seconds helper.
                remaining_hours -= (seconds(res) / 3600.0)
                if backwards:
                    intervals = new_working_intervals + intervals
                else:
                    intervals = intervals + new_working_intervals
            # get next day
            if backwards:
                current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
            else:
                current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
            # avoid infinite loops
            iterations += 1
        return intervals
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the beginning/ending datetime of
an hours scheduling. """
res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
return res and res[0][0] or False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the working intervals of an hours
scheduling. """
return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
    def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
                       resource_id=None, default_interval=None, context=None):
        """Schedule days of work, using a calendar and an optional resource to
        compute working and leave days. This method can be used backwards, i.e.
        scheduling days before a deadline.
        :param int days: number of days to schedule. Use a negative number to
        compute a backwards scheduling.
        :param date day_date: reference date to compute working days. If days is > 0
        date is the starting date. If days is < 0 date is the
        ending date.
        :param boolean compute_leaves: if set, compute the leaves based on calendar
        and resource. Otherwise no leaves are taken
        into account.
        :param int resource_id: the id of the resource to take into account when
        computing the leaves. If not set, only general
        leaves are computed. If set, generic and
        specific leaves are computed.
        :param tuple default_interval: if no id, try to return a default working
        day using default_interval[0] as beginning
        hour, and default_interval[1] as ending hour.
        Example: default_interval = (8, 16).
        Otherwise, a void list of working intervals
        is returned when id is None.
        :return list intervals: the working intervals of the scheduling.
        NOTE(review): despite the original docstring mentioning a
        (datetime, intervals) tuple, only the interval list is returned.
        Implementation note: rrule.rrule is not used because rrule it des not seem
        to allow getting back in time.
        """
        if day_date is None:
            day_date = datetime.datetime.now()
        backwards = (days < 0)
        days = abs(days)
        intervals = []
        planned_days = 0
        iterations = 0
        # Start at the end of the day when going backwards, at its beginning
        # otherwise.
        if backwards:
            current_datetime = day_date.replace(hour=23, minute=59, second=59)
        else:
            current_datetime = day_date.replace(hour=0, minute=0, second=0)
        # Walk day by day until enough days are planned; hard-capped at 1000
        # iterations to avoid looping forever on empty calendars.
        while planned_days < days and iterations < 1000:
            working_intervals = self.get_working_intervals_of_day(
                cr, uid, id, current_datetime,
                compute_leaves=compute_leaves, resource_id=resource_id,
                default_interval=default_interval,
                context=context)
            if id is None or working_intervals: # no calendar -> no working hours, but day is considered as worked
                planned_days += 1
                intervals += working_intervals
            # get next day
            if backwards:
                current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
            else:
                current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
            # avoid infinite loops
            iterations += 1
        return intervals
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
""" Used in hr_payroll/hr_payroll.py
:deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
since saas-3, take hour/minutes into account, not just the whole day."""
if isinstance(day, datetime.datetime):
day = day.replace(hour=0, minute=0)
return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
Byday was not used. Since saas-3, counts Leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Unused wrapper.
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
    """One weekly recurring work period (a day of week plus a start/end
    hour) belonging to a resource.calendar."""
    _name = "resource.calendar.attendance"
    _description = "Work Detail"
    _columns = {
        'name' : fields.char("Name", size=64, required=True),
        # Day of week stored as a string index: '0' = Monday ... '6' = Sunday.
        'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
        'date_from' : fields.date('Starting Date'),
        # Hours are floats, e.g. 8.5 means 08:30 (see hours_time_string).
        'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
        'hour_to' : fields.float("Work to", required=True),
        'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
    }
    _order = 'dayofweek, hour_from'
    _defaults = {
        'dayofweek' : '0'  # default to Monday
    }
def hours_time_string(hours):
    """Format a float number of hours as an 'HH:MM' string.

    e.g. 8.5 -> '08:30'. The value is rounded to the nearest minute.
    """
    total_minutes = int(round(hours * 60))
    hh, mm = divmod(total_minutes, 60)
    return "%02d:%02d" % (hh, mm)
class resource_resource(osv.osv):
    """A schedulable resource (a human user or a material resource) with an
    optional working-time calendar and an efficiency factor."""
    _name = "resource.resource"
    _description = "Resource Detail"
    _columns = {
        'name' : fields.char("Name", size=64, required=True),
        'code': fields.char('Code', size=16),
        'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
        'company_id' : fields.many2one('res.company', 'Company'),
        'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
        'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depict the efficiency of the resource to complete tasks. e.g resource put alone on a phase of 5 days with 5 tasks assigned to him, will show a load of 100% for this phase by default, but if we put a efficiency of 200%, then his load will only be 50%."),
        'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
    }
    _defaults = {
        'resource_type' : 'user',
        'time_efficiency' : 1,
        'active' : True,
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
    }
    def copy(self, cr, uid, id, default=None, context=None):
        # Append ' (copy)' to the name unless the caller supplied a new one.
        if default is None:
            default = {}
        if not default.get('name', False):
            default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
        return super(resource_resource, self).copy(cr, uid, id, default, context)
    def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
        """
        Return a dict mapping each user id to its resource info (name,
        vacation day-strings and efficiency) for the resources allocated
        to the phase.
        NOTE: Used in project/project.py
        """
        resource_objs = {}
        user_pool = self.pool.get('res.users')
        for user in user_pool.browse(cr, uid, user_ids, context=context):
            # Defaults for users without a resource record: no vacation,
            # nominal efficiency.
            resource_objs[user.id] = {
                'name' : user.name,
                'vacation': [],
                'efficiency': 1.0,
            }
            resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
            if resource_ids:
                for resource in self.browse(cr, uid, resource_ids, context=context):
                    resource_objs[user.id]['efficiency'] = resource.time_efficiency
                    resource_cal = resource.calendar_id.id
                    if resource_cal:
                        leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
                        resource_objs[user.id]['vacation'] += list(leaves)
        return resource_objs
    def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
        """
        Compute the vacation from the working calendar of the resource.
        Returns a sorted list of 'YYYY-MM-DD' strings, one per leave day.
        @param calendar_id : working calendar of the project
        @param resource_id : resource working on phase/task
        @param resource_calendar : working calendar of the resource
        NOTE: used in project/project.py, and in generate_resources
        """
        resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
        leave_list = []
        if resource_id:
            # Leaves on either calendar ('|' joins only the two calendar
            # terms) that are specific to this resource.
            leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
                                                                       ('calendar_id', '=', resource_calendar),
                                                                       ('resource_id', '=', resource_id)
                                                                      ], context=context)
        else:
            # Generic (company-wide) leaves of the project calendar only.
            leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
                                                                      ('resource_id', '=', False)
                                                                     ], context=context)
        leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
        for i in range(len(leaves)):
            dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S')
            dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S')
            no = dt_end - dt_start
            # NOTE(review): list comprehension used purely for its side
            # effect of appending one date string per day of the leave
            # (both boundary days included).
            [leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))]
        leave_list.sort()
        return leave_list
    def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
        """
        Change the format of working calendar from 'Openerp' format to bring it into 'Faces' format.
        @param calendar_id : working calendar of the project
        NOTE: used in project/project.py
        """
        if not calendar_id:
            # Calendar is not specified: fall back to a default Mon-Fri,
            # 8:00-12:00 / 13:00-17:00 working week.
            return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
                    ('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
        resource_attendance_pool = self.pool.get('resource.calendar.attendance')
        # Dummy interval used to mark non-working days for Faces.
        time_range = "8:00-8:00"
        non_working = ""
        week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
        wk_days = {}
        wk_time = {}
        wktime_list = []
        wktime_cal = []
        week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
        weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
        # Convert time formats into appropriate format required
        # and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
        for week in weeks:
            res_str = ""
            day = None
            if week_days.get(week['dayofweek'],False):
                day = week_days[week['dayofweek']]
                wk_days[week['dayofweek']] = week_days[week['dayofweek']]
            else:
                raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
            hour_from_str = hours_time_string(week['hour_from'])
            hour_to_str = hours_time_string(week['hour_to'])
            res_str = hour_from_str + '-' + hour_to_str
            wktime_list.append((day, res_str))
        # Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
        # by grouping intervals per day; the first list element is the day
        # name so that tuple(v) yields (day, interval1, interval2, ...).
        for item in wktime_list:
            if wk_time.has_key(item[0]):  # NOTE(review): dict.has_key is Python 2 only
                wk_time[item[0]].append(item[1])
            else:
                wk_time[item[0]] = [item[0]]
                wk_time[item[0]].append(item[1])
        for k,v in wk_time.items():
            wktime_cal.append(tuple(v))
        # Add for the non-working days like: [('sat, sun', '8:00-8:00')]
        # (remove every configured day; whatever remains is non-working).
        for k, v in wk_days.items():
            if week_days.has_key(k):
                week_days.pop(k)
        for v in week_days.itervalues():
            non_working += v + ','
        if non_working:
            wktime_cal.append((non_working[:-1], time_range))
        return wktime_cal
class resource_calendar_leaves(osv.osv):
    """Leave (vacation/holiday) period, either generic for a calendar or
    specific to one resource."""
    _name = "resource.calendar.leaves"
    _description = "Leave Detail"
    _columns = {
        'name' : fields.char("Name", size=64),
        'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
        'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
        'date_from' : fields.datetime('Start Date', required=True),
        'date_to' : fields.datetime('End Date', required=True),
        'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
    }
    def check_dates(self, cr, uid, ids, context=None):
        """Constraint: every leave must have date_from <= date_to.

        Bug fix: the previous implementation only validated ids[0], so a
        multi-record write could slip invalid leaves past the constraint;
        all records are now checked.
        """
        for leave in self.read(cr, uid, ids, ['date_from', 'date_to'], context=context):
            # String comparison is valid for server-format datetimes
            # ('%Y-%m-%d %H:%M:%S' sorts chronologically).
            if leave['date_from'] and leave['date_to']:
                if leave['date_from'] > leave['date_to']:
                    return False
        return True
    _constraints = [
        (check_dates, 'Error! leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
    ]
    def onchange_resource(self, cr, uid, ids, resource, context=None):
        """Onchange handler: when a resource is selected, propose its own
        working calendar as the leave's calendar."""
        result = {}
        if resource:
            resource_pool = self.pool.get('resource.resource')
            result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
            return {'value': result}
        return {'value': {'calendar_id': []}}
def seconds(td):
    """Return the total duration of a timedelta as a float number of seconds
    (Python 2.6 compatible replacement for timedelta.total_seconds())."""
    assert isinstance(td, datetime.timedelta)
    total_microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6
    return total_microseconds / 10.**6
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
x303597316/hue | desktop/core/ext-py/python-openid-2.2.5/openid/yadis/services.py | 167 | 1838 | # -*- test-case-name: openid.test.test_services -*-
from openid.yadis.filters import mkFilter
from openid.yadis.discover import discover, DiscoveryFailure
from openid.yadis.etxrd import parseXRDS, iterServices, XRDSError
def getServiceEndpoints(input_url, flt=None):
"""Perform the Yadis protocol on the input URL and return an
iterable of resulting endpoint objects.
@param flt: A filter object or something that is convertable to
a filter object (using mkFilter) that will be used to generate
endpoint objects. This defaults to generating BasicEndpoint
objects.
@param input_url: The URL on which to perform the Yadis protocol
@return: The normalized identity URL and an iterable of endpoint
objects generated by the filter function.
@rtype: (str, [endpoint])
@raises DiscoveryFailure: when Yadis fails to obtain an XRDS document.
"""
result = discover(input_url)
try:
endpoints = applyFilter(result.normalized_uri,
result.response_text, flt)
except XRDSError, err:
raise DiscoveryFailure(str(err), None)
return (result.normalized_uri, endpoints)
def applyFilter(normalized_uri, xrd_data, flt=None):
    """Generate an iterable of endpoint objects given this input data,
    presumably from the result of performing the Yadis protocol.

    @param normalized_uri: The input URL, after following redirects,
        as in the Yadis protocol.

    @param xrd_data: The XML text of the XRDS file fetched from the
        normalized URI.
    @type xrd_data: str
    """
    endpoint_filter = mkFilter(flt)
    xrd_tree = parseXRDS(xrd_data)
    endpoints = []
    for service in iterServices(xrd_tree):
        endpoints.extend(
            endpoint_filter.getServiceEndpoints(normalized_uri, service))
    return endpoints
| apache-2.0 |
chiefspace/udemy-rest-api | udemy_rest_api_section6/env/lib/python3.4/site-packages/jwt/compat.py | 13 | 1317 | """
The `compat` module provides support for backwards compatibility with older
versions of python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
import sys
import hmac
# True when running on Python 3; selects the right text/binary types below.
PY3 = sys.version_info[0] == 3
if PY3:
    text_type = str
    binary_type = bytes
else:
    # Python 2: textual strings are ``unicode``, byte strings are ``str``.
    text_type = unicode
    binary_type = str
# All string-ish types on this interpreter (used by is_string_type).
string_types = (text_type, binary_type)
def is_string_type(val):
    """Return True if ``val`` is a text or binary string on this interpreter.

    Simplified: ``isinstance`` accepts a tuple of types directly, so the
    previous ``any([...])`` over a throwaway list was unnecessary work.
    """
    return isinstance(val, string_types)
def timedelta_total_seconds(delta):
    """Return the total number of seconds in ``delta`` as a float.

    Uses ``timedelta.total_seconds()`` where available, with a manual
    computation as a fallback for Python 2.6.
    """
    try:
        delta.total_seconds
    except AttributeError:
        # On Python 2.6, timedelta instances do not have
        # a .total_seconds() method. Bug fix: include the microseconds
        # component, which the previous fallback silently dropped.
        total_seconds = (delta.days * 24 * 60 * 60 + delta.seconds
                         + delta.microseconds / 1e6)
    else:
        total_seconds = delta.total_seconds()
    return total_seconds
# Prefer the stdlib's C implementation of a timing-safe comparison when
# the running interpreter provides it.
try:
    constant_time_compare = hmac.compare_digest
except AttributeError:
    # Fallback for Python < 2.7
    def constant_time_compare(val1, val2):
        """
        Returns True if the two strings are equal, False otherwise.
        The time taken is independent of the number of characters that match.
        """
        # Early exit on length mismatch only leaks the lengths, which are
        # not secret for the digests compared here.
        if len(val1) != len(val2):
            return False
        result = 0
        # Accumulate XOR differences over every character pair so the loop
        # always runs to completion regardless of where a mismatch occurs.
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
        return result == 0
| gpl-2.0 |
OmnInfinity/volatility | volatility/plugins/linux/pstree.py | 50 | 1893 | # This file is part of Volatility.
# Copyright (C) 2007-2013 Volatility Foundation
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.plugins.linux.pslist as linux_pslist
class linux_pstree(linux_pslist.linux_pslist):
    """Shows the parent/child relationship between processes"""
    def __init__(self, *args, **kwargs):
        # Map of pids already printed, so repeated/cyclic task links cannot
        # cause infinite recursion or duplicate output.
        self.procs = {}
        linux_pslist.linux_pslist.__init__(self, *args, **kwargs)
    def render_text(self, outfd, data):
        # Reset the seen-pid map for every render pass.
        self.procs = {}
        header = "{0:20s} {1:15s} {2:15s}\n".format("Name", "Pid", "Uid")
        outfd.write(header)
        for root_task in data:
            self.recurse_task(outfd, root_task, 0)
    def recurse_task(self, outfd, task, level):
        """Depth-first walk printing each task once, indented by depth."""
        if task.pid in self.procs:
            return
        self.procs[task.pid] = 1
        if task.mm:
            display_name = task.comm
        else:
            # Tasks without an mm are shown in brackets (kernel threads by
            # convention in ps-style listings).
            display_name = "[" + task.comm + "]"
        display_name = "." * level + display_name
        outfd.write("{0:20s} {1:15s} {2:15s}\n".format(display_name, str(task.pid), str(task.uid or '')))
        for child in task.children.list_of_type("task_struct", "sibling"):
            self.recurse_task(outfd, child, level + 1)
| gpl-2.0 |
Cgruppo/oppia | core/controllers/reader.py | 1 | 15348 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia learner view."""
__author__ = 'Sean Lip'
import copy
import logging
from core.controllers import base
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import fs_domain
from core.domain import interaction_registry
from core.domain import param_domain
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import rule_domain
from core.domain import skins_services
import feconf
import jinja_utils
import utils
import jinja2
def require_playable(handler):
    """Decorator that checks if the user can play the given exploration.

    The wrapped handler only runs when the rights manager allows the
    current user to play ``exploration_id``; otherwise a 404 is raised.
    """
    # Local import keeps this self-contained; functools.wraps preserves the
    # decorated handler's name/docstring for debugging and introspection
    # (previously every wrapped handler appeared as 'test_can_play').
    import functools

    @functools.wraps(handler)
    def test_can_play(self, exploration_id, **kwargs):
        """Checks if the user for the current session is logged in."""
        if rights_manager.Actor(self.user_id).can_play(exploration_id):
            return handler(self, exploration_id, **kwargs)
        else:
            raise self.PageNotFoundException
    return test_can_play
def _get_updated_param_dict(param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter
changes later in the list may depend on parameter changes that have
been set earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for pc in param_changes:
try:
obj_type = exp_param_specs[pc.name].obj_type
except:
raise Exception('Parameter %s not found' % pc.name)
new_param_dict[pc.name] = pc.get_normalized_value(
obj_type, new_param_dict)
return new_param_dict
def classify(
        exp_id, exp_param_specs, state, handler_name, answer, params):
    """Normalize the learner's answer and return the first rule spec of the
    named handler that the normalized answer satisfies."""
    interaction = interaction_registry.Registry.get_interaction_by_id(
        state.interaction.id)
    normalized_answer = interaction.normalize_answer(answer, handler_name)

    # Raises StopIteration if the state has no handler with this name.
    handler = next(
        h for h in state.interaction.handlers if h.name == handler_name)
    input_type = interaction.get_handler_by_name(handler_name).obj_type
    fs = fs_domain.AbstractFileSystem(fs_domain.ExplorationFileSystem(exp_id))

    for rule_spec in handler.rule_specs:
        matched = rule_domain.evaluate_rule(
            rule_spec.definition, exp_param_specs, input_type, params,
            normalized_answer, fs)
        if matched:
            return rule_spec

    raise Exception(
        'No matching rule found for handler %s. Rule specs are %s.' % (
            handler.name,
            [rule_spec.to_dict() for rule_spec in handler.rule_specs]
        )
    )
class ExplorationPage(base.BaseHandler):
    """Page describing a single exploration."""
    @require_playable
    def get(self, exploration_id):
        """Handles GET requests.

        Renders the exploration player page, optionally for a specific
        version (via the 'v' query parameter) and optionally iframed.
        """
        version = self.request.get('v')
        if not version:
            # The default value for a missing parameter seems to be ''.
            version = None
        else:
            version = int(version)
        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            raise self.PageNotFoundException(e)
        # Resolve the actual version served (in case none was requested).
        version = exploration.version
        if not rights_manager.Actor(self.user_id).can_view(exploration_id):
            raise self.PageNotFoundException
        is_iframed = (self.request.get('iframed') == 'true')
        # TODO(sll): Cache these computations.
        interaction_ids = exploration.get_interaction_ids()
        dependency_ids = (
            interaction_registry.Registry.get_deduplicated_dependency_ids(
                interaction_ids))
        dependencies_html, additional_angular_modules = (
            dependency_registry.Registry.get_deps_html_and_angular_modules(
                dependency_ids))
        interaction_templates = (
            rte_component_registry.Registry.get_html_for_all_components() +
            interaction_registry.Registry.get_interaction_html(
                interaction_ids))
        self.values.update({
            'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
            'additional_angular_modules': additional_angular_modules,
            # Banned users may still play but never see edit affordances.
            'can_edit': (
                bool(self.username) and
                self.username not in config_domain.BANNED_USERNAMES.value and
                rights_manager.Actor(self.user_id).can_edit(exploration_id)
            ),
            'dependencies_html': jinja2.utils.Markup(
                dependencies_html),
            'exploration_title': exploration.title,
            'exploration_version': version,
            'iframed': is_iframed,
            'interaction_templates': jinja2.utils.Markup(
                interaction_templates),
            'is_private': rights_manager.is_exploration_private(
                exploration_id),
            'nav_mode': feconf.NAV_MODE_EXPLORE,
            'skin_templates': jinja2.utils.Markup(
                skins_services.Registry.get_skin_templates(
                    [exploration.default_skin])),
            'skin_js_url': skins_services.Registry.get_skin_js_url(
                exploration.default_skin),
            'skin_tag': jinja2.utils.Markup(
                skins_services.Registry.get_skin_tag(exploration.default_skin)
            ),
            'title': exploration.title,
        })
        if is_iframed:
            self.render_template(
                'player/exploration_player.html', iframe_restriction=None)
        else:
            self.render_template('player/exploration_player.html')
class ExplorationHandler(base.BaseHandler):
    """Provides the initial data for a single exploration."""
    def get(self, exploration_id):
        """Populates the data on the individual exploration page."""
        requested_version = self.request.get('v')
        requested_version = (
            int(requested_version) if requested_version else None)
        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=requested_version)
        except Exception as e:
            raise self.PageNotFoundException(e)
        # Pick the category's colour, falling back to the default when the
        # category has no registered colour.
        intro_card_color = feconf.CATEGORIES_TO_COLORS.get(
            exploration.category, feconf.DEFAULT_COLOR)
        self.values.update({
            'can_edit': (
                self.user_id and
                rights_manager.Actor(self.user_id).can_edit(exploration_id)),
            'exploration': exploration.to_player_dict(),
            'intro_card_image_url': (
                '/images/gallery/exploration_background_%s_large.png' %
                intro_card_color),
            'is_logged_in': bool(self.user_id),
            'session_id': utils.generate_random_string(24),
            'version': exploration.version,
        })
        self.render_json(self.values)
class AnswerSubmittedEventHandler(base.BaseHandler):
    """Tracks a learner submitting an answer."""
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Records the submitted answer against the appropriate handler."""
        old_state_name = self.payload.get('old_state_name')
        # The reader's answer.
        answer = self.payload.get('answer')
        # The answer handler (submit, click, etc.)
        handler_name = self.payload.get('handler')
        # Parameters associated with the learner; the raw answer is exposed
        # under the 'answer' key.
        old_params = self.payload.get('params', {})
        old_params['answer'] = answer
        # The version of the exploration.
        version = self.payload.get('version')
        rule_spec_dict = self.payload.get('rule_spec')
        rule_spec = exp_domain.RuleSpec.from_dict_and_obj_type(
            rule_spec_dict, rule_spec_dict['obj_type'])
        exploration = exp_services.get_exploration_by_id(
            exploration_id, version=version)
        # Cleanup: removed the unused local ``exp_param_specs`` that was
        # previously assigned from exploration.param_specs.
        old_interaction = exploration.states[old_state_name].interaction
        old_interaction_instance = (
            interaction_registry.Registry.get_interaction_by_id(
                old_interaction.id))
        normalized_answer = old_interaction_instance.normalize_answer(
            answer, handler_name)
        # TODO(sll): Should this also depend on `params`?
        event_services.AnswerSubmissionEventHandler.record(
            exploration_id, version, old_state_name, handler_name, rule_spec,
            old_interaction_instance.get_stats_log_html(
                old_interaction.customization_args, normalized_answer))
class StateHitEventHandler(base.BaseHandler):
    """Tracks a learner hitting a new state."""
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Handles POST requests.

        Records the state hit unless the event refers to the END
        pseudo-state, which is never recorded.
        """
        new_state_name = self.payload.get('new_state_name')
        exploration_version = self.payload.get('exploration_version')
        session_id = self.payload.get('session_id')
        old_params = self.payload.get('old_params')
        # NOTE: the payload also carries 'client_time_spent_in_secs'; it was
        # read into an unused local before and is intentionally not used here.
        if new_state_name is None:
            logging.error('Unexpected StateHit event for the END state.')
            return
        event_services.StateHitEventHandler.record(
            exploration_id, exploration_version, new_state_name,
            session_id, old_params, feconf.PLAY_TYPE_NORMAL)
class ClassifyHandler(base.BaseHandler):
    """Stateless handler that performs a classify() operation server-side and
    returns the corresponding rule_spec (as a dict).
    """
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Handles POST requests."""
        exp_param_specs_dict = self.payload.get('exp_param_specs', {})
        # Rehydrate each param spec dict into a domain object.
        exp_param_specs = {}
        for ps_name, ps_val in exp_param_specs_dict.iteritems():
            exp_param_specs[ps_name] = param_domain.ParamSpec.from_dict(ps_val)
        # A domain object representing the old state.
        old_state = exp_domain.State.from_dict(self.payload.get('old_state'))
        # The name of the rule handler triggered.
        handler_name = self.payload.get('handler')
        # The learner's raw answer.
        answer = self.payload.get('answer')
        # The learner's parameter values, with the raw answer exposed to the
        # rule evaluator under the 'answer' key.
        params = self.payload.get('params')
        params['answer'] = answer
        rule_spec = classify(
            exploration_id, exp_param_specs, old_state, handler_name,
            answer, params)
        self.render_json(rule_spec.to_dict_with_obj_type())
class ReaderFeedbackHandler(base.BaseHandler):
    """Submits feedback from the reader."""
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Handles POST requests."""
        state_name = self.payload.get('state_name')
        subject = self.payload.get('subject', 'Feedback from a learner')
        feedback = self.payload.get('feedback')
        include_author = self.payload.get('include_author')
        # Only attribute the feedback when the learner opted in.
        author_id = self.user_id if include_author else None
        feedback_services.create_thread(
            exploration_id, state_name, author_id, subject, feedback)
        self.render_json(self.values)
class ExplorationStartEventHandler(base.BaseHandler):
    """Tracks a reader starting an exploration."""
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Handles POST requests."""
        payload = self.payload
        event_services.StartExplorationEventHandler.record(
            exploration_id,
            payload.get('version'),
            payload.get('state_name'),
            payload.get('session_id'),
            payload.get('params'),
            feconf.PLAY_TYPE_NORMAL)
class ExplorationMaybeLeaveHandler(base.BaseHandler):
    """Tracks a reader leaving an exploration before or at completion.

    If this is a completion, the state_name recorded should be 'END'.
    """
    REQUIRE_PAYLOAD_CSRF_CHECK = False
    @require_playable
    def post(self, exploration_id):
        """Handles POST requests."""
        payload = self.payload
        event_services.MaybeLeaveExplorationEventHandler.record(
            exploration_id,
            payload.get('version'),
            payload.get('state_name'),
            payload.get('session_id'),
            payload.get('client_time_spent_in_secs'),
            payload.get('params'),
            feconf.PLAY_TYPE_NORMAL)
# TODO(sll): This is a placeholder function. It should be deleted and the
# backend tests should submit directly to the handlers (to maintain parity with
# production and to avoid code skew).
def submit_answer_in_tests(
        exploration_id, state_name, answer, params, handler_name, version):
    """This function should only be used by tests.

    Mirrors the answer-submission flow: classifies the answer, records the
    submission event, and returns the resulting feedback, updated params
    and next-state data.
    """
    params['answer'] = answer
    exploration = exp_services.get_exploration_by_id(
        exploration_id, version=version)
    exp_param_specs = exploration.param_specs
    old_state = exploration.states[state_name]
    rule_spec = classify(
        exploration_id, exp_param_specs, old_state, handler_name,
        answer, params)
    old_interaction_instance = (
        interaction_registry.Registry.get_interaction_by_id(
            old_state.interaction.id))
    normalized_answer = old_interaction_instance.normalize_answer(
        answer, handler_name)
    # TODO(sll): Should this also depend on `params`?
    event_services.AnswerSubmissionEventHandler.record(
        exploration_id, version, state_name, handler_name, rule_spec,
        old_interaction_instance.get_stats_log_html(
            old_state.interaction.customization_args, normalized_answer))
    # A destination of END_DEST marks completion: there is no new state.
    new_state = (
        None if rule_spec.dest == feconf.END_DEST
        else exploration.states[rule_spec.dest])
    finished = (rule_spec.dest == feconf.END_DEST)
    new_params = _get_updated_param_dict(
        params, {} if finished else new_state.param_changes,
        exp_param_specs)
    return {
        'feedback_html': jinja_utils.parse_string(
            rule_spec.get_feedback_string(), params),
        'finished': finished,
        'params': new_params,
        'question_html': (
            new_state.content[0].to_html(new_params)
            if not finished else ''),
        'state_name': rule_spec.dest if not finished else None,
    }
| apache-2.0 |
relic7/prodimages | python/multiimagemagickthumbs.py | 1 | 2515 | timeconvert\
very-very-large.png\
-quality85\
-writempr:mpc:label\
+delete\
mpr:mpc:label-crop'3000x2001+0+491'-resize'170x116!>'-writepic1.png+delete\
mpr:mpc:label-crop'2981x2883+8+0'-resize'75x75!>'-writepic2.png+delete\
mpr:mpc:label-crop'1100x1983+0+0'-resize'160x160!>'-writepic3.png+delete\
mpr:mpc:label-crop'2000x2883+0+0'-resize'1024x960!>'-writepic4.png+delete\
mpr:mpc:label-crop'1000x2883+0+0'-resize'190x188!>'-writepic5.png+delete\
mpr:mpc:label-crop'3000x2000+0+0'-resize'2048x2047!>'-writepic6.png+delete\
mpr:mpc:label-crop'3000x2883+0+0'-resize'595x421!>'-writepic7.png+delete\
mpr:mpc:label-crop'3000x2883+0+0'-resize'3000x2883!>'-writepic8.png
'-crop',
str(subprocess.call(['convert', file, '-virtual-pixel', 'edge', '-blur', '0x15', '-fuzz', '1%', '-trim', '-format', '%wx%h%O', 'info:'], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False))
,
'-colorspace',
'RGB',
'-trim',
'-resize',
rocess.call([
def subproc_pad_to_x480(file,destdir):
import subprocess, os
fname = file.split("/")[-1].split('.')[0].replace('_LP','_l').lower()
ext = file.split(".")[-1]
outfile = os.path.join(destdir, fname + ".jpg")
#try:
subprocess.call([
"convert",
infile,
'-format',
'png',
'-quality',
'85',
'-colorspace',
'rgb',
'+profile"*"',
'-filter',
'Lanczos',
'-writempr:copy-of-huge-original',
'+delete',
##'mpr:copy-of-huge-original-crop"3000x2000+0+480"-resize"200x125!>"-writethumb1-extract.jpg+delete',
#'mpr:copy-of-huge-original-crop"2000x1500+280+220"-resize"75x75!>"-writethumb2-extract.jpg+delete',
str('mpr:copy-of-huge-original',
'-resize',
'"1000x1200"',
'-writesample-1000x1200_z.jpg',
'+delete'),
str('mpr:copy-of-huge-original',
'-resize',
'"1000x1200"',
'-writesample-1000x1200_z.jpg',
'+delete'),
#str('mpr:copy-of-huge-original-resize"1000x1200"-writesample-1000x1200_z.jpg+delete'),
#str('mpr:copy-of-huge-original-resize"800x960"-writesample-800x960_x.jpg+delete'),
#str('mpr:copy-of-huge-original-resize"400x480"-writesample-400x480_l.jpg+delete'),
#str('mpr:copy-of-huge-original '-resize', '"200x240"','-writesample','-200x240_m.jpg+delete'),
##'mpr:copy-of-huge-original-resize"163x163!>"-writesample-163x163.jpg'
]) | mit |
ruud-v-a/rhythmbox | plugins/replaygain/player.py | 2 | 6344 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2010 Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
import rb
import gi
gi.require_version("Gst", "1.0")
from gi.repository import RB
from gi.repository import GObject, Gio, Gst
import config
import gettext
gettext.install('rhythmbox', RB.locale_dir())
EPSILON = 0.001  # minimum dB difference treated as a real gain change (see ReplayGainPlayer.update_fallback_gain)
class ReplayGainPlayer(object):
 """Hook ReplayGain processing into Rhythmbox playback.

 Inserts the GStreamer 'rgvolume' and 'rglimiter' elements into the
 playback pipeline.  Two wiring strategies are used depending on the
 playback backend: a single global filter bin (playbin mode), or one
 rgvolume per stream plus a shared rglimiter (xfade mode).
 """
 def __init__(self, shell):
  # make sure the replaygain elements are available
  missing = []
  required = ("rgvolume", "rglimiter")
  for e in required:
   if Gst.ElementFactory.find(e) is None:
    missing.append(e)
  if len(missing) > 0:
   msg = _("The GStreamer elements required for ReplayGain processing are not available. The missing elements are: %s") % ", ".join(missing)
   RB.error_dialog(shell.props.window, _("ReplayGain GStreamer plugins not available"), msg)
   raise Exception(msg)
  self.shell_player = shell.props.shell_player
  self.player = self.shell_player.props.player
  self.settings = Gio.Settings.new("org.gnome.rhythmbox.plugins.replaygain")
  self.settings.connect("changed::limiter", self.limiter_changed_cb)
  # running window of recently seen target gains; averaged into
  # fallback_gain for streams that carry no ReplayGain tags
  self.previous_gain = []
  self.fallback_gain = 0.0
  # we use different means to hook into the playback pipeline depending on
  # the playback backend in use
  if GObject.signal_lookup("get-stream-filters", self.player):
   self.setup_xfade_mode()
   self.deactivate_backend = self.deactivate_xfade_mode
  else:
   self.setup_playbin_mode()
   self.deactivate_backend = self.deactivate_playbin_mode
 def deactivate(self):
  """Tear down the backend-specific pipeline hookup and drop references."""
  self.deactivate_backend()
  self.player = None
  self.shell_player = None
 def set_rgvolume(self, rgvolume):
  """Push the current preamp/mode/fallback-gain settings into an rgvolume element."""
  # set preamp level
  preamp = self.settings['preamp']
  rgvolume.props.pre_amp = preamp
  # set mode
  # there may eventually be a 'guess' mode here that tries to figure out
  # what to do based on the upcoming tracks
  mode = self.settings['mode']
  if mode == config.REPLAYGAIN_MODE_ALBUM:
   rgvolume.props.album_mode = 1
  else:
   rgvolume.props.album_mode = 0
  # set calculated fallback gain
  rgvolume.props.fallback_gain = self.fallback_gain
  print("updated rgvolume settings: preamp %f, album-mode %s, fallback gain %f" % (
   rgvolume.props.pre_amp, str(rgvolume.props.album_mode), rgvolume.props.fallback_gain))
 def update_fallback_gain(self, rgvolume):
  """Fold rgvolume's current target gain into the running-average fallback gain.

  Returns True when the average was updated, False when the
  notification was filtered out as bogus.
  """
  gain = rgvolume.props.target_gain - rgvolume.props.pre_amp
  # filter out bogus notifications
  if abs(gain - self.fallback_gain) < EPSILON:
   print("ignoring gain %f (current fallback gain)" % gain)
   return False
  if abs(gain) < EPSILON:
   print("ignoring zero gain (pretty unlikely)")
   return False
  # update the running average (bounded window of AVERAGE_GAIN_SAMPLES)
  if len(self.previous_gain) == config.AVERAGE_GAIN_SAMPLES:
   self.previous_gain.pop(0)
  self.previous_gain.append(gain)
  self.fallback_gain = sum(self.previous_gain) / len(self.previous_gain)
  print("got target gain %f; running average of previous gain values is %f" % (gain, self.fallback_gain))
  return True
 ### playbin mode (rgvolume ! rglimiter as global filter)
 def playbin_target_gain_cb(self, rgvolume, pspec):
  # notify::target-gain handler for the single global rgvolume
  self.update_fallback_gain(rgvolume)
 def setup_playbin_mode(self):
  """Install rgvolume ! rglimiter wrapped in a bin as one global output filter."""
  print("using output filter for rgvolume and rglimiter")
  self.rgfilter = Gst.Bin()
  self.rgvolume = Gst.ElementFactory.make("rgvolume", None)
  self.rgvolume.connect("notify::target-gain", self.playbin_target_gain_cb)
  self.rgfilter.add(self.rgvolume)
  self.rglimiter = Gst.ElementFactory.make("rglimiter", None)
  self.rgfilter.add(self.rglimiter)
  # ghost pads expose the bin's endpoints so it behaves as a single element
  self.rgfilter.add_pad(Gst.GhostPad.new("sink", self.rgvolume.get_static_pad("sink")))
  self.rgfilter.add_pad(Gst.GhostPad.new("src", self.rglimiter.get_static_pad("src")))
  self.rgvolume.link(self.rglimiter)
  self.player.add_filter(self.rgfilter)
 def deactivate_playbin_mode(self):
  """Remove the global filter bin installed by setup_playbin_mode."""
  self.player.remove_filter(self.rgfilter)
  self.rgfilter = None
 ### xfade mode (rgvolume as stream filter, rglimiter as global filter)
 def xfade_target_gain_cb(self, rgvolume, pspec):
  # per-stream notify::target-gain handler
  if self.update_fallback_gain(rgvolume) is True:
   # we don't want any further notifications from this stream
   rgvolume.disconnect_by_func(self.xfade_target_gain_cb)
 def create_stream_filter_cb(self, player, uri):
  """'get-stream-filters' handler: build a fresh rgvolume for each stream."""
  print("creating rgvolume instance for stream %s" % uri)
  rgvolume = Gst.ElementFactory.make("rgvolume", None)
  rgvolume.connect("notify::target-gain", self.xfade_target_gain_cb)
  self.set_rgvolume(rgvolume)
  return [rgvolume]
 def limiter_changed_cb(self, settings, key):
  # GSettings 'changed::limiter' handler; toggles the shared rglimiter
  if self.rglimiter is not None:
   limiter = settings['limiter']
   print("limiter setting is now %s" % str(limiter))
   self.rglimiter.props.enabled = limiter
 def setup_xfade_mode(self):
  """Install a per-stream rgvolume via 'get-stream-filters' plus one global rglimiter."""
  print("using per-stream filter for rgvolume")
  self.stream_filter_id = self.player.connect("get-stream-filters", self.create_stream_filter_cb)
  # and add rglimiter as an output filter
  self.rglimiter = Gst.ElementFactory.make("rglimiter", None)
  self.player.add_filter(self.rglimiter)
 def deactivate_xfade_mode(self):
  """Disconnect the stream-filter hook and remove the global rglimiter."""
  self.player.disconnect(self.stream_filter_id)
  self.stream_filter_id = None
  self.player.remove_filter(self.rglimiter)
  self.rglimiter = None
| gpl-2.0 |
40223247/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
def callable(obj):
    """Return True if *obj* appears callable, i.e. defines __call__.

    Deliberately shadows the builtin for Python 3.0-3.1 compatibility,
    where the builtin was removed; a plain ``def`` is used instead of a
    lambda assignment (PEP 8 discourages binding lambdas to names).
    """
    return hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except:
pass
class Sprite(object):
 """Simple base class for visible game objects.

 pygame.sprite.Sprite(*groups): return Sprite

 Derived classes are expected to override update() and to assign
 'image' and 'rect' attributes.  Any number of Group instances may be
 passed to the initializer; the new sprite becomes a member of each.
 Call the base initializer before adding the sprite to groups when
 subclassing.
 """
 def __init__(self, *groups):
  # groups containing this sprite, kept as dict keys (ordered set)
  self.__g = {}
  if groups:
   self.add(*groups)
 def add(self, *groups):
  """Add the sprite to the given groups.

  Sprite.add(*groups): return None

  Accepts any number of Group instances; the sprite joins each group
  it is not already a member of.
  """
  for grp in groups:
   if hasattr(grp, '_spritegroup'):
    if grp not in self.__g:
     grp.add_internal(self)
     self.add_internal(grp)
   else:
    # not a sprite group: treat it as a sequence of groups
    self.add(*grp)
 def remove(self, *groups):
  """Remove the sprite from the given groups.

  Sprite.remove(*groups): return None

  Accepts any number of Group instances; the sprite leaves each group
  it currently belongs to.
  """
  for grp in groups:
   if hasattr(grp, '_spritegroup'):
    if grp in self.__g:
     grp.remove_internal(self)
     self.remove_internal(grp)
   else:
    # not a sprite group: treat it as a sequence of groups
    self.remove(*grp)
 def add_internal(self, group):
  # bookkeeping hook used by Group classes; value is a dummy
  self.__g[group] = 0
 def remove_internal(self, group):
  del self.__g[group]
 def update(self, *args):
  """Hook for per-frame behaviour; the default does nothing.

  Sprite.update(*args):

  Called by Group.update() with whatever arguments were passed to it.
  Override in subclasses to control sprite behaviour.
  """
  pass
 def kill(self):
  """Remove the sprite from every group that contains it.

  Sprite.kill(): return None

  The sprite's own state is untouched; it can be re-added to groups
  afterwards.
  """
  for grp in self.__g:
   grp.remove_internal(self)
  self.__g.clear()
 def groups(self):
  """Return a list of all groups containing this sprite.

  Sprite.groups(): return group_list
  """
  return list(self.__g)
 def alive(self):
  """Return True when the sprite belongs to one or more groups.

  Sprite.alive(): return bool
  """
  return truth(self.__g)
 def __repr__(self):
  return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
 """Sprite subclass carrying dirty-rectangle bookkeeping attributes.

 pygame.sprite.DirtySprite(*groups): return DirtySprite

 Extra attributes and their defaults:

 dirty = 1
  1: repaint once, then reset to 0.  2: always dirty (repainted every
  frame, never reset).  0: clean, not repainted.
 blendmode = 0
  Passed as the special_flags argument of Surface.blit.
 source_rect = None
  Source rect to blit from, relative to the top left (0, 0) of
  self.image.
 visible = 1
  When 0 the sprite is not repainted; after setting visible back to 1,
  set dirty to 1 so it is erased from the screen.
 _layer = 0
  READ ONLY; consumed by LayeredUpdates when the sprite is added.
 """
 def __init__(self, *groups):
  self.dirty = 1
  self._visible = 1
  # pygame 1.8; referred to as special_flags in the Surface.blit docs
  self.blendmode = 0
  self._layer = 0  # READ ONLY by LayeredUpdates or LayeredDirty
  self.source_rect = None
  Sprite.__init__(self, *groups)
 def _set_visible(self, val):
  """Set the visible value (0 or 1) and mark the sprite dirty."""
  self._visible = val
  if self.dirty < 2:
   self.dirty = 1
 def _get_visible(self):
  """Return the visible value of this sprite."""
  return self._visible
 # lambdas dispatch through the instance so subclasses may override
 # _get_visible/_set_visible and still be reached via the property
 visible = property(lambda self: self._get_visible(),
     lambda self, value: self._set_visible(value),
     doc="you can make this sprite disappear without "
      "removing it from the group,\n"
      "assign 0 for invisible and 1 for visible")
 def __repr__(self):
  return "<%s DirtySprite(in %d groups)>" % \
   (self.__class__.__name__, len(self.groups()))
class AbstractGroup(object):
 """base class for containers of sprites
 AbstractGroup does everything needed to behave as a normal group. You can
 easily subclass a new group class from this or the other groups below if
 you want to add more features.
 Any AbstractGroup-derived sprite groups act like sequences and support
 iteration, len, and so on.
 """
 # dummy val to identify sprite groups, and avoid infinite recursion
 _spritegroup = True
 def __init__(self):
  # maps sprite -> rect it was last drawn at (0 until first draw)
  self.spritedict = {}
  # rects vacated by removed sprites; consumed by draw()/clear()
  self.lostsprites = []
 def sprites(self):
  """get a list of sprites in the group
  Group.sprites(): return list
  Returns an object that can be looped over with a 'for' loop. (For now,
  it is always a list, but this could change in a future version of
  pygame.) Alternatively, you can get the same information by iterating
  directly over the sprite group, e.g. 'for sprite in group'.
  """
  return list(self.spritedict)
 def add_internal(self, sprite):
  # 0 marks "never drawn"; draw() replaces it with the blit rect
  self.spritedict[sprite] = 0
 def remove_internal(self, sprite):
  r = self.spritedict[sprite]
  if r:
   # remember the sprite's last screen area so clear() can erase it
   self.lostsprites.append(r)
  del self.spritedict[sprite]
 def has_internal(self, sprite):
  return sprite in self.spritedict
 def copy(self):
  """copy a group with all the same sprites
  Group.copy(): return Group
  Returns a copy of the group that is an instance of the same class
  and has the same sprites in it.
  """
  return self.__class__(self.sprites())
 def __iter__(self):
  return iter(self.sprites())
 def __contains__(self, sprite):
  return self.has(sprite)
 def add(self, *sprites):
  """add sprite(s) to group
  Group.add(sprite, list, group, ...): return None
  Adds a sprite or sequence of sprites to a group.
  """
  for sprite in sprites:
   # It's possible that some sprite is also an iterator.
   # If this is the case, we should add the sprite itself,
   # and not the iterator object.
   if isinstance(sprite, Sprite):
    if not self.has_internal(sprite):
     self.add_internal(sprite)
     sprite.add_internal(self)
   else:
    try:
     # See if sprite is an iterator, like a list or sprite
     # group.
     self.add(*sprite)
    except (TypeError, AttributeError):
     # Not iterable. This is probably a sprite that is not an
     # instance of the Sprite class or is not an instance of a
     # subclass of the Sprite class. Alternately, it could be an
     # old-style sprite group.
     if hasattr(sprite, '_spritegroup'):
      for spr in sprite.sprites():
       if not self.has_internal(spr):
        self.add_internal(spr)
        spr.add_internal(self)
     elif not self.has_internal(sprite):
      self.add_internal(sprite)
      sprite.add_internal(self)
 def remove(self, *sprites):
  """remove sprite(s) from group
  Group.remove(sprite, list, or group, ...): return None
  Removes a sprite or sequence of sprites from a group.
  """
  # This function behaves essentially the same as Group.add. It first
  # tries to handle each argument as an instance of the Sprite class. If
  # that fails, then it tries to handle the argument as an iterable
  # object. If that fails, then it tries to handle the argument as an
  # old-style sprite group. Lastly, if that fails, it assumes that the
  # normal Sprite methods should be used.
  for sprite in sprites:
   if isinstance(sprite, Sprite):
    if self.has_internal(sprite):
     self.remove_internal(sprite)
     sprite.remove_internal(self)
   else:
    try:
     self.remove(*sprite)
    except (TypeError, AttributeError):
     if hasattr(sprite, '_spritegroup'):
      for spr in sprite.sprites():
       if self.has_internal(spr):
        self.remove_internal(spr)
        spr.remove_internal(self)
     elif self.has_internal(sprite):
      self.remove_internal(sprite)
      sprite.remove_internal(self)
 def has(self, *sprites):
  """ask if group has a sprite or sprites
  Group.has(sprite or group, ...): return bool
  Returns True if the given sprite or sprites are contained in the
  group. Alternatively, you can get the same information using the
  'in' operator, e.g. 'sprite in group', 'subgroup in group'.
  """
  # returns True only if *every* argument is contained; an early
  # False short-circuits the remaining checks
  return_value = False
  for sprite in sprites:
   if isinstance(sprite, Sprite):
    # Check for Sprite instance's membership in this group
    if self.has_internal(sprite):
     return_value = True
    else:
     return False
   else:
    try:
     if self.has(*sprite):
      return_value = True
     else:
      return False
    except (TypeError, AttributeError):
     if hasattr(sprite, '_spritegroup'):
      for spr in sprite.sprites():
       if self.has_internal(spr):
        return_value = True
       else:
        return False
     else:
      if self.has_internal(sprite):
       return_value = True
      else:
       return False
  return return_value
 def update(self, *args):
  """call the update method of every member sprite
  Group.update(*args): return None
  Calls the update method of every member sprite. All arguments that
  were passed to this method are passed to the Sprite update function.
  """
  for s in self.sprites():
   s.update(*args)
 def draw(self, surface):
  """draw all sprites onto the surface
  Group.draw(surface): return None
  Draws all of the member sprites onto the given surface.
  """
  #from javascript import console
  sprites = self.sprites()
  surface_blit = surface.blit
  for spr in sprites:
   #console.log(spr.image, spr.rect)
   #console.log(spr.image._canvas.width, spr.image._canvas.height)
   self.spritedict[spr] = surface_blit(spr.image, spr.rect)
  self.lostsprites = []
 def clear(self, surface, bgd):
  """erase the previous position of all sprites
  Group.clear(surface, bgd): return None
  Clears the area under every drawn sprite in the group. The bgd
  argument should be Surface which is the same dimensions as the
  screen surface. The bgd could also be a function which accepts
  the given surface and the area to be cleared as arguments.
  """
  if callable(bgd):
   for r in self.lostsprites:
    bgd(surface, r)
   for r in self.spritedict.values():
    if r:
     bgd(surface, r)
  else:
   surface_blit = surface.blit
   for r in self.lostsprites:
    surface_blit(bgd, r, r)
   for r in self.spritedict.values():
    if r:
     surface_blit(bgd, r, r)
 def empty(self):
  """remove all sprites
  Group.empty(): return None
  Removes all the sprites from the group.
  """
  for s in self.sprites():
   self.remove_internal(s)
   s.remove_internal(self)
 def __nonzero__(self):
  # Python 2 truth protocol; a group is truthy when non-empty
  return truth(self.sprites())
 def __len__(self):
  """return number of sprites in group
  Group.len(group): return int
  Returns the number of sprites contained in the group.
  """
  return len(self.sprites())
 def __repr__(self):
  return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
 """The default container for Sprite objects.

 pygame.sprite.Group(*sprites): return Group

 A simple sprite container, suitable for subclassing into containers
 with more specific behaviour.  The constructor accepts any number of
 sprites to add initially.  Standard Python operations are supported:

  in   test if a Sprite is contained
  len   the number of Sprites contained
  bool  test if any Sprites are contained
  iter  iterate through all the Sprites

 Membership is unordered: sprites are drawn and iterated over in no
 particular order.
 """
 def __init__(self, *sprites):
  # base bookkeeping must exist before add() is called
  AbstractGroup.__init__(self)
  self.add(*sprites)
# Backwards-compatible aliases kept from older pygame releases.
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
 """Group subclass whose draw() reports the changed screen areas.

 pygame.sprite.RenderUpdates(*sprites): return RenderUpdates

 Derived from pygame.sprite.Group; its enhanced draw method returns a
 list of rectangles covering everything that changed on screen.
 """
 def draw(self, surface):
  """Draw all sprites and return the list of dirty rectangles."""
  # start from the areas vacated by sprites removed since last draw
  dirty = self.lostsprites
  self.lostsprites = []
  for spr in self.sprites():
   old_rect = self.spritedict[spr]
   new_rect = surface.blit(spr.image, spr.rect)
   if old_rect:
    if new_rect.colliderect(old_rect):
     # overlapping old/new positions merge into a single rect
     dirty.append(new_rect.union(old_rect))
    else:
     dirty.append(new_rect)
     dirty.append(old_rect)
   else:
    # first time this sprite is drawn: only the new area is dirty
    dirty.append(new_rect)
   self.spritedict[spr] = new_rect
  return dirty
class OrderedUpdates(RenderUpdates):
 """RenderUpdates variant that draws sprites in the order they were added.

 pygame.sprite.OrderedUpdates(*sprites): return OrderedUpdates

 Keeps a parallel list recording insertion order, so adding and
 removing sprites is a little slower than with a regular Group.
 """
 def __init__(self, *sprites):
  # insertion-ordered view; must exist before the base adds sprites
  self._spritelist = []
  RenderUpdates.__init__(self, *sprites)
 def sprites(self):
  # return a copy so callers may mutate the result freely
  return list(self._spritelist)
 def add_internal(self, sprite):
  RenderUpdates.add_internal(self, sprite)
  self._spritelist.append(sprite)
 def remove_internal(self, sprite):
  RenderUpdates.remove_internal(self, sprite)
  self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
 """LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
 pygame.sprite.LayeredUpdates(*sprites, **kwargs): return LayeredUpdates
 This group is fully compatible with pygame.sprite.Sprite.
 New in pygame 1.8.0
 """
 # sentinel rect marking "never drawn"; shared by all instances and
 # compared by identity in draw()/remove_internal()
 _init_rect = Rect(0, 0, 0, 0)
 def __init__(self, *sprites, **kwargs):
  """initialize an instance of LayeredUpdates with the given attributes
  You can set the default layer through kwargs using 'default_layer'
  and an integer for the layer. The default layer is 0.
  If the sprite you add has an attribute _layer, then that layer will be
  used. If **kwarg contains 'layer', then the passed sprites will be
  added to that layer (overriding the sprite._layer attribute). If
  neither the sprite nor **kwarg has a 'layer', then the default layer is
  used to add the sprites.
  """
  self._spritelayers = {}  # sprite -> layer number
  self._spritelist = []    # sprites sorted by layer, back to front
  AbstractGroup.__init__(self)
  self._default_layer = kwargs.get('default_layer', 0)
  self.add(*sprites, **kwargs)
 def add_internal(self, sprite, layer=None):
  """Do not use this method directly.
  It is used by the group to add a sprite internally.
  """
  self.spritedict[sprite] = self._init_rect
  if layer is None:
   try:
    layer = sprite._layer
   except AttributeError:
    layer = sprite._layer = self._default_layer
  elif hasattr(sprite, '_layer'):
   sprite._layer = layer
  sprites = self._spritelist # speedup
  sprites_layers = self._spritelayers
  sprites_layers[sprite] = layer
  # add the sprite at the right position
  # bisect algorithm
  leng = len(sprites)
  low = mid = 0
  high = leng - 1
  while low <= high:
   mid = low + (high - low) // 2
   if sprites_layers[sprites[mid]] <= layer:
    low = mid + 1
   else:
    high = mid - 1
  # linear search to find final position (after all sprites of the
  # same layer, preserving insertion order within a layer)
  while mid < leng and sprites_layers[sprites[mid]] <= layer:
   mid += 1
  sprites.insert(mid, sprite)
 def add(self, *sprites, **kwargs):
  """add a sprite or sequence of sprites to a group
  LayeredUpdates.add(*sprites, **kwargs): return None
  If the sprite you add has an attribute _layer, then that layer will be
  used. If **kwarg contains 'layer', then the passed sprites will be
  added to that layer (overriding the sprite._layer attribute). If
  neither the sprite nor **kwarg has a 'layer', then the default layer is
  used to add the sprites.
  """
  if not sprites:
   return
  if 'layer' in kwargs:
   layer = kwargs['layer']
  else:
   layer = None
  for sprite in sprites:
   # It's possible that some sprite is also an iterator.
   # If this is the case, we should add the sprite itself,
   # and not the iterator object.
   if isinstance(sprite, Sprite):
    if not self.has_internal(sprite):
     self.add_internal(sprite, layer)
     sprite.add_internal(self)
   else:
    try:
     # See if sprite is an iterator, like a list or sprite
     # group.
     self.add(*sprite, **kwargs)
    except (TypeError, AttributeError):
     # Not iterable. This is probably a sprite that is not an
     # instance of the Sprite class or is not an instance of a
     # subclass of the Sprite class. Alternately, it could be an
     # old-style sprite group.
     if hasattr(sprite, '_spritegroup'):
      for spr in sprite.sprites():
       if not self.has_internal(spr):
        self.add_internal(spr, layer)
        spr.add_internal(self)
     elif not self.has_internal(sprite):
      self.add_internal(sprite, layer)
      sprite.add_internal(self)
 def remove_internal(self, sprite):
  """Do not use this method directly.
  The group uses it to remove a sprite.
  """
  self._spritelist.remove(sprite)
  # these dirty rects are suboptimal for one frame
  r = self.spritedict[sprite]
  if r is not self._init_rect:
   self.lostsprites.append(r) # dirty rect
  if hasattr(sprite, 'rect'):
   self.lostsprites.append(sprite.rect) # dirty rect
  del self.spritedict[sprite]
  del self._spritelayers[sprite]
 def sprites(self):
  """return a ordered list of sprites (first back, last top).
  LayeredUpdates.sprites(): return sprites
  """
  return list(self._spritelist)
 def draw(self, surface):
  """draw all sprites in the right order onto the passed surface
  LayeredUpdates.draw(surface): return Rect_list
  """
  spritedict = self.spritedict
  surface_blit = surface.blit
  dirty = self.lostsprites
  self.lostsprites = []
  dirty_append = dirty.append
  init_rect = self._init_rect
  for spr in self.sprites():
   rec = spritedict[spr]
   newrect = surface_blit(spr.image, spr.rect)
   if rec is init_rect:
    # first draw of this sprite: only the new area is dirty
    dirty_append(newrect)
   else:
    if newrect.colliderect(rec):
     dirty_append(newrect.union(rec))
    else:
     dirty_append(newrect)
     dirty_append(rec)
   spritedict[spr] = newrect
  return dirty
 def get_sprites_at(self, pos):
  """return a list with all sprites at that position
  LayeredUpdates.get_sprites_at(pos): return colliding_sprites
  Bottom sprites are listed first; the top ones are listed last.
  """
  _sprites = self._spritelist
  rect = Rect(pos, (0, 0))
  colliding_idx = rect.collidelistall(_sprites)
  colliding = [_sprites[i] for i in colliding_idx]
  return colliding
 def get_sprite(self, idx):
  """return the sprite at the index idx from the groups sprites
  LayeredUpdates.get_sprite(idx): return sprite
  Raises IndexError if the idx is not within range.
  """
  return self._spritelist[idx]
 def remove_sprites_of_layer(self, layer_nr):
  """remove all sprites from a layer and return them as a list
  LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
  """
  sprites = self.get_sprites_from_layer(layer_nr)
  self.remove(*sprites)
  return sprites
 #---# layer methods
 def layers(self):
  """return a list of unique defined layers defined.
  LayeredUpdates.layers(): return layers
  """
  return sorted(set(self._spritelayers.values()))
 def change_layer(self, sprite, new_layer):
  """change the layer of the sprite
  LayeredUpdates.change_layer(sprite, new_layer): return None
  The sprite must have been added to the renderer already. This is not
  checked.
  """
  sprites = self._spritelist # speedup
  sprites_layers = self._spritelayers # speedup
  sprites.remove(sprite)
  sprites_layers.pop(sprite)
  # add the sprite at the right position
  # bisect algorithm
  leng = len(sprites)
  low = mid = 0
  high = leng - 1
  while low <= high:
   mid = low + (high - low) // 2
   if sprites_layers[sprites[mid]] <= new_layer:
    low = mid + 1
   else:
    high = mid - 1
  # linear search to find final position
  while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
   mid += 1
  sprites.insert(mid, sprite)
  # NOTE(review): checks 'layer' (no underscore) while add_internal uses
  # '_layer' -- looks inconsistent; verify against upstream pygame
  if hasattr(sprite, 'layer'):
   sprite.layer = new_layer
  # add layer info
  sprites_layers[sprite] = new_layer
 def get_layer_of_sprite(self, sprite):
  """return the layer that sprite is currently in
  If the sprite is not found, then it will return the default layer.
  """
  return self._spritelayers.get(sprite, self._default_layer)
 def get_top_layer(self):
  """return the top layer
  LayeredUpdates.get_top_layer(): return layer
  """
  return self._spritelayers[self._spritelist[-1]]
 def get_bottom_layer(self):
  """return the bottom layer
  LayeredUpdates.get_bottom_layer(): return layer
  """
  return self._spritelayers[self._spritelist[0]]
 def move_to_front(self, sprite):
  """bring the sprite to front layer
  LayeredUpdates.move_to_front(sprite): return None
  Brings the sprite to front by changing the sprite layer to the top-most
  layer. The sprite is added at the end of the list of sprites in that
  top-most layer.
  """
  self.change_layer(sprite, self.get_top_layer())
 def move_to_back(self, sprite):
  """move the sprite to the bottom layer
  LayeredUpdates.move_to_back(sprite): return None
  Moves the sprite to the bottom layer by moving it to a new layer below
  the current bottom layer.
  """
  self.change_layer(sprite, self.get_bottom_layer() - 1)
 def get_top_sprite(self):
  """return the topmost sprite
  LayeredUpdates.get_top_sprite(): return Sprite
  """
  return self._spritelist[-1]
 def get_sprites_from_layer(self, layer):
  """return all sprites from a layer ordered as they were added
  LayeredUpdates.get_sprites_from_layer(layer): return sprites
  Returns all sprites from a layer. The sprites are ordered in the
  sequence that they were added. (The sprites are not removed from the
  layer.)
  """
  sprites = []
  sprites_append = sprites.append
  sprite_layers = self._spritelayers
  for spr in self._spritelist:
   if sprite_layers[spr] == layer:
    sprites_append(spr)
   elif sprite_layers[spr] > layer:# break after because no other will
          # follow with same layer
    break
  return sprites
 def switch_layer(self, layer1_nr, layer2_nr):
  """switch the sprites from layer1_nr to layer2_nr
  LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
  The layers number must exist. This method does not check for the
  existence of the given layers.
  """
  sprites1 = self.remove_sprites_of_layer(layer1_nr)
  for spr in self.get_sprites_from_layer(layer2_nr):
   self.change_layer(spr, layer1_nr)
  self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
    """LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
    This group requires pygame.sprite.DirtySprite or any sprite that
    has the following attributes:
    image, rect, dirty, visible, blendmode (see doc of DirtySprite).
    It uses the dirty flag technique and is therefore faster than
    pygame.sprite.RenderUpdates if you have many static sprites. It
    also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry which would be faster.
    As with the pygame.sprite.Group, you can specify some additional attributes
    through kwargs:
    _use_update: True/False (default is False)
    _default_layer: default layer where the sprites without a layer are
        added
    _time_threshold: threshold time for switching between dirty rect mode
        and fullscreen mode; defaults to updating at 80 frames per second,
        which is equal to 1000.0 / 80.0
    New in pygame 1.8.0
    """
    def __init__(self, *sprites, **kwargs):
        """initialize group.
        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
        You can specify some additional attributes through kwargs:
        _use_update: True/False (default is False)
        _default_layer: default layer where the sprites without a layer are
            added
        _time_threshold: threshold time for switching between dirty rect
            mode and fullscreen mode; defaults to updating at 80 frames per
            second, which is equal to 1000.0 / 80.0
        """
        LayeredUpdates.__init__(self, *sprites, **kwargs)
        self._clip = None
        # When True, draw() uses the dirty-rect path; when False it repaints
        # the whole clip area. draw() re-evaluates this flag on every call.
        self._use_update = False
        self._time_threshold = 1000.0 / 80.0    # 1000.0 / fps
        self._bgd = None
        # Only this whitelist of tuning attributes may be overridden through
        # kwargs; anything else passed in is silently ignored here.
        for key, val in kwargs.items():
            if key in ['_use_update', '_time_threshold', '_default_layer']:
                if hasattr(self, key):
                    setattr(self, key, val)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally.
        Raises AttributeError/TypeError if the sprite is not a DirtySprite
        (or compatible); marks the sprite dirty so it is drawn at least once.
        """
        # check if all needed attributes are set
        if not hasattr(sprite, 'dirty'):
            raise AttributeError()
        if not hasattr(sprite, 'visible'):
            raise AttributeError()
        if not hasattr(sprite, 'blendmode'):
            raise AttributeError()
        if not isinstance(sprite, DirtySprite):
            raise TypeError()
        if sprite.dirty == 0: # set it dirty if it is not
            sprite.dirty = 1
        LayeredUpdates.add_internal(self, sprite, layer)
    def draw(self, surface, bgd=None):
        """draw all sprites in the right order onto the given surface
        LayeredDirty.draw(surface, bgd=None): return Rect_list
        You can pass the background too. If a self.bgd is already set to some
        value that is not None, then the bgd argument has no effect.
        """
        # speedups: bind frequently used attributes/methods to locals
        _orig_clip = surface.get_clip()
        _clip = self._clip
        if _clip is None:
            _clip = _orig_clip
        _surf = surface
        _sprites = self._spritelist
        _old_rect = self.spritedict
        # lostsprites doubles as the scratch list of dirty screen rects
        _update = self.lostsprites
        _update_append = _update.append
        _ret = None
        _surf_blit = _surf.blit
        _rect = Rect
        if bgd is not None:
            self._bgd = bgd
        _bgd = self._bgd
        # sentinel rect used by LayeredUpdates for "never drawn yet"
        # (inherited attribute -- defined on LayeredUpdates)
        init_rect = self._init_rect
        _surf.set_clip(_clip)
        # -------
        # 0. decide whether to render with update or flip
        start_time = get_ticks()
        if self._use_update: # dirty rects mode
            # 1. find dirty area on screen and put the rects into _update
            # still not happy with that part
            for spr in _sprites:
                if 0 < spr.dirty:
                    # chose the right rect
                    if spr.source_rect:
                        _union_rect = _rect(spr.rect.topleft,
                                            spr.source_rect.size)
                    else:
                        _union_rect = _rect(spr.rect)
                    # merge every overlapping rect already collected into one
                    _union_rect_collidelist = _union_rect.collidelist
                    _union_rect_union_ip = _union_rect.union_ip
                    i = _union_rect_collidelist(_update)
                    while -1 < i:
                        _union_rect_union_ip(_update[i])
                        del _update[i]
                        i = _union_rect_collidelist(_update)
                    _update_append(_union_rect.clip(_clip))
                    # the sprite's previous on-screen position is dirty too
                    if _old_rect[spr] is not init_rect:
                        _union_rect = _rect(_old_rect[spr])
                        _union_rect_collidelist = _union_rect.collidelist
                        _union_rect_union_ip = _union_rect.union_ip
                        i = _union_rect_collidelist(_update)
                        while -1 < i:
                            _union_rect_union_ip(_update[i])
                            del _update[i]
                            i = _union_rect_collidelist(_update)
                        _update_append(_union_rect.clip(_clip))
            # can it be done better? because that is an O(n**2) algorithm in
            # worst case
            # clear using background
            if _bgd is not None:
                for rec in _update:
                    _surf_blit(_bgd, rec, rec)
            # 2. draw
            for spr in _sprites:
                if 1 > spr.dirty:
                    if spr._visible:
                        # sprite not dirty; blit only the intersecting part
                        _spr_rect = spr.rect
                        if spr.source_rect is not None:
                            _spr_rect = Rect(spr.rect.topleft,
                                             spr.source_rect.size)
                        _spr_rect_clip = _spr_rect.clip
                        for idx in _spr_rect.collidelistall(_update):
                            # clip
                            clip = _spr_rect_clip(_update[idx])
                            _surf_blit(spr.image,
                                       clip,
                                       (clip[0] - _spr_rect[0],
                                        clip[1] - _spr_rect[1],
                                        clip[2],
                                        clip[3]),
                                       spr.blendmode)
                else: # dirty sprite
                    if spr._visible:
                        _old_rect[spr] = _surf_blit(spr.image,
                                                    spr.rect,
                                                    spr.source_rect,
                                                    spr.blendmode)
                        # dirty == 2 means "always dirty"; only reset 1
                        if spr.dirty == 1:
                            spr.dirty = 0
            _ret = list(_update)
        else: # flip, full screen mode
            if _bgd is not None:
                _surf_blit(_bgd, (0, 0))
            for spr in _sprites:
                if spr._visible:
                    _old_rect[spr] = _surf_blit(spr.image,
                                                spr.rect,
                                                spr.source_rect,
                                                spr.blendmode)
            _ret = [_rect(_clip)] # return only the part of the screen changed
        # timing for switching modes
        # How may a good threshold be found? It depends on the hardware.
        # If the dirty-rect pass took too long, fall back to full-screen
        # drawing on the next call; otherwise (re-)enable dirty-rect mode.
        end_time = get_ticks()
        if end_time-start_time > self._time_threshold:
            self._use_update = False
        else:
            self._use_update = True
##        # debug
##        print "  check: using dirty rects:", self._use_update
        # empty dirty rects list
        _update[:] = []
        # -------
        # restore original clip
        _surf.set_clip(_orig_clip)
        return _ret
    def clear(self, surface, bgd):
        """use to set background
        Group.clear(surface, bgd): return None
        Note: the surface argument is unused; only the background is stored.
        """
        self._bgd = bgd
    def repaint_rect(self, screen_rect):
        """repaint the given area
        LayeredDirty.repaint_rect(screen_rect): return None
        screen_rect is in screen coordinates.
        """
        if self._clip:
            self.lostsprites.append(screen_rect.clip(self._clip))
        else:
            self.lostsprites.append(Rect(screen_rect))
    def set_clip(self, screen_rect=None):
        """clip the area where to draw; pass None (default) to reset the clip
        LayeredDirty.set_clip(screen_rect=None): return None
        Forces full-screen mode for the next draw() call.
        """
        if screen_rect is None:
            self._clip = pygame.display.get_surface().get_rect()
        else:
            self._clip = screen_rect
        self._use_update = False
    def get_clip(self):
        """get the area where drawing will occur
        LayeredDirty.get_clip(): return Rect
        """
        return self._clip
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite
        LayeredUpdates.change_layer(sprite, new_layer): return None
        The sprite must have been added to the renderer already. This is not
        checked. The sprite is marked dirty so the move is repainted.
        """
        LayeredUpdates.change_layer(self, sprite, new_layer)
        if sprite.dirty == 0:
            sprite.dirty = 1
    def set_timing_treshold(self, time_ms):
        """set the threshold in milliseconds
        set_timing_treshold(time_ms): return None
        Defaults to 1000.0 / 80.0. This means that the screen will be painted
        using the flip method rather than the update method if the update
        method is taking so long to update the screen that the frame rate falls
        below 80 frames per second.
        (Method name keeps the historical misspelling for API compatibility.)
        """
        self._time_threshold = time_ms
class GroupSingle(AbstractGroup):
    """A group container that holds a single most recent item.
    This class works just like a regular group, but it only keeps a single
    sprite in the group. Whatever sprite has been added to the group last will
    be the only sprite in the group.
    You can access its one sprite as the .sprite attribute. Assigning to this
    attribute will properly remove the old sprite and then add the new one.
    """
    def __init__(self, sprite=None):
        AbstractGroup.__init__(self)
        self.__sprite = None
        if sprite is not None:
            self.add(sprite)
    def copy(self):
        """Return a new GroupSingle containing the same sprite."""
        return GroupSingle(self.__sprite)
    def sprites(self):
        """Return a one-element list with the sprite, or an empty list."""
        if self.__sprite is not None:
            return [self.__sprite]
        else:
            return []
    def add_internal(self, sprite):
        # Adding a new sprite evicts the previously held one from the group.
        if self.__sprite is not None:
            self.__sprite.remove_internal(self)
            self.remove_internal(self.__sprite)
        self.__sprite = sprite
    def __nonzero__(self):
        return self.__sprite is not None
    # Python 3 looks up __bool__, not __nonzero__; without this alias the
    # group's truth value would silently fall back to __len__ instead of
    # this explicit identity test.
    __bool__ = __nonzero__
    def _get_sprite(self):
        return self.__sprite
    def _set_sprite(self, sprite):
        self.add_internal(sprite)
        sprite.add_internal(self)
        return sprite
    sprite = property(_get_sprite,
                      _set_sprite,
                      None,
                      "The sprite contained in this group")
    def remove_internal(self, sprite):
        if sprite is self.__sprite:
            self.__sprite = None
        if sprite in self.spritedict:
            AbstractGroup.remove_internal(self, sprite)
    def has_internal(self, sprite):
        return self.__sprite is sprite
    # Optimizations...
    def __contains__(self, sprite):
        return self.__sprite is sprite
# Some different collision detection functions that could be used.
def collide_rect(left, right):
    """collision detection between two sprites, using rects.
    pygame.sprite.collide_rect(left, right): return bool
    Tests for collision between two sprites. Uses the pygame.Rect colliderect
    function to calculate the collision. It is intended to be passed as a
    collided callback function to the *collide functions. Sprites must have
    "rect" attributes.
    New in pygame 1.8.0
    """
    left_rect = left.rect
    right_rect = right.rect
    return left_rect.colliderect(right_rect)
class collide_rect_ratio:
    """A callable class that checks for collisions using scaled rects
    The class checks for collisions between two sprites using a scaled version
    of the sprites' rects. Is created with a ratio; the instance is then
    intended to be passed as a collided callback function to the *collide
    functions.
    New in pygame 1.8.1
    """
    def __init__(self, ratio):
        """create a new collide_rect_ratio callable
        Ratio is expected to be a floating point value used to scale
        the underlying sprite rect before checking for collisions.
        """
        self.ratio = ratio
    def __call__(self, left, right):
        """detect collision between two sprites using scaled rects
        pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
        Tests for collision between two sprites. Uses the pygame.Rect
        colliderect function to calculate the collision after scaling the
        rects by the stored ratio. Sprites must have "rect" attributes.
        """
        ratio = self.ratio
        def scaled(rect):
            # grow (or shrink, for ratio < 1) the rect about its center
            return rect.inflate(rect.width * ratio - rect.width,
                                rect.height * ratio - rect.height)
        return scaled(left.rect).colliderect(scaled(right.rect))
def collide_circle(left, right):
    """detect collision between two sprites using circles
    pygame.sprite.collide_circle(left, right): return bool
    Tests for collision between two sprites by testing whether two circles
    centered on the sprites overlap. If the sprites have a "radius" attribute,
    then that radius is used to create the circle; otherwise, a circle is
    created that is big enough to completely enclose the sprite's rect as
    given by the "rect" attribute (and the computed radius is cached on the
    sprite). This function is intended to be passed as a collided callback
    function to the *collide functions. Sprites must have a "rect" and an
    optional "radius" attribute.
    New in pygame 1.8.0
    """
    xdiff = left.rect.centerx - right.rect.centerx
    ydiff = left.rect.centery - right.rect.centery
    dist_sq = xdiff * xdiff + ydiff * ydiff
    try:
        left_radius = left.radius
    except AttributeError:
        rect = left.rect
        # approximating the radius of a square by using half of the diagonal,
        # might give false positives (especially if its a long small rect)
        left_radius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
        # store the radius on the sprite for next time
        left.radius = left_radius
    try:
        right_radius = right.radius
    except AttributeError:
        rect = right.rect
        # approximating the radius of a square by using half of the diagonal
        # might give false positives (especially if its a long small rect)
        right_radius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
        # store the radius on the sprite for next time
        right.radius = right_radius
    return dist_sq <= (left_radius + right_radius) ** 2
class collide_circle_ratio(object):
    """detect collision between two sprites using scaled circles
    This callable class checks for collisions between two sprites using a
    scaled version of a sprite's radius. It is created with a ratio as the
    argument to the constructor. The instance is then intended to be passed as
    a collided callback function to the *collide functions.
    New in pygame 1.8.1
    """
    def __init__(self, ratio):
        """creates a new collide_circle_ratio callable instance
        The given ratio is expected to be a floating point value used to scale
        the underlying sprite radius before checking for collisions.
        When the ratio is ratio=1.0, then it behaves exactly like the
        collide_circle method.
        """
        self.ratio = ratio
    def __call__(self, left, right):
        """detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_radio(ratio)(left, right): return bool
        Tests for collision between two sprites by testing whether two circles
        centered on the sprites overlap after scaling the circle's radius by
        the stored ratio. If the sprites have a "radius" attribute, that is
        used to create the circle; otherwise, a circle is created that is big
        enough to completely enclose the sprite's rect as given by the "rect"
        attribute. Intended to be passed as a collided callback function to the
        *collide functions. Sprites must have a "rect" and an optional "radius"
        attribute.
        """
        ratio = self.ratio
        xdistance = left.rect.centerx - right.rect.centerx
        ydistance = left.rect.centery - right.rect.centery
        distancesquared = xdistance ** 2 + ydistance ** 2
        if hasattr(left, "radius"):
            leftradius = left.radius
        else:
            leftrect = left.rect
            leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
            # Cache the *unscaled* radius: previously the scaled value was
            # stored, which poisoned later calls made with a different ratio
            # (or plain collide_circle) on the same sprite.
            setattr(left, 'radius', leftradius)
        leftradius *= ratio
        if hasattr(right, "radius"):
            rightradius = right.radius
        else:
            rightrect = right.rect
            rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
            # store the unscaled radius on the sprite for next time
            setattr(right, 'radius', rightradius)
        rightradius *= ratio
        return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
    """collision detection between two sprites, using masks.
    pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
    Tests for collision between two sprites by testing if their bitmasks
    overlap. If the sprites have a "mask" attribute, that is used as the mask;
    otherwise, a mask is created from the sprite image. Intended to be passed
    as a collided callback function to the *collide functions. Sprites must
    have a "rect" and an optional "mask" attribute.
    New in pygame 1.8.0
    """
    def _mask_of(sprite):
        # prefer a precomputed mask; fall back to building one from the image
        try:
            return sprite.mask
        except AttributeError:
            return from_surface(sprite.image)
    offset = (right.rect[0] - left.rect[0],
              right.rect[1] - left.rect[1])
    return _mask_of(left).overlap(_mask_of(right), offset)
def spritecollide(sprite, group, dokill, collided=None):
    """find Sprites in a Group that intersect another Sprite
    pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
        return Sprite_list
    Return a list containing all Sprites in a Group that intersect with another
    Sprite. Intersection is determined by comparing the Sprite.rect attribute
    of each Sprite.
    The dokill argument is a bool. If set to True, all Sprites that collide
    will be removed from the Group.
    The collided argument is a callback function used to calculate if two
    sprites are colliding. it should take two sprites as values, and return a
    bool value indicating if they are colliding. If collided is not passed, all
    sprites must have a "rect" value, which is a rectangle of the sprite area,
    which will be used to calculate the collision.
    """
    if collided:
        test = lambda other: collided(sprite, other)
    else:
        # default behaviour: plain rect overlap (bound once for speed)
        rect_collide = sprite.rect.colliderect
        test = lambda other: rect_collide(other.rect)
    if not dokill:
        return [member for member in group if test(member)]
    # killing mutates the group, so iterate over a snapshot of its sprites
    crashed = []
    for member in group.sprites():
        if test(member):
            member.kill()
            crashed.append(member)
    return crashed
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
    """detect collision between a group and another group
    pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
        return dict
    Given two groups, this will find the intersections between all sprites in
    each group. It returns a dictionary of all sprites in the first group that
    collide. The value for each item in the dictionary is a list of the sprites
    in the second group it collides with. The two dokill arguments control if
    the sprites from either group will be automatically removed from all
    groups. Collided is a callback function used to calculate if two sprites
    are colliding. it should take two sprites as values, and return a bool
    value indicating if they are colliding. If collided is not passed, all
    sprites must have a "rect" value, which is a rectangle of the sprite area
    that will be used to calculate the collision.
    """
    crashed = {}
    # When killing from groupa we must iterate over a snapshot, since
    # kill() mutates the group during iteration.
    members = groupa.sprites() if dokilla else groupa
    for member in members:
        hits = spritecollide(member, groupb, dokillb, collided)
        if hits:
            crashed[member] = hits
            if dokilla:
                member.kill()
    return crashed
def spritecollideany(sprite, group, collided=None):
    """finds any sprites in a group that collide with the given sprite
    pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
    collisions, then this returns None.
    If you don't need all the features of the spritecollide function, this
    function will be a bit quicker.
    Collided is a callback function used to calculate if two sprites are
    colliding. It should take two sprites as values and return a bool value
    indicating if they are colliding. If collided is not passed, then all
    sprites must have a "rect" value, which is a rectangle of the sprite area,
    which will be used to calculate the collision.
    """
    if collided:
        for member in group:
            if collided(sprite, member):
                return member
        return None
    # Special case old behaviour for speed: bind the rect test once.
    rect_collide = sprite.rect.colliderect
    for member in group:
        if rect_collide(member.rect):
            return member
    return None
| gpl-3.0 |
j-carl/ansible | test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py | 32 | 1267 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Static YAML plugin documentation, parsed by Ansible's plugin loader and
# shown by ansible-doc; the options/vars entries below drive how the
# connection's configuration values are resolved. Do not rename keys.
DOCUMENTATION = """
author: Ansible Core Team
connection: delegation_connection
short_description: Test connection for delegated host check
description:
- Some further description that you don't care about.
options:
remote_password:
description: The remote password
type: str
vars:
- name: ansible_password
# Tests that an aliased key gets the -k option which hardcodes the value to password
aliases:
- password
"""
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
    """Minimal pass-through connection used by the delegation test target.
    Every operation simply defers to ConnectionBase; the plugin exists so
    the integration test can exercise option/variable resolution for a
    delegated host without touching a real transport.
    """
    transport = 'delegation_connection'
    has_pipelining = True
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
    def _connect(self):
        """Defer connection setup to the base class."""
        super(Connection, self)._connect()
    def exec_command(self, cmd, in_data=None, sudoable=True):
        """Defer to the base class; no command is actually executed."""
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
    def put_file(self, in_path, out_path):
        """Defer to the base class; nothing is transferred."""
        super(Connection, self).put_file(in_path, out_path)
    def fetch_file(self, in_path, out_path):
        """Defer to the base class; nothing is fetched."""
        super(Connection, self).fetch_file(in_path, out_path)
    def close(self):
        """Defer teardown to the base class."""
        super(Connection, self).close()
| gpl-3.0 |
ChristinaZografou/sympy | sympy/simplify/tests/test_trigsimp.py | 22 | 15557 | from sympy import (
symbols, sin, simplify, cos, trigsimp, rad, tan, exptrigsimp,sinh,
cosh, diff, cot, Subs, exp, tanh, exp, S, integrate, I,Matrix,
Symbol, coth, pi, log, count_ops, sqrt, E, expand, Piecewise)
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k
def test_trigsimp1():
    """Core trigsimp identities: Pythagorean, quotient, sum/difference
    formulas for trig and hyperbolic functions, plus float args and log."""
    x, y = symbols('x,y')
    assert trigsimp(1 - sin(x)**2) == cos(x)**2
    assert trigsimp(1 - cos(x)**2) == sin(x)**2
    assert trigsimp(sin(x)**2 + cos(x)**2) == 1
    assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
    assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
    assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
    assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
    assert trigsimp(1/sin(x)**2 - 1) == 1/tan(x)**2
    assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1
    assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
    assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) == 3*cos(x)/2 + S(7)/2
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
    assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
    assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
    assert trigsimp(cot(x)/cos(x)) == 1/sin(x)
    assert trigsimp(sin(x + y) + sin(x - y)) == 2*sin(x)*cos(y)
    assert trigsimp(sin(x + y) - sin(x - y)) == 2*sin(y)*cos(x)
    assert trigsimp(cos(x + y) + cos(x - y)) == 2*cos(x)*cos(y)
    assert trigsimp(cos(x + y) - cos(x - y)) == -2*sin(x)*sin(y)
    assert trigsimp(tan(x + y) - tan(x)/(1 - tan(x)*tan(y))) == \
        sin(y)/(-sin(y)*tan(x) + cos(y))  # -tan(y)/(tan(x)*tan(y) - 1)
    assert trigsimp(sinh(x + y) + sinh(x - y)) == 2*sinh(x)*cosh(y)
    assert trigsimp(sinh(x + y) - sinh(x - y)) == 2*sinh(y)*cosh(x)
    assert trigsimp(cosh(x + y) + cosh(x - y)) == 2*cosh(x)*cosh(y)
    assert trigsimp(cosh(x + y) - cosh(x - y)) == 2*sinh(x)*sinh(y)
    assert trigsimp(tanh(x + y) - tanh(x)/(1 + tanh(x)*tanh(y))) == \
        sinh(y)/(sinh(y)*tanh(x) + cosh(y))
    assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
    e = 2*sin(x)**2 + 2*cos(x)**2
    assert trigsimp(log(e)) == log(2)
def test_trigsimp1a():
    """trigsimp on products with numeric arguments and extra factors."""
    assert trigsimp(sin(2)**2*cos(3)*exp(2)/cos(2)**2) == tan(2)**2*cos(3)*exp(2)
    assert trigsimp(tan(2)**2*cos(3)*exp(2)*cos(2)**2) == sin(2)**2*cos(3)*exp(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)*sin(2)) == cos(3)*exp(2)*cos(2)
    assert trigsimp(tan(2)*cos(3)*exp(2)/sin(2)) == cos(3)*exp(2)/cos(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)/cos(2)) == cos(3)*exp(2)/sin(2)
    assert trigsimp(cot(2)*cos(3)*exp(2)*tan(2)) == cos(3)*exp(2)
    assert trigsimp(sinh(2)*cos(3)*exp(2)/cosh(2)) == tanh(2)*cos(3)*exp(2)
    assert trigsimp(tanh(2)*cos(3)*exp(2)*cosh(2)) == sinh(2)*cos(3)*exp(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)*sinh(2)) == cosh(2)*cos(3)*exp(2)
    assert trigsimp(tanh(2)*cos(3)*exp(2)/sinh(2)) == cos(3)*exp(2)/cosh(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)/cosh(2)) == cos(3)*exp(2)/sinh(2)
    assert trigsimp(coth(2)*cos(3)*exp(2)*tanh(2)) == cos(3)*exp(2)
def test_trigsimp2():
    """Recursive trigsimp and simplification inside Subs objects."""
    x, y = symbols('x,y')
    assert trigsimp(cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2,
                    recursive=True) == 1
    assert trigsimp(sin(x)**2*sin(y)**2 + sin(x)**2*cos(y)**2 + cos(x)**2,
                    recursive=True) == 1
    assert trigsimp(
        Subs(x, x, sin(y)**2 + cos(y)**2)) == Subs(x, x, 1)
def test_issue_4373():
    """Float coefficients survive the Pythagorean simplification."""
    x = Symbol("x")
    assert abs(trigsimp(2.0*sin(x)**2 + 2.0*cos(x)**2) - 2.0) < 1e-10
def test_trigsimp3():
    """Powers of sin/cos ratios collapse to powers of tan (and 1/tan)."""
    x, y = symbols('x,y')
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10
    assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
    assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
    assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10
    assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
def test_issue_4661():
    """Quartic sin/cos combinations reduce to constants (issue 4661)."""
    a, x, y = symbols('a x y')
    eq = -4*sin(x)**4 + 4*cos(x)**4 - 8*cos(x)**2
    assert trigsimp(eq) == -4
    n = sin(x)**6 + 4*sin(x)**4*cos(x)**2 + 5*sin(x)**2*cos(x)**4 + 2*cos(x)**6
    d = -sin(x)**2 - 2*cos(x)**2
    assert simplify(n/d) == -1
    assert trigsimp(-2*cos(x)**2 + cos(x)**4 - sin(x)**4) == -1
    eq = (- sin(x)**3/4)*cos(x) + (cos(x)**3/4)*sin(x) - sin(2*x)*cos(2*x)/8
    assert trigsimp(eq) == 0
def test_issue_4494():
    """Mixed two-variable expression reduces to 1 (issue 4494)."""
    a, b = symbols('a b')
    eq = sin(a)**2*sin(b)**2 + cos(a)**2*cos(b)**2*tan(a)**2 + cos(a)**2
    assert trigsimp(eq) == 1
def test_issue_5948():
    """diff/integrate round-trip simplifies back to the integrand."""
    a, x, y = symbols('a x y')
    assert trigsimp(diff(integrate(cos(x)/sin(x)**7, x), x)) == \
           cos(x)/sin(x)**7
def test_issue_4775():
    """Angle-addition contraction, also with an additive constant."""
    a, x, y = symbols('a x y')
    assert trigsimp(sin(x)*cos(y)+cos(x)*sin(y)) == sin(x + y)
    assert trigsimp(sin(x)*cos(y)+cos(x)*sin(y)+3) == sin(x + y) + 3
def test_issue_4280():
    """Nested Pythagorean reduction with symbolic coefficients."""
    a, x, y = symbols('a x y')
    assert trigsimp(cos(x)**2 + cos(y)**2*sin(x)**2 + sin(y)**2*sin(x)**2) == 1
    assert trigsimp(a**2*sin(x)**2 + a**2*cos(y)**2*cos(x)**2 + a**2*cos(x)**2*sin(y)**2) == a**2
    assert trigsimp(a**2*cos(y)**2*sin(x)**2 + a**2*sin(y)**2*sin(x)**2) == a**2*sin(x)**2
def test_issue_3210():
    """Sum/difference formulas contract for purely numeric arguments."""
    eqs = (sin(2)*cos(3) + sin(3)*cos(2),
           -sin(2)*sin(3) + cos(2)*cos(3),
           sin(2)*cos(3) - sin(3)*cos(2),
           sin(2)*sin(3) + cos(2)*cos(3),
           sin(2)*sin(3) + cos(2)*cos(3) + cos(2),
           sinh(2)*cosh(3) + sinh(3)*cosh(2),
           sinh(2)*sinh(3) + cosh(2)*cosh(3),
           )
    assert [trigsimp(e) for e in eqs] == [
        sin(5),
        cos(5),
        -sin(1),
        cos(1),
        cos(1) + cos(2),
        sinh(5),
        cosh(5),
        ]
def test_trigsimp_issues():
    """Assorted regression checks: symbolic exponents, multiple patterns,
    factoring interactions, and expressions that formerly hung trigsimp."""
    a, x, y = symbols('a x y')
    # issue 4625 - factor_terms works, too
    assert trigsimp(sin(x)**3 + cos(x)**2*sin(x)) == sin(x)
    # issue 5948
    assert trigsimp(diff(integrate(cos(x)/sin(x)**3, x), x)) == \
        cos(x)/sin(x)**3
    assert trigsimp(diff(integrate(sin(x)/cos(x)**3, x), x)) == \
        sin(x)/cos(x)**3
    # check integer exponents
    e = sin(x)**y/cos(x)**y
    assert trigsimp(e) == e
    assert trigsimp(e.subs(y, 2)) == tan(x)**2
    assert trigsimp(e.subs(x, 1)) == tan(1)**y
    # check for multiple patterns
    assert (cos(x)**2/sin(x)**2*cos(y)**2/sin(y)**2).trigsimp() == \
        1/tan(x)**2/tan(y)**2
    assert trigsimp(cos(x)/sin(x)*cos(x+y)/sin(x+y)) == \
        1/(tan(x)*tan(x + y))
    eq = cos(2)*(cos(3) + 1)**2/(cos(3) - 1)**2
    assert trigsimp(eq) == eq.factor()  # factor makes denom (-1 + cos(3))**2
    assert trigsimp(cos(2)*(cos(3) + 1)**2*(cos(3) - 1)**2) == \
        cos(2)*sin(3)**4
    # issue 6789; this generates an expression that formerly caused
    # trigsimp to hang
    assert cot(x).equals(tan(x)) is False
    # nan or the unchanged expression is ok, but not sin(1)
    z = cos(x)**2 + sin(x)**2 - 1
    z1 = tan(x)**2 - 1/cot(x)**2
    n = (1 + z1/z)
    assert trigsimp(sin(n)) != sin(1)
    eq = x*(n - 1) - x*n
    assert trigsimp(eq) is S.NaN
    assert trigsimp(eq, recursive=True) is S.NaN
    assert trigsimp(1).is_Integer
    assert trigsimp(-sin(x)**4 - 2*sin(x)**2*cos(x)**2 - cos(x)**4) == -1
def test_trigsimp_issue_2515():
    """cos*tan products simplify to sin (issue 2515)."""
    x = Symbol('x')
    assert trigsimp(x*cos(x)*tan(x)) == x*sin(x)
    assert trigsimp(-sin(x) + cos(x)*tan(x)) == 0
def test_trigsimp_issue_3826():
    """Expanded tan(2*x) recontracts (issue 3826)."""
    assert trigsimp(tan(2*x).expand(trig=True)) == tan(2*x)
def test_trigsimp_issue_4032():
    """Powers of two with integer symbol exponents are handled (issue 4032)."""
    n = Symbol('n', integer=True, positive=True)
    assert trigsimp(2**(n/2)*cos(pi*n/4)/2 + 2**(n - 1)/2) == \
        2**(n/2)*cos(pi*n/4)/2 + 2**n/4
def test_trigsimp_issue_7761():
    """cosh of a constant is left untouched (issue 7761)."""
    assert trigsimp(cosh(pi/4)) == cosh(pi/4)
def test_trigsimp_noncommutative():
    """The standard identities also hold with noncommutative coefficients."""
    x, y = symbols('x,y')
    A, B = symbols('A,B', commutative=False)
    assert trigsimp(A - A*sin(x)**2) == A*cos(x)**2
    assert trigsimp(A - A*cos(x)**2) == A*sin(x)**2
    assert trigsimp(A*sin(x)**2 + A*cos(x)**2) == A
    assert trigsimp(A + A*tan(x)**2) == A/cos(x)**2
    assert trigsimp(A/cos(x)**2 - A) == A*tan(x)**2
    assert trigsimp(A/cos(x)**2 - A*tan(x)**2) == A
    assert trigsimp(A + A*cot(x)**2) == A/sin(x)**2
    assert trigsimp(A/sin(x)**2 - A) == A/tan(x)**2
    assert trigsimp(A/sin(x)**2 - A*cot(x)**2) == A
    assert trigsimp(y*A*cos(x)**2 + y*A*sin(x)**2) == y*A
    assert trigsimp(A*sin(x)/cos(x)) == A*tan(x)
    assert trigsimp(A*tan(x)*cos(x)) == A*sin(x)
    assert trigsimp(A*cot(x)**3*sin(x)**3) == A*cos(x)**3
    assert trigsimp(y*A*tan(x)**2/sin(x)**2) == y*A/cos(x)**2
    assert trigsimp(A*cot(x)/cos(x)) == A/sin(x)
    assert trigsimp(A*sin(x + y) + A*sin(x - y)) == 2*A*sin(x)*cos(y)
    assert trigsimp(A*sin(x + y) - A*sin(x - y)) == 2*A*sin(y)*cos(x)
    assert trigsimp(A*cos(x + y) + A*cos(x - y)) == 2*A*cos(x)*cos(y)
    assert trigsimp(A*cos(x + y) - A*cos(x - y)) == -2*A*sin(x)*sin(y)
    assert trigsimp(A*sinh(x + y) + A*sinh(x - y)) == 2*A*sinh(x)*cosh(y)
    assert trigsimp(A*sinh(x + y) - A*sinh(x - y)) == 2*A*sinh(y)*cosh(x)
    assert trigsimp(A*cosh(x + y) + A*cosh(x - y)) == 2*A*cosh(x)*cosh(y)
    assert trigsimp(A*cosh(x + y) - A*cosh(x - y)) == 2*A*sinh(x)*sinh(y)
    assert trigsimp(A*cos(0.12345)**2 + A*sin(0.12345)**2) == 1.0*A
def test_hyperbolic_simp():
    """Hyperbolic analogues of the trig identities: cosh**2 - sinh**2 == 1,
    quotient forms for tanh/coth, and power ratios."""
    x, y = symbols('x,y')
    assert trigsimp(sinh(x)**2 + 1) == cosh(x)**2
    assert trigsimp(cosh(x)**2 - 1) == sinh(x)**2
    assert trigsimp(cosh(x)**2 - sinh(x)**2) == 1
    assert trigsimp(1 - tanh(x)**2) == 1/cosh(x)**2
    assert trigsimp(1 - 1/cosh(x)**2) == tanh(x)**2
    assert trigsimp(tanh(x)**2 + 1/cosh(x)**2) == 1
    assert trigsimp(coth(x)**2 - 1) == 1/sinh(x)**2
    assert trigsimp(1/sinh(x)**2 + 1) == 1/tanh(x)**2
    assert trigsimp(coth(x)**2 - 1/sinh(x)**2) == 1
    assert trigsimp(5*cosh(x)**2 - 5*sinh(x)**2) == 5
    assert trigsimp(5*cosh(x/2)**2 - 2*sinh(x/2)**2) == 3*cosh(x)/2 + S(7)/2
    assert trigsimp(sinh(x)/cosh(x)) == tanh(x)
    assert trigsimp(tanh(x)) == trigsimp(sinh(x)/cosh(x))
    assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
    assert trigsimp(2*tanh(x)*cosh(x)) == 2*sinh(x)
    assert trigsimp(coth(x)**3*sinh(x)**3) == cosh(x)**3
    assert trigsimp(y*tanh(x)**2/sinh(x)**2) == y/cosh(x)**2
    assert trigsimp(coth(x)/cosh(x)) == 1/sinh(x)
    e = 2*cosh(x)**2 - 2*sinh(x)**2
    assert trigsimp(log(e)) == log(2)
    assert trigsimp(cosh(x)**2*cosh(y)**2 - cosh(x)**2*sinh(y)**2 - sinh(x)**2,
                    recursive=True) == 1
    assert trigsimp(sinh(x)**2*sinh(y)**2 - sinh(x)**2*cosh(y)**2 + cosh(x)**2,
                    recursive=True) == 1
    assert abs(trigsimp(2.0*cosh(x)**2 - 2.0*sinh(x)**2) - 2.0) < 1e-10
    assert trigsimp(sinh(x)**2/cosh(x)**2) == tanh(x)**2
    assert trigsimp(sinh(x)**3/cosh(x)**3) == tanh(x)**3
    assert trigsimp(sinh(x)**10/cosh(x)**10) == tanh(x)**10
    assert trigsimp(cosh(x)**3/sinh(x)**3) == 1/tanh(x)**3
    assert trigsimp(cosh(x)/sinh(x)) == 1/tanh(x)
    assert trigsimp(cosh(x)**2/sinh(x)**2) == 1/tanh(x)**2
    assert trigsimp(cosh(x)**10/sinh(x)**10) == 1/tanh(x)**10
    assert trigsimp(x*cosh(x)*tanh(x)) == x*sinh(x)
    assert trigsimp(-sinh(x) + cosh(x)*tanh(x)) == 0
    assert tan(x) != 1/cot(x)  # cot doesn't auto-simplify
    assert trigsimp(tan(x) - 1/cot(x)) == 0
    assert trigsimp(3*tanh(x)**7 - 2/coth(x)**7) == tanh(x)**7
def test_trigsimp_groebner():
    """The Groebner-basis backend: rational trig expressions, hints,
    polynomial mode, complex arguments and hyperbolic sums."""
    from sympy.simplify.trigsimp import trigsimp_groebner
    c = cos(x)
    s = sin(x)
    ex = (4*s*c + 12*s + 5*c**3 + 21*c**2 + 23*c + 15)/(
        -s*c**2 + 2*s*c + 15*s + 7*c**3 + 31*c**2 + 37*c + 21)
    resnum = (5*s - 5*c + 1)
    resdenom = (8*s - 6*c)
    results = [resnum/resdenom, (-resnum)/(-resdenom)]
    assert trigsimp_groebner(ex) in results
    assert trigsimp_groebner(s/c, hints=[tan]) == tan(x)
    assert trigsimp_groebner(c*s) == c*s
    assert trigsimp((-s + 1)/c + c/(-s + 1),
                    method='groebner') == 2/c
    assert trigsimp((-s + 1)/c + c/(-s + 1),
                    method='groebner', polynomial=True) == 2/c
    # Test quick=False works
    assert trigsimp_groebner(ex, hints=[2]) in results
    # test "I"
    assert trigsimp_groebner(sin(I*x)/cos(I*x), hints=[tanh]) == I*tanh(x)
    # test hyperbolic / sums
    assert trigsimp_groebner((tanh(x)+tanh(y))/(1+tanh(x)*tanh(y)),
                             hints=[(tanh, x, y)]) == tanh(x + y)
def test_issue_2827_trigsimp_methods():
    """Custom measure functions and non-Expr (Matrix) inputs work with all
    trigsimp methods; exptrigsimp recognizes E as well as exp()."""
    measure1 = lambda expr: len(str(expr))
    measure2 = lambda expr: -count_ops(expr)
    # Return the most complicated result
    expr = (x + 1)/(x + sin(x)**2 + cos(x)**2)
    ans = Matrix([1])
    M = Matrix([expr])
    assert trigsimp(M, method='fu', measure=measure1) == ans
    assert trigsimp(M, method='fu', measure=measure2) != ans
    # all methods should work with Basic expressions even if they
    # aren't Expr
    M = Matrix.eye(1)
    assert all(trigsimp(M, method=m) == M for m in
               'fu matching groebner old'.split())
    # watch for E in exptrigsimp, not only exp()
    eq = 1/sqrt(E) + E
    assert exptrigsimp(eq) == eq
def test_exptrigsimp():
    """exptrigsimp converts exp forms to trig/hyperbolic forms and back,
    leaves non-convertible combinations alone, and agrees with simplify."""
    def valid(a, b):
        # numeric check plus structural equality
        from sympy.utilities.randtest import verify_numerically as tn
        if not (tn(a, b) and a == b):
            return False
        return True
    assert exptrigsimp(exp(x) + exp(-x)) == 2*cosh(x)
    assert exptrigsimp(exp(x) - exp(-x)) == 2*sinh(x)
    e = [cos(x) + I*sin(x), cos(x) - I*sin(x),
         cosh(x) - sinh(x), cosh(x) + sinh(x)]
    ok = [exp(I*x), exp(-I*x), exp(-x), exp(x)]
    assert all(valid(i, j) for i, j in zip(
        [exptrigsimp(ei) for ei in e], ok))
    ue = [cos(x) + sin(x), cos(x) - sin(x),
          cosh(x) + I*sinh(x), cosh(x) - I*sinh(x)]
    # BUGFIX: the original line asserted the (always truthy, non-empty)
    # list of comparisons instead of requiring each comparison to hold.
    assert all(exptrigsimp(ei) == ei for ei in ue)
    res = []
    ok = [y*tanh(1), 1/(y*tanh(1)), I*y*tan(1), -I/(y*tan(1)),
          y*tanh(x), 1/(y*tanh(x)), I*y*tan(x), -I/(y*tan(x)),
          y*tanh(1 + I), 1/(y*tanh(1 + I))]
    for a in (1, I, x, I*x, 1 + I):
        w = exp(a)
        eq = y*(w - 1/w)/(w + 1/w)
        s = simplify(eq)
        assert s == exptrigsimp(eq)
        res.append(s)
        sinv = simplify(1/eq)
        assert sinv == exptrigsimp(1/eq)
        res.append(sinv)
    assert all(valid(i, j) for i, j in zip(res, ok))
    for a in range(1, 3):
        w = exp(a)
        e = w + 1/w
        s = simplify(e)
        assert s == exptrigsimp(e)
        assert valid(s, 2*cosh(a))
        e = w - 1/w
        s = simplify(e)
        assert s == exptrigsimp(e)
        assert valid(s, 2*sinh(a))
def test_powsimp_on_numbers():
    """Rational exponent arithmetic auto-simplifies on construction."""
    assert 2**(S(1)/3 - 2) == 2**(S(1)/3)/4
@XFAIL
def test_issue_6811_fail():
    """Known failure: trigsimp no longer recovers the short mechanics form.
    from doc/src/modules/physics/mechanics/examples.rst, the current `eq`
    at Line 576 (in different variables) was formerly the equivalent and
    shorter expression given below...it would be nice to get the short one
    back again
    """
    xp, y, x, z = symbols('xp, y, x, z')
    eq = 4*(-19*sin(x)*y + 5*sin(3*x)*y + 15*cos(2*x)*z - 21*z)*xp/(9*cos(x) - 5*cos(3*x))
    assert trigsimp(eq) == -2*(2*cos(x)*tan(x)*y + 3*z)*xp/cos(x)
def test_Piecewise():
    """trigsimp should only simplify the trig-containing Piecewise arg."""
    e1 = x*(x + y) - y*(x + y)
    e2 = sin(x)**2 + cos(x)**2
    e3 = expand((x + y)*y/x)
    # Only e2's simplified form is needed; the original also computed
    # simplify(e1) and simplify(e3) into unused locals (simplify is
    # pure, so dropping them changes nothing observable).
    s2 = simplify(e2)
    # trigsimp tries not to touch non-trig containing args
    assert trigsimp(Piecewise((e1, e3 < e2), (e3, True))) == \
        Piecewise((e1, e3 < s2), (e3, True))
| bsd-3-clause |
openstack/cloudbase-init | cloudbaseinit/metadata/services/base.py | 2 | 8089 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import gzip
import io
import time
from oslo_log import log as oslo_logging
import requests
import six
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import exception
from cloudbaseinit.utils import encoding
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
class NotExistingMetadataException(Exception):
    """Raised when a requested metadata item does not exist.

    Lets callers distinguish "not found" from transient failures;
    BaseMetadataService._exec_with_retry re-raises it without retrying.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class BaseMetadataService(object):
    """Abstract base for all metadata services.

    Subclasses must implement `_get_data`; the remaining getters are
    optional hooks that default to returning None (meaning "this
    service does not provide that item").
    """

    # First two bytes of any gzip stream; used to sniff compressed
    # user data in get_decoded_user_data().
    _GZIP_MAGIC_NUMBER = b'\x1f\x8b'

    def __init__(self):
        # Cache of fetched metadata, keyed on (path, decode).
        self._cache = {}
        # Subclasses set this to True to retry transient failures.
        self._enable_retry = False

    def get_name(self):
        """Return the service's name (its concrete class name)."""
        return self.__class__.__name__

    def load(self):
        """Prepare the service for use; drops any cached metadata."""
        self._cache = {}

    @abc.abstractmethod
    def _get_data(self, path):
        """Return the raw metadata stored at `path`.

        Implementations should raise NotExistingMetadataException when
        the path does not exist.
        """
        pass

    def _exec_with_retry(self, action):
        # Run `action`, retrying up to CONF.retry_count times when
        # retries are enabled; "not found" is definitive, never retried.
        i = 0
        while True:
            try:
                return action()
            except NotExistingMetadataException:
                raise
            except Exception:
                if self._enable_retry and i < CONF.retry_count:
                    i += 1
                    time.sleep(CONF.retry_count_interval)
                else:
                    raise

    def _get_cache_data(self, path, decode=False):
        """Get meta data with caching and decoding support."""
        key = (path, decode)
        if key in self._cache:
            LOG.debug("Using cached copy of metadata: '%s'" % path)
            return self._cache[key]
        else:
            data = self._exec_with_retry(lambda: self._get_data(path))
            if decode:
                data = encoding.get_as_string(data)
            self._cache[key] = data
            return data

    # -- Optional getters: override the ones your backend can answer. --

    def get_instance_id(self):
        pass

    def get_content(self, name):
        """Get raw content within a service."""

    def get_user_data(self):
        pass

    def get_decoded_user_data(self):
        """Get the decoded user data, if any

        The user data can be gzip-encoded, which means
        that every access to it should verify this fact,
        leading to code duplication.
        """
        user_data = self.get_user_data()
        if user_data and user_data[:2] == self._GZIP_MAGIC_NUMBER:
            # Transparently decompress gzip-encoded user data.
            bio = io.BytesIO(user_data)
            with gzip.GzipFile(fileobj=bio, mode='rb') as out:
                user_data = out.read()
        return user_data

    def get_host_name(self):
        pass

    def get_public_keys(self):
        """Get a list of space-stripped strings as public keys."""
        pass

    def get_network_details(self):
        """Return a list of `NetworkDetails` objects.

        These objects provide details regarding static
        network configuration, details which can be found
        in the namedtuple defined above.
        """

    def get_network_details_v2(self):
        """Return a `NetworkDetailsV2` object."""

    def get_admin_username(self):
        pass

    def get_admin_password(self):
        pass

    @property
    def can_post_password(self):
        # Whether the service accepts the password being posted back.
        return False

    @property
    def is_password_set(self):
        # Whether a password has already been posted to the service.
        return False

    def post_password(self, enc_password_b64):
        pass

    def get_winrm_listeners_configuration(self):
        pass

    def get_server_certs(self):
        pass

    def get_vm_agent_package_provisioning_data(self):
        pass

    def get_client_auth_certs(self):
        pass

    def cleanup(self):
        pass

    @property
    def can_update_password(self):
        """The ability to update password of the metadata provider.

        If :meth:`~can_update_password` is True, plugins can check
        periodically (e.g. at every boot) if the password changed.

        :rtype: bool

        .. notes:
            The password will be updated only if the
            :meth:`~is_password_changed` returns True.
        """
        return False

    def is_password_changed(self):
        """Check if the metadata provider has a new password for this instance

        :rtype: bool

        .. notes:
            This method will be used only when :meth:`~can_update_password`
            is True.
        """
        return False

    def provisioning_started(self):
        pass

    def provisioning_completed(self):
        pass

    def provisioning_failed(self):
        pass

    @property
    def can_post_rdp_cert_thumbprint(self):
        # Whether the service accepts posting the RDP cert thumbprint.
        return False

    def post_rdp_cert_thumbprint(self, thumbprint):
        pass

    def get_kms_host(self):
        pass

    def get_use_avma_licensing(self):
        pass

    def get_enable_automatic_updates(self):
        """Check if the metadata provider enforces automatic updates."""
        pass

    def get_ephemeral_disk_data_loss_warning(self):
        # By default this metadata item does not exist.
        raise NotExistingMetadataException()
class BaseHTTPMetadataService(BaseMetadataService):
    """Contract class for metadata services that are using HTTP(S)."""

    def __init__(self, base_url, https_allow_insecure=False,
                 https_ca_bundle=None):
        """Setup a new metadata service.

        :param base_url:
            The base URL where the service looks for metadata.
        :param https_allow_insecure:
            Whether to disable the validation of HTTPS certificates
            (default False).
        :param https_ca_bundle:
            The path to a CA_BUNDLE file or directory with certificates
            of trusted CAs.

        .. note ::
            If `https_ca_bundle` is set to a path to a directory, the
            directory must have been processed using the c_rehash utility
            supplied with OpenSSL.
        """
        super(BaseHTTPMetadataService, self).__init__()
        self._base_url = base_url
        self._https_ca_bundle = https_ca_bundle
        self._https_allow_insecure = https_allow_insecure

    def _verify_https_request(self):
        """Return the ``verify`` argument for HTTPS requests.

        A CA bundle path takes precedence; otherwise certificate
        validation is controlled by the `https_allow_insecure` flag
        (please don't disable it unless you understand the implications).
        """
        if self._https_ca_bundle:
            return self._https_ca_bundle
        return self._https_allow_insecure

    def _http_request(self, url, data=None, headers=None):
        """Get content for received url."""
        if not url.startswith("http"):
            # Relative path: resolve it against the service base URL.
            url = requests.compat.urljoin(self._base_url, url)

        if data:
            LOG.debug('Posting data to %s', url)
            action = requests.post
        else:
            LOG.debug('Getting metadata from: %s', url)
            action = requests.get

        response = action(url=url, data=data, headers=headers,
                          verify=self._verify_https_request())
        response.raise_for_status()
        return response.content

    def _get_data(self, path):
        """Getting the required information using metadata service."""
        try:
            return self._http_request(path)
        except requests.HTTPError as exc:
            # Translate 404 into the service-level "not found" error.
            if exc.response.status_code == 404:
                raise NotExistingMetadataException(
                    getattr(exc, "message", str(exc)))
            raise
        except requests.exceptions.SSLError as exc:
            LOG.exception(exc)
            raise exception.CertificateVerifyFailed(
                "HTTPS certificate validation failed.")
| apache-2.0 |
natanovia/zulip | zerver/lib/actions.py | 113 | 124803 | from __future__ import absolute_import
from django.conf import settings
from django.core import validators
from django.contrib.sessions.models import Session
from zerver.lib.cache import flush_user_profile
from zerver.lib.context_managers import lockfile
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
Subscription, Recipient, Message, UserMessage, valid_stream_name, \
DefaultStream, UserPresence, Referral, PushDeviceToken, MAX_SUBJECT_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_recipient, get_huddle, \
get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
to_dict_cache_key, get_realm, stringify_message_dict, bulk_get_recipients, \
resolve_email_to_domain, email_to_username, display_recipient_cache_key, \
get_stream_cache_key, to_dict_cache_key_id, \
UserActivityInterval, get_active_user_dicts_in_realm, get_active_streams, \
realm_filters_for_domain, RealmFilter, receives_offline_notifications, \
ScheduledJob, realm_filters_for_domain, RealmFilter, get_active_bot_dicts_in_realm
from zerver.lib.avatar import get_avatar_url, avatar_url
from guardian.shortcuts import assign_perm, remove_perm
from django.db import transaction, IntegrityError
from django.db.models import F, Q
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
from django.core.mail import EmailMessage
from django.utils.timezone import now
from confirmation.models import Confirmation
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.create_user import random_api_key
from zerver.lib.initial_password import initial_password
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.cache_helpers import cache_save_message
from zerver.lib.queue import queue_json_publish
from django.utils import timezone
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, cache_set_many, \
cache_delete, cache_delete_many, message_cache_key
from zerver.decorator import get_user_profile_by_email, JsonableError, \
statsd_increment
from zerver.lib.event_queue import request_event_queue, get_user_events, send_event
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.push_notifications import num_push_devices_for_user, \
send_apple_push_notification, send_android_push_notification
from zerver.lib.notifications import clear_followup_emails_queue
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.session_user import get_session_user
import DNS
import ujson
import time
import traceback
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
# Store an event in the log for re-importing messages
def log_event(event):
    """Append `event` (as JSON) to today's per-host event log file.

    No-op when EVENT_LOG_DIR is unset. A "timestamp" key is added when
    missing, and writes are serialized through a lockfile.
    """
    if settings.EVENT_LOG_DIR is None:
        return

    if "timestamp" not in event:
        event["timestamp"] = time.time()

    # Create the log directory if needed; tolerate another process
    # creating it between the existence check and mkdir (TOCTOU race).
    if not os.path.exists(settings.EVENT_LOG_DIR):
        try:
            os.mkdir(settings.EVENT_LOG_DIR)
        except OSError:
            if not os.path.isdir(settings.EVENT_LOG_DIR):
                raise

    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node()
                            + datetime.datetime.now().strftime('.%Y-%m-%d'))

    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(ujson.dumps(event) + '\n')
def active_user_ids(realm):
    """Return the ids of every active user in `realm`."""
    return [row['id'] for row in get_active_user_dicts_in_realm(realm)]
def stream_user_ids(stream):
    """Return the user ids subscribed to `stream`.

    For invite-only streams only active subscriptions count.
    """
    subs = Subscription.objects.filter(recipient__type=Recipient.STREAM,
                                       recipient__type_id=stream.id)
    if stream.invite_only:
        subs = subs.filter(active=True)
    return [row['user_profile_id'] for row in subs.values('user_profile_id')]
def bot_owner_userids(user_profile):
    """Return the user ids that should see events about this bot.

    A bot tied to an invite-only stream is "private": only its owner is
    notified; otherwise the whole realm is.
    """
    sending = user_profile.default_sending_stream
    registering = user_profile.default_events_register_stream
    private_bot = ((sending and sending.invite_only) or
                   (registering and registering.invite_only))
    if private_bot:
        return (user_profile.bot_owner_id,)
    return active_user_ids(user_profile.realm)
def notify_created_user(user_profile):
    """Broadcast a realm_user/add event for a freshly created user."""
    person = dict(email=user_profile.email,
                  is_admin=user_profile.is_admin(),
                  full_name=user_profile.full_name,
                  is_bot=user_profile.is_bot)
    send_event(dict(type="realm_user", op="add", person=person),
               active_user_ids(user_profile.realm))
def notify_created_bot(user_profile):
    """Broadcast a realm_bot/add event for a freshly created bot."""
    def stream_name(stream):
        # Default streams may be unset; report None in that case.
        return stream.name if stream else None

    bot = dict(email=user_profile.email,
               full_name=user_profile.full_name,
               api_key=user_profile.api_key,
               default_sending_stream=stream_name(
                   user_profile.default_sending_stream),
               default_events_register_stream=stream_name(
                   user_profile.default_events_register_stream),
               default_all_public_streams=user_profile.default_all_public_streams,
               avatar_url=avatar_url(user_profile),
               owner=user_profile.bot_owner.email,
               )
    send_event(dict(type="realm_bot", op="add", bot=bot),
               bot_owner_userids(user_profile))
def do_create_user(email, password, realm, full_name, short_name,
                   active=True, bot=False, bot_owner=None,
                   avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream=None, default_events_register_stream=None,
                   default_all_public_streams=None):
    """Create a user (or bot), log the event, and notify clients."""
    log_entry = {'type': 'user_created',
                 'timestamp': time.time(),
                 'full_name': full_name,
                 'short_name': short_name,
                 'user': email,
                 'domain': realm.domain,
                 'bot': bot}
    if bot:
        log_entry['bot_owner'] = bot_owner.email
    log_event(log_entry)

    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name, short_name=short_name,
                               active=active, bot=bot, bot_owner=bot_owner,
                               avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams)

    notify_created_user(user_profile)
    if bot:
        notify_created_bot(user_profile)
    return user_profile
def user_sessions(user_profile):
    """Return every session belonging to `user_profile`."""
    return [session for session in Session.objects.all()
            if get_session_user(session) == user_profile.id]
def delete_session(session):
    """Remove one session from the session store."""
    store = session_engine.SessionStore(session.session_key)
    return store.delete()
def delete_user_sessions(user_profile):
    """Log the user out everywhere by deleting all of their sessions."""
    for session in Session.objects.all():
        if get_session_user(session) != user_profile.id:
            continue
        delete_session(session)
def delete_realm_user_sessions(realm):
    """Delete every unexpired session belonging to a user of `realm`."""
    # Use a set so the per-session membership test below is O(1)
    # instead of scanning a list for every stored session.
    realm_user_ids = {user_profile.id for user_profile in
                      UserProfile.objects.filter(realm=realm)}
    for session in Session.objects.filter(expire_date__gte=datetime.datetime.now()):
        if get_session_user(session) in realm_user_ids:
            delete_session(session)
def delete_all_user_sessions():
    """Drop every session in the store, logging out all users."""
    for s in Session.objects.all():
        delete_session(s)
def active_humans_in_realm(realm):
    """Queryset of the active, non-bot users in `realm`."""
    return UserProfile.objects.filter(realm=realm, is_active=True,
                                      is_bot=False)
def do_set_realm_name(realm, name):
    """Rename `realm` and broadcast the change to its active users."""
    realm.name = name
    realm.save(update_fields=['name'])
    send_event(dict(type="realm", op="update", property='name', value=name),
               active_user_ids(realm))
    return {}
def get_realm_name(domain):
    """Return the display name of the realm with the given domain."""
    return Realm.objects.get(domain=domain).name
def do_set_realm_restricted_to_domain(realm, restricted):
    """Toggle whether signups are restricted to the realm's domain."""
    realm.restricted_to_domain = restricted
    realm.save(update_fields=['restricted_to_domain'])
    send_event(dict(type="realm", op="update",
                    property='restricted_to_domain', value=restricted),
               active_user_ids(realm))
    return {}
def do_set_realm_invite_required(realm, invite_required):
    """Toggle whether an invitation is required to join the realm."""
    realm.invite_required = invite_required
    realm.save(update_fields=['invite_required'])
    send_event(dict(type="realm", op="update",
                    property='invite_required', value=invite_required),
               active_user_ids(realm))
    return {}
def do_set_realm_invite_by_admins_only(realm, invite_by_admins_only):
    """Toggle whether only realm admins may send invitations."""
    realm.invite_by_admins_only = invite_by_admins_only
    realm.save(update_fields=['invite_by_admins_only'])
    send_event(dict(type="realm", op="update",
                    property='invite_by_admins_only',
                    value=invite_by_admins_only),
               active_user_ids(realm))
    return {}
def do_deactivate_realm(realm):
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be
    able to tell the difference between users that were intentionally
    deactivated, e.g. by a realm admin, and users who can't currently
    use Zulip because their realm has been deactivated.
    """
    if realm.deactivated:
        # Already deactivated; nothing to do.
        return

    realm.deactivated = True
    realm.save(update_fields=["deactivated"])

    # Delete sessions (not accounts) so active users get bumped to the
    # login screen, where they'll see the deactivation notice.
    for user in active_humans_in_realm(realm):
        delete_user_sessions(user)
def do_deactivate_user(user_profile, log=True, _cascade=True):
    """Deactivate a user, end their sessions, and notify clients.

    With `_cascade` (the default), any active bots owned by the user
    are deactivated as well.
    """
    if not user_profile.is_active:
        return

    user_profile.is_active = False
    user_profile.save(update_fields=["is_active"])

    delete_user_sessions(user_profile)

    if log:
        log_event({'type': 'user_deactivated',
                   'timestamp': time.time(),
                   'user': user_profile.email,
                   'domain': user_profile.realm.domain})

    person = dict(email=user_profile.email,
                  full_name=user_profile.full_name)
    send_event(dict(type="realm_user", op="remove", person=person),
               active_user_ids(user_profile.realm))

    if user_profile.is_bot:
        bot = dict(email=user_profile.email,
                   full_name=user_profile.full_name)
        send_event(dict(type="realm_bot", op="remove", bot=bot),
                   bot_owner_userids(user_profile))

    if _cascade:
        owned_bots = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                bot_owner=user_profile)
        for profile in owned_bots:
            do_deactivate_user(profile, _cascade=False)
def do_deactivate_stream(stream, log=True):
    """Deactivate a stream: unsubscribe everyone, hide it, free its name."""
    user_profiles = UserProfile.objects.filter(realm=stream.realm)
    for user_profile in user_profiles:
        do_remove_subscription(user_profile, stream)
    was_invite_only = stream.invite_only
    stream.deactivated = True
    # Mark invite-only so the deactivated stream is hidden from users.
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        existing_deactivated_stream = get_stream(new_name, stream.realm)
        if existing_deactivated_stream:
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or you've hit a rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save()
    # Remove the old stream information from memcached.
    old_cache_key = get_stream_cache_key(old_name, stream.realm)
    cache_delete(old_cache_key)
    if not was_invite_only:
        # Tell clients the (formerly public) stream is gone, reporting it
        # under its original name and visibility.
        stream_dict = stream.to_dict()
        stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
        event = dict(type="stream", op="delete",
                     streams=[stream_dict])
        send_event(event, active_user_ids(stream.realm))
    return
def do_change_user_email(user_profile, new_email):
    """Update a user's email address and record the change."""
    old_email = user_profile.email
    user_profile.email = new_email
    user_profile.save(update_fields=["email"])

    log_event({'type': 'user_email_changed',
               'old_email': old_email,
               'new_email': new_email})
def compute_irc_user_fullname(email):
    """Derive a display name for an IRC mirror user from their email."""
    nick, _sep, _domain = email.partition("@")
    return nick + " (IRC)"
def compute_jabber_user_fullname(email):
    """Derive a display name for an XMPP mirror user from their email."""
    nick, _sep, _domain = email.partition("@")
    return nick + " (XMPP)"
def compute_mit_user_fullname(email):
    """Resolve an MIT email to a full name via a Hesiod DNS lookup.

    Falls back to the lower-cased email when the address doesn't match,
    the lookup fails, or Hesiod has no name on file.
    """
    try:
        # Input is either e.g. username@mit.edu or user|CROSSREALM.INVALID@mit.edu
        match_user = re.match(r'^([a-zA-Z0-9_.-]+)(\|.+)?@mit\.edu$', email.lower())
        if match_user and match_user.group(2) is None:
            answer = DNS.dnslookup(
                "%s.passwd.ns.athena.mit.edu" % (match_user.group(1),),
                DNS.Type.TXT)
            hesiod_name = answer[0][0].split(':')[4].split(',')[0].strip()
            if hesiod_name != "":
                return hesiod_name
        elif match_user:
            # Cross-realm principal: lower-case the user, upper-case the realm.
            return match_user.group(1).lower() + "@" + match_user.group(2).upper()[1:]
    except DNS.Base.ServerError:
        pass
    except Exception:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit; only real errors should be logged and ignored.
        print ("Error getting fullname for %s:" % (email,))
        traceback.print_exc()
    return email.lower()
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm, email, email_to_fullname):
    # Result is cached for a week keyed on the email alone; presumably
    # realm/fullname are stable for a given email -- TODO confirm.
    try:
        return get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(email, initial_password(email), realm,
                               email_to_fullname(email), email_to_username(email),
                               active=False, is_mirror_dummy=True)
        except IntegrityError:
            # Lost a creation race with another process; use theirs.
            return get_user_profile_by_email(email)
def log_message(message):
    """Record a message in the event log, skipping test-client traffic."""
    client_name = message.sending_client.name
    if not client_name.startswith("test:"):
        log_event(message.to_log_dict())
def always_push_notify(user):
    # robinhood.io asked to get push notifications for **all** notifyable
    # messages, regardless of idle status
    always_notify_domains = ['robinhood.io']
    return user.realm.domain in always_notify_domains
# Helper function. Defaults here are overriden by those set in do_send_messages
def do_send_message(message, rendered_content = None, no_log = False, stream = None, local_id = None):
    """Send one message; thin wrapper around do_send_messages."""
    wrapped = {'message': message,
               'rendered_content': rendered_content,
               'no_log': no_log,
               'stream': stream,
               'local_id': local_id}
    return do_send_messages([wrapped])[0]
def do_send_messages(messages):
    """Deliver a batch of prepared messages.

    Each element is a dict with a 'message' key plus optional
    'rendered_content', 'no_log', 'stream', 'local_id' and
    'sender_queue_id' keys. Returns the ids of all sent messages,
    including those detected as already-mirrored (order of ids is not
    guaranteed to match the input; see the note at the bottom).
    """
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages if message is not None]
    # Filter out zephyr mirror anomalies where the message was already sent
    already_sent_ids = []
    new_messages = []
    for message in messages:
        if isinstance(message['message'], int):
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['no_log'] = message.get('no_log', False)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
    # Log the message to our message log for populate_db to refill
    for message in messages:
        if not message['no_log']:
            log_message(message['message'])
    # Work out the full recipient list for each message.
    for message in messages:
        if message['message'].recipient.type == Recipient.PERSONAL:
            message['recipients'] = list(set([get_user_profile_by_id(message['message'].recipient.type_id),
                                              get_user_profile_by_id(message['message'].sender_id)]))
            # For personals, you send out either 1 or 2 copies of the message, for
            # personals to yourself or to someone else, respectively.
            assert((len(message['recipients']) == 1) or (len(message['recipients']) == 2))
        elif (message['message'].recipient.type == Recipient.STREAM or
              message['message'].recipient.type == Recipient.HUDDLE):
            # We use select_related()/only() here, while the PERSONAL case above uses
            # get_user_profile_by_id() to get UserProfile objects from cache. Streams will
            # typically have more recipients than PMs, so get_user_profile_by_id() would be
            # a bit more expensive here, given that we need to hit the DB anyway and only
            # care about the email from the user profile.
            fields = [
                'user_profile__id',
                'user_profile__email',
                'user_profile__is_active',
                'user_profile__realm__domain'
            ]
            query = Subscription.objects.select_related("user_profile", "user_profile__realm").only(*fields).filter(
                recipient=message['message'].recipient, active=True)
            message['recipients'] = [s.user_profile for s in query]
        else:
            raise ValueError('Bad recipient type')
        # Only deliver the message to active user recipients
        message['active_recipients'] = filter(lambda user_profile: user_profile.is_active,
                                              message['recipients'])
        message['message'].maybe_render_content(None)
        message['message'].update_calculated_fields()
    # Save the message receipts in the database
    user_message_flags = defaultdict(dict)
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        ums = []
        for message in messages:
            ums_to_create = [UserMessage(user_profile=user_profile, message=message['message'])
                             for user_profile in message['active_recipients']]
            # These properties on the Message are set via
            # Message.render_markdown by code in the bugdown inline patterns
            wildcard = message['message'].mentions_wildcard
            mentioned_ids = message['message'].mentions_user_ids
            ids_with_alert_words = message['message'].user_ids_with_alert_words
            is_me_message = message['message'].is_me_message
            for um in ums_to_create:
                # The sender's own human-sent messages start out read.
                if um.user_profile.id == message['message'].sender.id and \
                        message['message'].sent_by_human():
                    um.flags |= UserMessage.flags.read
                if wildcard:
                    um.flags |= UserMessage.flags.wildcard_mentioned
                if um.user_profile_id in mentioned_ids:
                    um.flags |= UserMessage.flags.mentioned
                if um.user_profile_id in ids_with_alert_words:
                    um.flags |= UserMessage.flags.has_alert_word
                if is_me_message:
                    um.flags |= UserMessage.flags.is_me_message
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
            ums.extend(ums_to_create)
        UserMessage.objects.bulk_create(ums)
    for message in messages:
        cache_save_message(message['message'])
        # Render Markdown etc. here and store (automatically) in
        # memcached, so that the single-threaded Tornado server
        # doesn't have to.
        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        user_presences = get_status_dict(sender)
        presences = {}
        for user_profile in message['active_recipients']:
            if user_profile.email in user_presences:
                presences[user_profile.id] = user_presences[user_profile.email]
        event = dict(
            type = 'message',
            message = message['message'].id,
            message_dict_markdown = message['message'].to_dict(apply_markdown=True),
            message_dict_no_markdown = message['message'].to_dict(apply_markdown=False),
            presences = presences)
        users = [{'id': user.id,
                  'flags': user_flags.get(user.id, []),
                  'always_push_notify': always_push_notify(user)}
                 for user in message['active_recipients']]
        if message['message'].recipient.type == Recipient.STREAM:
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            if message['stream'] is None:
                message['stream'] = Stream.objects.select_related("realm").get(id=message['message'].recipient.type_id)
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm.id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(event, users)
        if (settings.ENABLE_FEEDBACK and
            message['message'].recipient.type == Recipient.PERSONAL and
            settings.FEEDBACK_BOT in [up.email for up in message['recipients']]):
            queue_json_publish(
                'feedback_messages',
                message['message'].to_dict(apply_markdown=False),
                lambda x: None
            )
    # Note that this does not preserve the order of message ids
    # returned. In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
def do_create_stream(realm, stream_name):
    # This is used by a management command now, mostly to facilitate testing. It
    # doesn't simulate every single aspect of creating a subscription; for example,
    # we don't send Zulips to users to tell them they have been subscribed.
    stream = Stream()
    stream.realm = realm
    stream.name = stream_name
    stream.save()
    Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)

    # Subscribe every active human in the realm.
    members = UserProfile.objects.filter(realm=realm, is_active=True,
                                         is_bot=False)
    bulk_add_subscriptions([stream], members)
def create_stream_if_needed(realm, stream_name, invite_only=False):
    """Get or create a stream (name match is case-insensitive).

    Announces newly created public streams to the realm.
    Returns (stream, created).
    """
    (stream, created) = Stream.objects.get_or_create(
        realm=realm, name__iexact=stream_name,
        defaults={'name': stream_name, 'invite_only': invite_only})
    if created:
        Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
        if not invite_only:
            announce = dict(type="stream", op="create",
                            streams=[stream.to_dict()])
            send_event(announce, active_user_ids(realm))
    return stream, created
def recipient_for_emails(emails, not_forged_mirror_message,
                         user_profile, sender):
    """Build the Recipient for a private message addressed to `emails`.

    Validates each address, enforces the cross-realm PM policy, and for
    mirrored messages requires the authenticated `user_profile` to be
    among the recipients. Raises ValidationError on any violation.
    """
    recipient_profile_ids = set()
    normalized_emails = set()
    realm_domains = set()
    normalized_emails.add(sender.email)
    realm_domains.add(sender.realm.domain)

    for email in emails:
        try:
            # BUG FIX: this loop used to rebind `user_profile`, so the
            # mirror-authorization check below compared the *last
            # recipient's* id (always in the set) instead of the
            # authenticated user's -- making the check a no-op.
            recipient = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            raise ValidationError("Invalid email '%s'" % (email,))
        if (not recipient.is_active and not recipient.is_mirror_dummy) or \
                recipient.realm.deactivated:
            raise ValidationError("'%s' is no longer using Zulip." % (email,))
        recipient_profile_ids.add(recipient.id)
        normalized_emails.add(recipient.email)
        realm_domains.add(recipient.realm.domain)

    if not_forged_mirror_message and user_profile.id not in recipient_profile_ids:
        raise ValidationError("User not authorized for this query")

    # Prevent cross realm private messages unless it is between only two realms
    # and one of users is a zuliper
    if len(realm_domains) == 2:
        # I'm assuming that cross-realm PMs with the "admin realm" are rare, and therefore can be slower
        admin_realm = Realm.objects.get(domain=settings.ADMIN_DOMAIN)
        admin_realm_admin_emails = {u.email for u in admin_realm.get_admin_users()}
        # We allow settings.CROSS_REALM_BOT_EMAILS for the hardcoded emails for the feedback and notification bots
        if not (normalized_emails & admin_realm_admin_emails or normalized_emails & settings.CROSS_REALM_BOT_EMAILS):
            raise ValidationError("You can't send private messages outside of your organization.")
    if len(realm_domains) > 2:
        raise ValidationError("You can't send private messages outside of your organization.")

    # If the private message is just between the sender and
    # another person, force it to be a personal internally
    if (len(recipient_profile_ids) == 2
            and sender.id in recipient_profile_ids):
        recipient_profile_ids.remove(sender.id)

    if len(recipient_profile_ids) > 1:
        # Make sure the sender is included in huddle messages
        recipient_profile_ids.add(sender.id)
        huddle = get_huddle(list(recipient_profile_ids))
        return get_recipient(Recipient.HUDDLE, huddle.id)
    else:
        return get_recipient(Recipient.PERSONAL, list(recipient_profile_ids)[0])
def already_sent_mirrored_message_id(message):
    """Return the id of an identical already-mirrored message, or None.

    Huddle copies can carry slightly different timestamps, so those are
    matched within a 10-second window.
    """
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)

    duplicates = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        subject=message.subject,
        sending_client=message.sending_client,
        pub_date__gte=message.pub_date - time_window,
        pub_date__lte=message.pub_date + time_window)

    if duplicates.exists():
        return duplicates[0].id
    return None
def extract_recipients(s):
    # We try to accept multiple incoming formats for recipients.
    # See test_extract_recipients() for examples of what we allow.
    try:
        data = ujson.loads(s)
    except ValueError:
        data = s

    if isinstance(data, basestring):
        data = data.split(',')

    if not isinstance(data, list):
        raise ValueError("Invalid data type for recipients")

    # Strip whitespace, then drop empties and duplicates.
    stripped = (recipient.strip() for recipient in data)
    return list(set(recipient for recipient in stripped if recipient))
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(*args, **kwargs):
    """Validate via check_message, then deliver; returns the message id."""
    prepared = check_message(*args, **kwargs)
    return do_send_messages([prepared])[0]
def check_stream_name(stream_name):
    """Raise JsonableError unless `stream_name` is a usable stream name."""
    if stream_name == "":
        raise JsonableError("Stream can't be empty")
    if len(stream_name) > Stream.MAX_NAME_LENGTH:
        raise JsonableError("Stream name too long")
    if not valid_stream_name(stream_name):
        raise JsonableError("Invalid stream name")
def send_pm_if_empty_stream(sender, stream, stream_name):
    """PM a bot's owner when the bot posts to a missing or empty stream.

    No-op unless `sender` is a bot with an owner.  `stream` may be None
    (stream does not exist); `stream_name` is the name the bot targeted.
    Rate-limited per bot via UserProfile.last_reminder.
    """
    # MIT's realm and deactivated realms are exempt from these warnings.
    if sender.realm.domain == 'mit.edu' or sender.realm.deactivated:
        return
    if sender.is_bot and sender.bot_owner is not None:
        if stream:
            num_subscribers = stream.num_subscribers()
        # NOTE: num_subscribers is only bound when stream is truthy; the
        # `or` below short-circuits so it is never read unbound.
        if stream is None or num_subscribers == 0:
            # Warn a bot's owner if they are sending a message to a stream
            # that does not exist, or has no subscribers
            # We warn the user once every 5 minutes to avoid a flood of
            # PMs on a misconfigured integration, re-using the
            # UserProfile.last_reminder field, which is not used for bots.
            last_reminder = sender.last_reminder_tzaware()
            waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
            if not last_reminder or timezone.now() - last_reminder > waitperiod:
                if stream is None:
                    error_msg = "that stream does not yet exist. To create it, "
                elif num_subscribers == 0:
                    error_msg = "there are no subscribers to that stream. To join it, "
                content = ("Hi there! We thought you'd like to know that your bot **%s** just "
                           "tried to send a message to stream `%s`, but %s"
                           "click the gear in the left-side stream list." %
                           (sender.full_name, stream_name, error_msg))
                message = internal_prep_message(settings.NOTIFICATION_BOT, "private",
                                                sender.bot_owner.email, "", content)
                do_send_messages([message])
                # Record the reminder so we don't warn again for 5 minutes.
                sender.last_reminder = timezone.now()
                sender.save(update_fields=['last_reminder'])
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(sender, client, message_type_name, message_to,
                  subject_name, message_content, realm=None, forged=False,
                  forged_timestamp=None, forwarder_user_profile=None, local_id=None,
                  sender_queue_id=None):
    """Validate and construct a Message without saving or sending it.

    Returns a dict with keys 'message', 'stream', 'local_id' and
    'sender_queue_id' suitable for do_send_messages(); for mirrored
    zephyr messages that were already delivered, returns {'message': id}.
    Raises JsonableError on any validation failure.
    """
    stream = None
    if not message_to and message_type_name == 'stream' and sender.default_sending_stream:
        # Use the users default stream
        message_to = [sender.default_sending_stream.name]
    elif len(message_to) == 0:
        raise JsonableError("Message must have recipients")
    if len(message_content.strip()) == 0:
        raise JsonableError("Message must not be empty")
    message_content = truncate_body(message_content)
    if realm is None:
        realm = sender.realm
    if message_type_name == 'stream':
        if len(message_to) > 1:
            raise JsonableError("Cannot send to multiple streams")
        stream_name = message_to[0].strip()
        check_stream_name(stream_name)
        if subject_name is None:
            raise JsonableError("Missing topic")
        subject = subject_name.strip()
        if subject == "":
            raise JsonableError("Topic can't be empty")
        subject = truncate_topic(subject)
        ## FIXME: Commented out temporarily while we figure out what we want
        # if not valid_stream_name(subject):
        #     return json_error("Invalid subject name")
        stream = get_stream(stream_name, realm)
        # Warn the bot owner (if applicable) even before failing below.
        send_pm_if_empty_stream(sender, stream, stream_name)
        if stream is None:
            raise JsonableError("Stream does not exist")
        recipient = get_recipient(Recipient.STREAM, stream.id)
        if not stream.invite_only:
            # This is a public stream
            pass
        elif subscribed_to_stream(sender, stream):
            # Or it is private, but your are subscribed
            pass
        elif sender.is_api_super_user() or (forwarder_user_profile is not None and
                                            forwarder_user_profile.is_api_super_user()):
            # Or this request is being done on behalf of a super user
            pass
        elif sender.is_bot and subscribed_to_stream(sender.bot_owner, stream):
            # Or you're a bot and your owner is subscribed.
            pass
        else:
            # All other cases are an error.
            raise JsonableError("Not authorized to send to stream '%s'" % (stream.name,))
    elif message_type_name == 'private':
        # Mirroring clients may deliver messages on behalf of other users.
        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]
        not_forged_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_emails(message_to, not_forged_mirror_message,
                                             forwarder_user_profile, sender)
        except ValidationError, e:
            assert isinstance(e.messages[0], basestring)
            raise JsonableError(e.messages[0])
    else:
        raise JsonableError("Invalid message type")
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if message_type_name == 'stream':
        message.subject = subject
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.pub_date = timestamp_to_datetime(forged_timestamp)
    else:
        message.pub_date = timezone.now()
    message.sending_client = client
    if not message.maybe_render_content(realm.domain):
        raise JsonableError("Unable to render message")
    if client.name == "zephyr_mirror":
        # Dedup: if this mirrored message was already delivered, return its id.
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}
    return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id}
def internal_prep_message(sender_email, recipient_type_name, recipients,
                          subject, content, realm=None):
    """
    Create a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.

    Returns None (after logging) if the message fails validation.
    """
    if len(content) > MAX_MESSAGE_LENGTH:
        # NOTE(review): truncation point is a hard-coded 3900 rather than
        # being derived from MAX_MESSAGE_LENGTH — confirm these are meant
        # to stay in sync.
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    sender = get_user_profile_by_email(sender_email)
    if realm is None:
        realm = sender.realm
    parsed_recipients = extract_recipients(recipients)
    if recipient_type_name == "stream":
        # Internal messages may target streams that don't exist yet.
        stream, _ = create_stream_if_needed(realm, parsed_recipients[0])
    try:
        return check_message(sender, get_client("Internal"), recipient_type_name,
                             parsed_recipients, subject, content, realm)
    except JsonableError, e:
        logging.error("Error queueing internal message by %s: %s" % (sender_email, str(e)))
        return None
def internal_send_message(sender_email, recipient_type_name, recipients,
                          subject, content, realm=None):
    """Prepare and immediately deliver a single internal message."""
    prepared = internal_prep_message(sender_email, recipient_type_name, recipients,
                                     subject, content, realm)
    if prepared is None:
        # internal_prep_message hit (and already logged) an error.
        return
    do_send_messages([prepared])
def pick_color(user_profile):
    """Choose a stream color for this user, avoiding colors already in use."""
    active_stream_subs = Subscription.objects.filter(user_profile=user_profile,
                                                     active=True,
                                                     recipient__type=Recipient.STREAM)
    return pick_color_helper(user_profile, active_stream_subs)
def pick_color_helper(user_profile, subs):
    """Pick the first palette color not used by any active sub in `subs`.

    Once every palette color is taken, cycle through the palette based on
    the number of used colors.
    """
    # These colors are shared with the palette in subs.js.
    stream_assignment_colors = [
        "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
        "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
        "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
        "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
        "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
        "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
    used_colors = [sub.color for sub in subs if sub.active]
    unused_colors = [color for color in stream_assignment_colors
                     if color not in used_colors]
    if unused_colors:
        return unused_colors[0]
    # Palette exhausted: wrap around deterministically.
    return stream_assignment_colors[len(used_colors) % len(stream_assignment_colors)]
def get_subscription(stream_name, user_profile):
    """Return the user's active Subscription to the named stream.

    Raises Subscription.DoesNotExist if the user is not actively
    subscribed.  Assumes the stream exists in the user's realm —
    get_stream() returning None would fail here; TODO confirm callers
    guarantee this.
    """
    stream = get_stream(stream_name, user_profile.realm)
    recipient = get_recipient(Recipient.STREAM, stream.id)
    return Subscription.objects.get(user_profile=user_profile,
                                    recipient=recipient, active=True)
def validate_user_access_to_subscribers(user_profile, stream):
    """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
    * The user and the stream are in different realms
    * The realm is MIT and the stream is not invite only.
    * The stream is invite only, requesting_user is passed, and that user
      does not subscribe to the stream.
    """
    return validate_user_access_to_subscribers_helper(
        user_profile,
        {"realm__domain": stream.realm.domain,
         "realm_id": stream.realm_id,
         "invite_only": stream.invite_only},
        # We use a lambda here so that we only compute whether the
        # user is subscribed if we have to
        lambda: subscribed_to_stream(user_profile, stream))
def validate_user_access_to_subscribers_helper(user_profile, stream_dict, check_user_subscribed):
    """ Helper for validate_user_access_to_subscribers that doesn't require a full stream object
    * stream_dict must have keys "realm__domain", "realm_id", "invite_only"
    * check_user_subscribed is a function that when called with no
      arguments, will report whether the user is subscribed to the stream
    * user_profile may be None, in which case the realm and subscription
      checks are skipped (but the MIT public-stream check still applies)
    """
    if user_profile is not None and user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not on given realm")
    if stream_dict["realm__domain"] == "mit.edu" and not stream_dict["invite_only"]:
        raise JsonableError("You cannot get subscribers for public streams in this realm")
    if (user_profile is not None and stream_dict["invite_only"] and
        not check_user_subscribed()):
        raise JsonableError("Unable to retrieve subscribers for invite-only stream")
# sub_dict is a dictionary mapping stream_id => whether the user is subscribed to that stream
def bulk_get_subscriber_user_ids(stream_dicts, user_profile, sub_dict):
    """Return {stream_id: [subscriber user ids]} for all of stream_dicts.

    Streams the requesting user may not view are silently skipped (their
    entry stays an empty list).  Only active subscriptions of active
    users are counted.
    """
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        try:
            # The lambda closes over the loop variable, but it is invoked
            # (if at all) inside this same iteration, so late binding is safe.
            validate_user_access_to_subscribers_helper(user_profile, stream_dict,
                                                       lambda: sub_dict[stream_dict["id"]])
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)
    subscriptions = Subscription.objects.select_related("recipient").filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=[stream["id"] for stream in target_stream_dicts],
        user_profile__is_active=True,
        active=True).values("user_profile_id", "recipient__type_id")
    # Seed with every requested stream id so inaccessible streams map to [].
    result = dict((stream["id"], []) for stream in stream_dicts)
    for sub in subscriptions:
        result[sub["recipient__type_id"]].append(sub["user_profile_id"])
    return result
def get_subscribers_query(stream, requesting_user):
    """ Build a queryset for the subscribers of `stream` (a Stream object).

    Raises a JsonableError if `requesting_user` may not view the
    subscriber list (see validate_user_access_to_subscribers).
    The caller can refine this query with select_related(), values(), etc. depending
    on whether it wants objects or just certain fields
    """
    validate_user_access_to_subscribers(requesting_user, stream)
    # Note that non-active users may still have "active" subscriptions, because we
    # want to be able to easily reactivate them with their old subscriptions.  This
    # is why the query here has to look at the UserProfile.is_active flag.
    subscriptions = Subscription.objects.filter(recipient__type=Recipient.STREAM,
                                                recipient__type_id=stream.id,
                                                user_profile__is_active=True,
                                                active=True)
    return subscriptions
def get_subscribers(stream, requesting_user=None):
    """Return the UserProfiles actively subscribed to `stream`."""
    query = get_subscribers_query(stream, requesting_user).select_related()
    return [sub.user_profile for sub in query]
def get_subscriber_emails(stream, requesting_user=None):
    """Return the email addresses of `stream`'s active subscribers."""
    rows = get_subscribers_query(stream, requesting_user).values('user_profile__email')
    return [row['user_profile__email'] for row in rows]
def get_subscriber_ids(stream):
    """Return subscriber user ids for `stream`, or [] if not viewable."""
    try:
        subscriptions = get_subscribers_query(stream, None)
    except JsonableError:
        # e.g. public MIT streams, whose subscriber lists are hidden.
        return []
    return [row['user_profile_id']
            for row in subscriptions.values('user_profile_id')]
def get_other_subscriber_ids(stream, user_profile_id):
    """Return `stream`'s subscriber ids, excluding `user_profile_id`."""
    return [sub_id for sub_id in get_subscriber_ids(stream)
            if sub_id != user_profile_id]
def maybe_get_subscriber_emails(stream):
    """ Alternate version of get_subscriber_emails that takes a Stream object only
    (not a name), and simply returns an empty list if unable to get a real
    subscriber list (because we're on the MIT realm). """
    try:
        return get_subscriber_emails(stream)
    except JsonableError:
        return []
def set_stream_color(user_profile, stream_name, color=None):
    """Set (or auto-pick) the color of the user's subscription to a stream.

    Returns the color that was stored.
    """
    sub = get_subscription(stream_name, user_profile)
    if not color:
        color = pick_color(user_profile)
    sub.color = color
    sub.save(update_fields=["color"])
    return color
def get_subscribers_to_streams(streams):
    """ Return a dict where the keys are user profiles, and the values are
    arrays of all the streams within 'streams' to which that user is
    subscribed.
    """
    subscribes_to = {}
    for stream in streams:
        try:
            subscribers = get_subscribers(stream)
        except JsonableError:
            # We can't get a subscriber list for this stream.  Probably MIT.
            continue
        for subscriber in subscribers:
            subscribes_to.setdefault(subscriber, []).append(stream)
    return subscribes_to
def notify_subscriptions_added(user_profile, sub_pairs, stream_emails, no_log=False):
    """Log and notify `user_profile` about new subscriptions.

    sub_pairs is a list of (Subscription, Stream) tuples; stream_emails is
    a callable mapping a Stream to its subscriber email list.
    """
    if not no_log:
        # Bug fix: this previously read `stream.realm.domain`, which only
        # worked via the Python 2 list-comprehension variable leaking out of
        # the 'names' entry; it raised NameError for empty sub_pairs and used
        # an arbitrary stream's realm.  All these streams belong to the
        # subscriber's realm, so use that directly.
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'domain': user_profile.realm.domain})
    # Send a notification to the user who subscribed.
    payload = [dict(name=stream.name,
                    stream_id=stream.id,
                    in_home_view=subscription.in_home_view,
                    invite_only=stream.invite_only,
                    color=subscription.color,
                    email_address=encode_email_address(stream),
                    desktop_notifications=subscription.desktop_notifications,
                    audible_notifications=subscription.audible_notifications,
                    description=stream.description,
                    subscribers=stream_emails(stream))
               for (subscription, stream) in sub_pairs]
    event = dict(type="subscription", op="add",
                 subscriptions=payload)
    send_event(event, [user_profile.id])
def bulk_add_subscriptions(streams, users):
    """Subscribe each of `users` to each of `streams` in bulk.

    Reactivates existing inactive subscriptions instead of duplicating
    them.  Sends "occupy" events for newly-populated public streams,
    "add" notifications to the new subscribers, and "peer_add" events to
    existing subscribers.  Returns (subscribed, already_subscribed),
    both lists of (user_profile, stream) tuples.
    """
    recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])
    recipients = [recipient.id for recipient in recipients_map.values()]
    # recipient id -> Stream, for translating Subscription rows back to streams.
    stream_map = {}
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream
    subs_by_user = defaultdict(list)
    all_subs_query = Subscription.objects.select_related("user_profile")
    for sub in all_subs_query.filter(user_profile__in=users,
                                     recipient__type=Recipient.STREAM):
        subs_by_user[sub.user_profile_id].append(sub)
    already_subscribed = []
    subs_to_activate = []
    new_subs = []
    for user_profile in users:
        needs_new_sub = set(recipients)
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
    subs_to_add = []
    for (user_profile, recipient_id, stream) in new_subs:
        color = pick_color_helper(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id,
                                  desktop_notifications=user_profile.enable_stream_desktop_notifications,
                                  audible_notifications=user_profile.enable_stream_sounds)
        # Track the pending sub so later pick_color_helper calls avoid its color.
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        # NOTE(review): `user_profile` here is the leaked loop variable from
        # the loops above (the last user processed); this presumably assumes
        # all `users` share one realm — confirm against callers.
        occupied_streams_before = list(get_occupied_streams(user_profile.realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        Subscription.objects.filter(id__in=[sub.id for (sub, stream_name) in subs_to_activate]).update(active=True)
        occupied_streams_after = list(get_occupied_streams(user_profile.realm))
    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()
                              for stream in new_occupied_streams])
        send_event(event, active_user_ids(user_profile.realm))
    # Notify all existing users on streams that users have joined
    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minize DB queries
    all_subs = Subscription.objects.filter(recipient__type=Recipient.STREAM,
                                           recipient__type_id__in=[stream.id for stream in streams],
                                           user_profile__is_active=True,
                                           active=True).select_related('recipient', 'user_profile')
    all_subs_by_stream = defaultdict(list)
    emails_by_stream = defaultdict(list)
    for sub in all_subs:
        all_subs_by_stream[sub.recipient.type_id].append(sub.user_profile)
        emails_by_stream[sub.recipient.type_id].append(sub.user_profile.email)
    def fetch_stream_subscriber_emails(stream):
        # Subscriber lists of public MIT streams are never exposed.
        if stream.realm.domain == "mit.edu" and not stream.invite_only:
            return []
        return emails_by_stream[stream.id]
    sub_tuples_by_user = defaultdict(list)
    new_streams = set()
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_emails)
    for stream in streams:
        if stream.realm.domain == "mit.edu" and not stream.invite_only:
            continue
        new_users = [user for user in users if (user.id, stream.id) in new_streams]
        new_user_ids = [user.id for user in new_users]
        all_subscribed_ids = [user.id for user in all_subs_by_stream[stream.id]]
        other_user_ids = set(all_subscribed_ids) - set(new_user_ids)
        if other_user_ids:
            for user_profile in new_users:
                event = dict(type="subscription", op="peer_add",
                             subscriptions=[stream.name],
                             user_email=user_profile.email)
                send_event(event, other_user_ids)
    return ([(user_profile, stream_name) for (user_profile, recipient_id, stream_name) in new_subs] +
            [(sub.user_profile, stream_name) for (sub, stream_name) in subs_to_activate],
            already_subscribed)
# When changing this, also change bulk_add_subscriptions
def do_add_subscription(user_profile, stream, no_log=False):
    """Subscribe a single user to `stream`.

    Returns True if a (re)subscription actually happened, False if the
    user was already actively subscribed.  Sends "occupy", "add" and
    "peer_add" events as appropriate.
    """
    recipient = get_recipient(Recipient.STREAM, stream.id)
    color = pick_color(user_profile)
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        vacant_before = stream.num_subscribers() == 0
        (subscription, created) = Subscription.objects.get_or_create(
            user_profile=user_profile, recipient=recipient,
            defaults={'active': True, 'color': color,
                      'notifications': user_profile.default_desktop_notifications})
        did_subscribe = created
        if not subscription.active:
            # Reactivate a previously-deactivated subscription.
            did_subscribe = True
            subscription.active = True
            subscription.save(update_fields=["active"])
    if vacant_before and did_subscribe and not stream.invite_only:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()])
        send_event(event, active_user_ids(user_profile.realm))
    if did_subscribe:
        emails_by_stream = {stream.id: maybe_get_subscriber_emails(stream)}
        notify_subscriptions_added(user_profile, [(subscription, stream)], lambda stream: emails_by_stream[stream.id], no_log)
        user_ids = get_other_subscriber_ids(stream, user_profile.id)
        event = dict(type="subscription", op="peer_add",
                     subscriptions=[stream.name],
                     user_email=user_profile.email)
        send_event(event, user_ids)
    return did_subscribe
def notify_subscriptions_removed(user_profile, streams, no_log=False):
    """Log and notify `user_profile` (and peers) about removed subscriptions."""
    if not no_log:
        # Bug fix: this previously read `stream.realm.domain`, relying on the
        # Python 2 list-comprehension variable leaking out of the 'names'
        # entry; with an empty `streams` it raised NameError, and otherwise it
        # used an arbitrary stream's realm.  The user's realm is the correct
        # and always-available value.
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'domain': user_profile.realm.domain})
    payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    event = dict(type="subscription", op="remove",
                 subscriptions=payload)
    send_event(event, [user_profile.id])
    # As with a subscription add, send a 'peer subscription' notice to other
    # subscribers so they know the user unsubscribed.
    # FIXME: This code was mostly a copy-paste from notify_subscriptions_added.
    #        We have since streamlined how we do notifications for adds, and
    #        we should do the same for removes.
    notifications_for = get_subscribers_to_streams(streams)
    for event_recipient, notifications in notifications_for.iteritems():
        # Don't send a peer subscription notice to yourself.
        if event_recipient == user_profile:
            continue
        stream_names = [stream.name for stream in notifications]
        event = dict(type="subscription", op="peer_remove",
                     subscriptions=stream_names,
                     user_email=user_profile.email)
        send_event(event, [event_recipient.id])
def bulk_remove_subscriptions(users, streams):
    """Unsubscribe each of `users` from each of `streams` in bulk.

    Returns (removed, not_subscribed), both lists of (user_profile,
    stream) tuples.  Sends "vacate" events for newly-empty public
    streams and removal notifications to the affected users.
    """
    recipients_map = bulk_get_recipients(Recipient.STREAM,
                                         [stream.id for stream in streams])
    # recipient id -> Stream, for translating Subscription rows back to streams.
    stream_map = {}
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream
    subs_by_user = dict((user_profile.id, []) for user_profile in users)
    for sub in Subscription.objects.select_related("user_profile").filter(user_profile__in=users,
                                                                          recipient__in=recipients_map.values(),
                                                                          active=True):
        subs_by_user[sub.user_profile_id].append(sub)
    subs_to_deactivate = []
    not_subscribed = []
    for user_profile in users:
        recipients_to_unsub = set([recipient.id for recipient in recipients_map.values()])
        for sub in subs_by_user[user_profile.id]:
            recipients_to_unsub.remove(sub.recipient_id)
            subs_to_deactivate.append((sub, stream_map[sub.recipient_id]))
        # Anything left had no active subscription for this user.
        for recipient_id in recipients_to_unsub:
            not_subscribed.append((user_profile, stream_map[recipient_id]))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        # NOTE(review): `user_profile` is the leaked loop variable from above
        # (the last user processed); presumably all `users` share one realm —
        # confirm against callers.
        occupied_streams_before = list(get_occupied_streams(user_profile.realm))
        Subscription.objects.filter(id__in=[sub.id for (sub, stream_name) in
                                            subs_to_deactivate]).update(active=False)
        occupied_streams_after = list(get_occupied_streams(user_profile.realm))
    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)
                          if not stream.invite_only]
    if new_vacant_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_streams])
        send_event(event, active_user_ids(user_profile.realm))
    streams_by_user = defaultdict(list)
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
    return ([(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
            not_subscribed)
def do_remove_subscription(user_profile, stream, no_log=False):
    """Unsubscribe a single user from `stream`.

    Returns True if an active subscription was deactivated, False if
    there was nothing to remove.  Sends "vacate" and removal events as
    appropriate.
    """
    recipient = get_recipient(Recipient.STREAM, stream.id)
    maybe_sub = Subscription.objects.filter(user_profile=user_profile,
                                            recipient=recipient)
    if len(maybe_sub) == 0:
        return False
    subscription = maybe_sub[0]
    did_remove = subscription.active
    subscription.active = False
    with transaction.atomic():
        subscription.save(update_fields=["active"])
        vacant_after = stream.num_subscribers() == 0
    if vacant_after and did_remove and not stream.invite_only:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()])
        send_event(event, active_user_ids(user_profile.realm))
    if did_remove:
        notify_subscriptions_removed(user_profile, [stream], no_log)
    return did_remove
def log_subscription_property_change(user_email, stream_name, property, value):
    """Record a subscription property change in the event log."""
    log_event({'type': 'subscription_property',
               'property': property,
               'user': user_email,
               'stream_name': stream_name,
               'value': value})
def do_change_subscription_property(user_profile, sub, stream_name,
                                    property_name, value):
    """Update one property on a subscription, log it, and notify the user."""
    setattr(sub, property_name, value)
    sub.save(update_fields=[property_name])
    log_subscription_property_change(user_profile.email, stream_name,
                                     property_name, value)
    send_event({'type': "subscription",
                'op': "update",
                'email': user_profile.email,
                'property': property_name,
                'value': value,
                'name': stream_name},
               [user_profile.id])
def do_activate_user(user_profile, log=True, join_date=None):
    """Activate a user account, resetting its password and join date.

    `join_date` defaults to the current time; callers may pass an
    explicit datetime to backdate it.
    """
    # Bug fix: the default was previously `join_date=timezone.now()`, which
    # is evaluated once at import time — every activation relying on the
    # default shared the server-start timestamp.  Use a None sentinel and
    # compute the timestamp per call instead.
    if join_date is None:
        join_date = timezone.now()
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_password(initial_password(user_profile.email))
    user_profile.date_joined = join_date
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy"])
    if log:
        domain = user_profile.realm.domain
        log_event({'type': 'user_activated',
                   'user': user_profile.email,
                   'domain': domain})
    notify_created_user(user_profile)
def do_reactivate_user(user_profile):
    """Re-activate an existing user account.

    Unlike do_activate_user, this is meant for re-activating existing
    users, so it doesn't reset their password, join date, etc.
    """
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])
    log_event({'type': 'user_reactivated',
               'user': user_profile.email,
               'domain': user_profile.realm.domain})
    notify_created_user(user_profile)
def do_change_password(user_profile, password, log=True, commit=True,
                       hashed_password=False):
    """Set a user's password.

    If `hashed_password` is True, `password` is already a password hash
    and is stored verbatim; otherwise it is hashed via set_password().
    """
    if hashed_password:
        # Bug fix: both branches previously called set_password(), which
        # hashes its argument — so a pre-hashed password got hashed a second
        # time and the stored credential never matched.  Assign the hash
        # directly instead.
        user_profile.password = password
    else:
        user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    if log:
        log_event({'type': 'user_change_password',
                   'user': user_profile.email,
                   'pwhash': user_profile.password})
def do_change_full_name(user_profile, full_name, log=True):
    """Set a user's full name and broadcast the change to clients."""
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])
    if log:
        log_event({'type': 'user_change_full_name',
                   'user': user_profile.email,
                   'full_name': full_name})
    person = {'email': user_profile.email,
              'full_name': user_profile.full_name}
    send_event({'type': 'realm_user', 'op': 'update', 'person': person},
               active_user_ids(user_profile.realm))
    if user_profile.is_bot:
        # Bot owners also get a realm_bot event so bot-settings UIs update.
        send_event({'type': 'realm_bot', 'op': 'update', 'bot': person},
                   bot_owner_userids(user_profile))
def do_regenerate_api_key(user_profile, log=True):
    """Replace the user's API key with a freshly generated one."""
    user_profile.api_key = random_api_key()
    user_profile.save(update_fields=["api_key"])
    if log:
        log_event({'type': 'user_change_api_key',
                   'user': user_profile.email})
    if user_profile.is_bot:
        # Let the bot's owner(s) know the key changed.
        bot_payload = {'email': user_profile.email,
                       'api_key': user_profile.api_key}
        send_event({'type': 'realm_bot', 'op': 'update', 'bot': bot_payload},
                   bot_owner_userids(user_profile))
def do_change_avatar_source(user_profile, avatar_source, log=True):
    """Switch the user's avatar source and notify bot owners if needed."""
    user_profile.avatar_source = avatar_source
    user_profile.save(update_fields=["avatar_source"])
    if log:
        log_event({'type': 'user_change_avatar_source',
                   'user': user_profile.email,
                   'avatar_source': avatar_source})
    if user_profile.is_bot:
        bot_payload = {'email': user_profile.email,
                       'avatar_url': avatar_url(user_profile)}
        send_event({'type': 'realm_bot', 'op': 'update', 'bot': bot_payload},
                   bot_owner_userids(user_profile))
def _default_stream_permision_check(user_profile, stream):
    """Raise JsonableError if `user_profile` may not use `stream` as a default.

    A None stream is always allowed.  For bots, the permission check is
    done against the bot's owner.
    """
    # Any user can have a None default stream
    if stream is None:
        return
    checked_user = user_profile.bot_owner if user_profile.is_bot else user_profile
    if stream.invite_only and not subscribed_to_stream(checked_user, stream):
        raise JsonableError('Insufficient permission')
def do_change_default_sending_stream(user_profile, stream, log=True):
    """Set the user's default sending stream (may be None)."""
    _default_stream_permision_check(user_profile, stream)
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])
    if log:
        log_event({'type': 'user_change_default_sending_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event({'type': 'realm_bot',
                    'op': 'update',
                    'bot': {'email': user_profile.email,
                            'default_sending_stream': stream_name}},
                   bot_owner_userids(user_profile))
def do_change_default_events_register_stream(user_profile, stream, log=True):
    """Set the user's default events-register stream (may be None)."""
    _default_stream_permision_check(user_profile, stream)
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])
    if log:
        log_event({'type': 'user_change_default_events_register_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event({'type': 'realm_bot',
                    'op': 'update',
                    'bot': {'email': user_profile.email,
                            'default_events_register_stream': stream_name}},
                   bot_owner_userids(user_profile))
def do_change_default_all_public_streams(user_profile, value, log=True):
    """Toggle whether the user defaults to following all public streams."""
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])
    if log:
        log_event({'type': 'user_change_default_all_public_streams',
                   'user': user_profile.email,
                   'value': str(value)})
    if user_profile.is_bot:
        bot_payload = {'email': user_profile.email,
                       'default_all_public_streams': user_profile.default_all_public_streams}
        send_event({'type': 'realm_bot', 'op': 'update', 'bot': bot_payload},
                   bot_owner_userids(user_profile))
def do_change_is_admin(user_profile, is_admin, permission='administer'):
    """Grant or revoke a realm-level permission for the user.

    When the permission is 'administer', clients are notified of the
    changed admin status.
    """
    if is_admin:
        assign_perm(permission, user_profile, user_profile.realm)
    else:
        remove_perm(permission, user_profile, user_profile.realm)
    if permission == 'administer':
        send_event({'type': "realm_user", 'op': "update",
                    'person': {'email': user_profile.email,
                               'is_admin': is_admin}},
                   active_user_ids(user_profile.realm))
def do_make_stream_public(user_profile, realm, stream_name):
    """Make an invite-only stream public.

    The requesting user must be subscribed to the stream.
    """
    stream_name = stream_name.strip()
    target = get_stream(stream_name, realm)
    if not target:
        raise JsonableError('Unknown stream "%s"' % (stream_name,))
    if not subscribed_to_stream(user_profile, target):
        raise JsonableError('You are not invited to this stream.')
    target.invite_only = False
    target.save(update_fields=['invite_only'])
    return {}
def do_make_stream_private(realm, stream_name):
    """Make the named stream invite-only."""
    stream_name = stream_name.strip()
    target = get_stream(stream_name, realm)
    if not target:
        raise JsonableError('Unknown stream "%s"' % (stream_name,))
    target.invite_only = True
    target.save(update_fields=['invite_only'])
    return {}
def do_rename_stream(realm, old_name, new_name, log=True):
    """Rename a stream, fixing up caches and notifying clients.

    Raises JsonableError if the old stream doesn't exist, the new name is
    invalid, or the new name is already taken (case-changes of the same
    name are allowed).  Returns {'email_address': <new stream email>}.
    """
    old_name = old_name.strip()
    new_name = new_name.strip()
    stream = get_stream(old_name, realm)
    if not stream:
        raise JsonableError('Unknown stream "%s"' % (old_name,))
    # Will raise if there's an issue.
    check_stream_name(new_name)
    if get_stream(new_name, realm) and old_name.lower() != new_name.lower():
        raise JsonableError('Stream name "%s" is already taken' % (new_name,))
    # Re-read the canonical old name from the object before overwriting it.
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    if log:
        log_event({'type': 'stream_name_change',
                   'domain': realm.domain,
                   'new_name': new_name})
    recipient = get_recipient(Recipient.STREAM, stream.id)
    messages = Message.objects.filter(recipient=recipient).only("id")
    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, realm)
    new_cache_key = get_stream_cache_key(stream.name, realm)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient.id), stream.name)
    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(message_cache_key(message.id) for message in messages)
    cache_delete_many(
        to_dict_cache_key_id(message.id, True) for message in messages)
    cache_delete_many(
        to_dict_cache_key_id(message.id, False) for message in messages)
    new_email = encode_email_address(stream)
    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            name=old_name
        )
        send_event(event, stream_user_ids(stream))
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def do_change_stream_description(realm, stream_name, new_description):
    """Update a stream's description and notify its subscribers."""
    stream = get_stream(stream_name, realm)
    stream.description = new_description
    stream.save(update_fields=['description'])
    send_event({'type': 'stream', 'op': 'update',
                'property': 'description', 'name': stream_name,
                'value': new_description},
               stream_user_ids(stream))
    return {}
def do_create_realm(domain, name, restricted_to_domain=True):
    """Ensure a Realm exists for `domain`; return (realm, created).

    When a new realm is created this also sets up its notifications
    stream, posts a welcome message there, logs the creation, and (if a
    new-user bot is configured) announces the signup.
    """
    realm = get_realm(domain)
    created = not realm
    if not created:
        # Realm already exists; nothing to set up.
        return (realm, created)
    realm = Realm(domain=domain, name=name,
                  restricted_to_domain=restricted_to_domain)
    realm.save()
    # The notifications stream can only be created once the Realm row exists.
    notifications_stream, _ = create_stream_if_needed(realm, Realm.NOTIFICATION_STREAM_NAME)
    realm.notifications_stream = notifications_stream
    realm.save(update_fields=['notifications_stream'])
    # Include a welcome message in this notifications stream
    product_name = "Zulip"
    content = """Hello, and welcome to %s!
This is a message on stream `%s` with the topic `welcome`. We'll use this stream for system-generated notifications.""" % (product_name, notifications_stream.name,)
    msg = internal_prep_message(settings.WELCOME_BOT, 'stream',
                                notifications_stream.name, "welcome",
                                content, realm=realm)
    do_send_messages([msg])
    # Log the event
    log_event({"type": "realm_created",
               "domain": domain,
               "restricted_to_domain": restricted_to_domain})
    if settings.NEW_USER_BOT is not None:
        signup_message = "Signups enabled"
        if not restricted_to_domain:
            signup_message += " (open realm)"
        internal_send_message(settings.NEW_USER_BOT, "stream",
                              "signups", domain, signup_message)
    return (realm, created)
def do_change_enable_stream_desktop_notifications(user_profile,
                                                  enable_stream_desktop_notifications,
                                                  log=True):
    """Persist the per-stream desktop notification preference and notify clients."""
    user_profile.enable_stream_desktop_notifications = enable_stream_desktop_notifications
    user_profile.save(update_fields=["enable_stream_desktop_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_stream_desktop_notifications',
                 setting=enable_stream_desktop_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_stream_sounds(user_profile, enable_stream_sounds, log=True):
    """Persist the per-stream audible notification preference and notify clients."""
    user_profile.enable_stream_sounds = enable_stream_sounds
    user_profile.save(update_fields=["enable_stream_sounds"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_stream_sounds',
                 setting=enable_stream_sounds)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications, log=True):
    """Persist the global desktop notification preference and notify clients."""
    user_profile.enable_desktop_notifications = enable_desktop_notifications
    user_profile.save(update_fields=["enable_desktop_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_desktop_notifications',
                 setting=enable_desktop_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_sounds(user_profile, enable_sounds, log=True):
    """Persist the global audible notification preference and notify clients."""
    user_profile.enable_sounds = enable_sounds
    user_profile.save(update_fields=["enable_sounds"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_sounds',
                 setting=enable_sounds)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_offline_email_notifications(user_profile, offline_email_notifications, log=True):
    """Persist the offline email notification preference and notify clients."""
    user_profile.enable_offline_email_notifications = offline_email_notifications
    user_profile.save(update_fields=["enable_offline_email_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_offline_email_notifications',
                 setting=offline_email_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_offline_push_notifications(user_profile, offline_push_notifications, log=True):
    """Persist the offline push notification preference and notify clients."""
    user_profile.enable_offline_push_notifications = offline_push_notifications
    user_profile.save(update_fields=["enable_offline_push_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_offline_push_notifications',
                 setting=offline_push_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_digest_emails(user_profile, enable_digest_emails, log=True):
    """Persist the digest-email preference; opting out clears queued digests."""
    user_profile.enable_digest_emails = enable_digest_emails
    user_profile.save(update_fields=["enable_digest_emails"])
    if not enable_digest_emails:
        # Remove any digest emails that have been enqueued.
        clear_followup_emails_queue(user_profile.email)
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_digest_emails',
                 setting=enable_digest_emails)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_autoscroll_forever(user_profile, autoscroll_forever, log=True):
    """Persist the autoscroll-forever preference; logged but no client event is sent."""
    user_profile.autoscroll_forever = autoscroll_forever
    user_profile.save(update_fields=["autoscroll_forever"])
    if log:
        log_event(dict(type='autoscroll_forever',
                       user=user_profile.email,
                       autoscroll_forever=autoscroll_forever))
def do_change_enter_sends(user_profile, enter_sends):
    """Persist whether pressing Enter sends a message for this user."""
    # Note: unlike most settings changes in this file, no event is sent
    # to clients here.
    user_profile.enter_sends = enter_sends
    user_profile.save(update_fields=["enter_sends"])
def do_change_default_desktop_notifications(user_profile, default_desktop_notifications):
    """Persist the default desktop notifications setting (no client event)."""
    user_profile.default_desktop_notifications = default_desktop_notifications
    user_profile.save(update_fields=["default_desktop_notifications"])
def do_change_twenty_four_hour_time(user_profile, setting_value, log=True):
    """Persist the 24-hour-clock display preference and notify clients."""
    user_profile.twenty_four_hour_time = setting_value
    user_profile.save(update_fields=["twenty_four_hour_time"])
    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name='twenty_four_hour_time',
                 setting=setting_value)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_left_side_userlist(user_profile, setting_value, log=True):
    """Persist the left-side-userlist display preference and notify clients."""
    user_profile.left_side_userlist = setting_value
    user_profile.save(update_fields=["left_side_userlist"])
    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name='left_side_userlist',
                 setting=setting_value)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def set_default_streams(realm, stream_names):
    """Replace the realm's default-stream set with `stream_names`.

    Streams are created as needed.  The realm's notifications stream is
    always appended as a default, matching the previous behavior.
    """
    DefaultStream.objects.filter(realm=realm).delete()
    # All realms get a notifications stream by default
    for name in list(stream_names) + [Realm.NOTIFICATION_STREAM_NAME]:
        stream, _ = create_stream_if_needed(realm, name)
        DefaultStream.objects.create(stream=stream, realm=realm)
    log_event({'type': 'default_streams',
               'domain': realm.domain,
               'streams': stream_names})
def do_add_default_stream(realm, stream_name):
    """Add `stream_name` (creating the stream if needed) to the realm's defaults.

    Idempotent: if the stream is already a default, nothing changes.
    Returns {} (the JSON-able success value) on every path; previously
    the already-a-default path returned None while the other returned {}.
    """
    stream, _ = create_stream_if_needed(realm, stream_name)
    if not DefaultStream.objects.filter(realm=realm, stream=stream).exists():
        DefaultStream.objects.create(realm=realm, stream=stream)
    return {}
def do_remove_default_stream(realm, stream_name):
    """Remove `stream_name` from the realm's default streams.

    A no-op (the filter matches nothing) if the stream was not a default.
    """
    DefaultStream.objects.filter(realm=realm, stream__name=stream_name).delete()
    return {}
def get_default_streams_for_realm(realm):
    """Return the realm's default Stream objects.

    select_related pre-fetches the stream and its realm so callers don't
    trigger per-row queries.
    """
    return [default.stream for default in
            DefaultStream.objects.select_related("stream", "stream__realm").filter(realm=realm)]
def get_default_subs(user_profile):
    """Return the streams a new user should be subscribed to by default."""
    # Right now default streams are realm-wide. This wrapper gives us flexibility
    # to some day further customize how we set up default streams for new users.
    return get_default_streams_for_realm(user_profile.realm)
def do_update_user_activity_interval(user_profile, log_time):
    """Record activity at `log_time`, merging into the latest interval if they overlap.

    Each interval extends 15 minutes past the triggering activity.
    """
    effective_end = log_time + datetime.timedelta(minutes=15)
    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    try:
        last = UserActivityInterval.objects.filter(
            user_profile=user_profile).order_by("-end")[0]
    except IndexError:
        last = None
    # The new interval overlaps the previous one iff either of its
    # endpoints falls inside it; in that case just widen the old row.
    if last is not None and (last.start <= log_time <= last.end or
                             last.start <= effective_end <= last.end):
        last.start = min(last.start, log_time)
        last.end = max(last.end, effective_end)
        last.save(update_fields=["start", "end"])
        return
    # No overlap (or no prior interval), so start a fresh one.
    UserActivityInterval.objects.create(user_profile=user_profile,
                                        start=log_time, end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile, client, query, log_time):
    """Bump the activity counter for this (user, client, query) triple."""
    activity, _ = UserActivity.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        query=query,
        defaults={'last_visit': log_time, 'count': 0})
    activity.last_visit = log_time
    activity.count += 1
    activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile, presence):
    """Broadcast a presence update for `user_profile` to its whole realm.

    Fix: reuse the already-computed presence_dict instead of calling
    presence.to_dict() a second time for the same value.
    """
    presence_dict = presence.to_dict()
    event = dict(type="presence", email=user_profile.email,
                 server_timestamp=time.time(),
                 presence={presence_dict['client']: presence_dict})
    send_event(event, active_user_ids(user_profile.realm))
def consolidate_client(client):
    """Alias the desktop app's client record to 'website'.

    The web app reports a client of 'website', while the desktop app
    (which sets a custom user agent) reports ZulipDesktop; we want both
    counted as web users, so map ZulipDesktop onto the website client.
    Any other client passes through unchanged.
    """
    if client.name == 'ZulipDesktop':
        return get_client('website')
    return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile, client, log_time, status):
    """Record a presence ping for (user, client) and broadcast when the user
    newly appears or comes back online.

    `status` is a UserPresence status constant (e.g. ACTIVE/IDLE).
    """
    # Fold ZulipDesktop into 'website' so both count as web clients.
    client = consolidate_client(client)
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile = user_profile,
        client = client,
        defaults = {'timestamp': log_time,
                    'status': status})
    # "Stale" means the last ping from this client is older than 70s.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    #
    # NOTE(review): by Python precedence this condition parses as
    # (not created and stale_status) or was_idle or (status == presence.status),
    # so a freshly-created row can still be re-saved when was_idle or the
    # status matches. If the intent was `not created and (...)`, it needs
    # parentheses — confirm before changing.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)
    if not user_profile.realm.domain == "mit.edu" and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile, log_time):
    """Queue an activity-interval update for this user (with a local fallback)."""
    event = {'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(log_time)}
    queue_json_publish(
        "user_activity_interval", event,
        lambda e: do_update_user_activity_interval(user_profile, log_time))
def update_user_presence(user_profile, client, log_time, status,
                         new_user_input):
    """Queue a presence update; also record an activity interval when the
    ping was triggered by real user input."""
    event = dict(user_profile_id=user_profile.id,
                 status=status,
                 time=datetime_to_timestamp(log_time),
                 client=client.name)
    queue_json_publish("user_presence", event,
                       lambda e: do_update_user_presence(user_profile, client,
                                                         log_time, status))
    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile, pointer, update_flags=False):
    """Move the user's pointer, optionally marking skipped messages read.

    update_flags=True is a shim for the Android app (which doesn't yet
    handle read counts natively): everything between the old and new
    pointer gets the read flag.
    """
    prev_pointer = user_profile.pointer
    user_profile.pointer = pointer
    user_profile.save(update_fields=["pointer"])
    if update_flags:
        unread = UserMessage.objects.filter(user_profile=user_profile,
                                            message__id__gt=prev_pointer,
                                            message__id__lte=pointer,
                                            flags=~UserMessage.flags.read)
        unread.update(flags=F('flags').bitor(UserMessage.flags.read))
    send_event(dict(type='pointer', pointer=pointer), [user_profile.id])
def do_update_message_flags(user_profile, operation, flag, messages, all):
    """Add or remove the UserMessage flag `flag` on `messages` for this user.

    operation: 'add' or 'remove'.
    messages: list of message ids (ignored when `all` is True).
    all: apply to every UserMessage row of the user (the bankruptcy case).
    Raises JsonableError for invalid messages or an unknown operation.

    Fix: an unknown `operation` previously fell through both branches and
    crashed below with a NameError on `count`; it now raises JsonableError.
    """
    flagattr = getattr(UserMessage.flags, flag)
    if all:
        log_statsd_event('bankruptcy')
        msgs = UserMessage.objects.filter(user_profile=user_profile)
    else:
        msgs = UserMessage.objects.filter(user_profile=user_profile,
                                          message__id__in=messages)
    # Hack to let you star any message
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError("Invalid message(s)")
        if flag != "starred":
            raise JsonableError("Invalid message(s)")
        # Check that the user could have read the relevant message
        try:
            message = Message.objects.get(id=messages[0])
        except Message.DoesNotExist:
            raise JsonableError("Invalid message(s)")
        recipient = Recipient.objects.get(id=message.recipient_id)
        if recipient.type != Recipient.STREAM:
            raise JsonableError("Invalid message(s)")
        stream = Stream.objects.select_related("realm").get(id=recipient.type_id)
        if not stream.is_public():
            raise JsonableError("Invalid message(s)")
        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it. So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)
    # The filter() statements below prevent postgres from doing a lot of
    # unnecessary work, which is a big deal for users updating lots of
    # flags (e.g. bankruptcy). This patch arose from seeing slow calls
    # to /json/update_message_flags in the logs. The filter() statements
    # are kind of magical; they are actually just testing the one bit.
    if operation == 'add':
        msgs = msgs.filter(flags=~flagattr)
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        msgs = msgs.filter(flags=flagattr)
        count = msgs.update(flags=F('flags').bitand(~flagattr))
    else:
        raise JsonableError("Invalid operation %s" % (operation,))
    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': all}
    log_event(event)
    send_event(event, [user_profile.id])
    statsd.incr("flags.%s.%s" % (flag, operation), count)
def subscribed_to_stream(user_profile, stream):
    """Return True iff `user_profile` has an active subscription to `stream`."""
    try:
        # .get() raises DoesNotExist when there is no active subscription.
        Subscription.objects.get(user_profile=user_profile,
                                 active=True,
                                 recipient__type=Recipient.STREAM,
                                 recipient__type_id=stream.id)
    except Subscription.DoesNotExist:
        return False
    return True
def truncate_content(content, max_length, truncation_message):
    """Trim `content` to at most `max_length` characters.

    When trimming occurs, the tail is replaced by `truncation_message`
    so the result still ends with a visible marker.
    """
    if len(content) <= max_length:
        return content
    return content[:max_length - len(truncation_message)] + truncation_message
def truncate_body(body):
    """Trim a message body to the maximum allowed message length."""
    return truncate_content(body, MAX_MESSAGE_LENGTH, "...")
def truncate_topic(topic):
    """Trim a topic (subject) to the maximum allowed subject length."""
    return truncate_content(topic, MAX_SUBJECT_LENGTH, "...")
def update_user_message_flags(message, ums):
    """Recompute alert-word / mention flag bits on each UserMessage in `ums`.

    Only rows whose bits actually change are saved, keeping DB writes to
    a minimum.
    """
    wildcard = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    ids_with_alert_words = message.user_ids_with_alert_words
    dirty = set()
    def set_flag_bit(um, flag, should_set):
        # Flip the bit only when its current state disagrees with the target.
        if bool(um.flags & flag) == bool(should_set):
            return
        if should_set:
            um.flags |= flag
        else:
            um.flags &= ~flag
        dirty.add(um)
    for um in ums:
        set_flag_bit(um, UserMessage.flags.has_alert_word,
                     um.user_profile_id in ids_with_alert_words)
        set_flag_bit(um, UserMessage.flags.mentioned,
                     um.user_profile_id in mentioned_ids)
        set_flag_bit(um, UserMessage.flags.wildcard_mentioned, wildcard)
    for um in dirty:
        um.save(update_fields=['flags'])
def do_update_message(user_profile, message_id, subject, propagate_mode, content):
    """Edit the content and/or subject (topic) of an existing message.

    subject and content may each be None, meaning "leave unchanged".
    propagate_mode ('change_later' / 'change_all' / other) controls
    whether a subject change is also applied to other messages in the
    same topic.  Updates the edit history, the memcached message and
    to_dict caches, and sends an 'update_message' event to recipients.
    Raises JsonableError for unknown ids or missing permission.
    """
    try:
        message = Message.objects.select_related().get(id=message_id)
    except Message.DoesNotExist:
        raise JsonableError("Unknown message id")
    event = {'type': 'update_message',
             'sender': user_profile.email,
             'message_id': message_id}
    edit_history_event = {}
    changed_messages = [message]
    # You can only edit a message if:
    # 1. You sent it, OR:
    # 2. This is a topic-only edit for a (no topic) message, OR:
    # 3. This is a topic-only edit and you are an admin.
    if message.sender == user_profile:
        pass
    elif (content is None) and ((message.subject == "(no topic)") or
                                user_profile.is_admin()):
        pass
    else:
        raise JsonableError("You don't have permission to edit this message")
    # Set first_rendered_content to be the oldest version of the
    # rendered content recorded; which is the current version if the
    # content hasn't been edited before. Note that because one could
    # have edited just the subject, not every edit history event
    # contains a prev_rendered_content element.
    first_rendered_content = message.rendered_content
    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        for old_edit_history_event in edit_history:
            if 'prev_rendered_content' in old_edit_history_event:
                first_rendered_content = old_edit_history_event['prev_rendered_content']
    ums = UserMessage.objects.filter(message=message_id)
    if content is not None:
        # An explicitly-emptied body is stored as the literal "(deleted)".
        if len(content.strip()) == 0:
            content = "(deleted)"
        content = truncate_body(content)
        rendered_content = message.render_markdown(content)
        if not rendered_content:
            raise JsonableError("We were unable to render your updated message")
        update_user_message_flags(message, ums)
        # We are turning off diff highlighting everywhere until ticket #1532 is addressed.
        if False:
            # Don't highlight message edit diffs on prod
            rendered_content = highlight_html_differences(first_rendered_content, rendered_content)
        event['orig_content'] = message.content
        event['orig_rendered_content'] = message.rendered_content
        edit_history_event["prev_content"] = message.content
        edit_history_event["prev_rendered_content"] = message.rendered_content
        edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
        message.content = content
        message.set_rendered_content(rendered_content)
        event["content"] = content
        event["rendered_content"] = rendered_content
    if subject is not None:
        orig_subject = message.subject
        subject = subject.strip()
        if subject == "":
            raise JsonableError("Topic can't be empty")
        subject = truncate_topic(subject)
        event["orig_subject"] = orig_subject
        event["propagate_mode"] = propagate_mode
        message.subject = subject
        event["stream_id"] = message.recipient.type_id
        event["subject"] = subject
        event['subject_links'] = bugdown.subject_links(message.sender.realm.domain.lower(), subject)
        edit_history_event["prev_subject"] = orig_subject
        if propagate_mode in ["change_later", "change_all"]:
            # Rename the rest of the topic (other messages with the old
            # subject on the same recipient).
            propagate_query = Q(recipient = message.recipient, subject = orig_subject)
            # We only change messages up to 2 days in the past, to avoid hammering our
            # DB by changing an unbounded amount of messages
            if propagate_mode == 'change_all':
                before_bound = now() - datetime.timedelta(days=2)
                propagate_query = propagate_query & ~Q(id = message.id) & \
                                  Q(pub_date__range=(before_bound, now()))
            if propagate_mode == 'change_later':
                propagate_query = propagate_query & Q(id__gt = message.id)
            messages = Message.objects.filter(propagate_query).select_related();
            # Evaluate the query before running the update
            messages_list = list(messages)
            messages.update(subject=subject)
            for m in messages_list:
                # The cached ORM object is not changed by messages.update()
                # and the memcached update requires the new value
                m.subject = subject
            changed_messages += messages_list
    message.last_edit_time = timezone.now()
    event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
    edit_history_event['timestamp'] = event['edit_timestamp']
    if message.edit_history is not None:
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)
    log_event(event)
    message.save(update_fields=["subject", "content", "rendered_content",
                                "rendered_content_version", "last_edit_time",
                                "edit_history"])
    # Update the message as stored in the (deprecated) message
    # cache (for shunting the message over to Tornado in the old
    # get_messages API) and also the to_dict caches.
    items_for_memcached = {}
    event['message_ids'] = []
    for changed_message in changed_messages:
        event['message_ids'].append(changed_message.id)
        items_for_memcached[message_cache_key(changed_message.id)] = (changed_message,)
        items_for_memcached[to_dict_cache_key(changed_message, True)] = \
            (stringify_message_dict(changed_message.to_dict_uncached(apply_markdown=True)),)
        items_for_memcached[to_dict_cache_key(changed_message, False)] = \
            (stringify_message_dict(changed_message.to_dict_uncached(apply_markdown=False)),)
    cache_set_many(items_for_memcached)
    def user_info(um):
        # Per-recipient event payload: each user sees their own flags.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(event, map(user_info, ums))
def encode_email_address(stream):
    """Return the email-gateway address that delivers to `stream`."""
    return encode_email_address_helper(stream.name, stream.email_token)
def encode_email_address_helper(name, email_token):
    """Build the email-gateway address for the stream called `name`.

    Returns '' when the deployment has no email gateway configured.

    Fix: use a raw string for the regex so "\\W" is a regex class rather
    than a (deprecated) string escape.
    """
    # Some deployments may not use the email gateway
    if settings.EMAIL_GATEWAY_PATTERN == '':
        return ''
    # Given the fact that we have almost no restrictions on stream names and
    # that what characters are allowed in e-mail addresses is complicated and
    # dependent on context in the address, we opt for a very simple scheme:
    #
    # Only encode the stream name (leave the + and token alone). Encode
    # everything that isn't alphanumeric plus _ as the percent-prefixed integer
    # ordinal of that character, padded with zeroes to the maximum number of
    # bytes of a UTF-8 encoded Unicode character.
    encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
    encoded_token = "%s+%s" % (encoded_name, email_token)
    return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)
def decode_email_address(email):
    """Perform the reverse of encode_email_address.

    Returns (stream_name, email_token), or None when `email` does not
    match the configured gateway pattern.

    Fix: use a raw string for the "%\\d{4}" regex so "\\d" is a regex
    class rather than a (deprecated) string escape.
    """
    pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
    if settings.ZULIP_COM:
        # Accept mails delivered to any Zulip server
        pattern_parts[-1] = r'@[\w-]*\.zulip\.net'
    match_email_re = re.compile("(.*?)".join(pattern_parts))
    match = match_email_re.match(email)
    if not match:
        return None
    full_address = match.group(1)
    if '.' in full_address:
        # Workaround for Google Groups and other programs that don't accept emails
        # that have + signs in them (see Trac #2102)
        encoded_stream_name, token = full_address.split('.')
    else:
        encoded_stream_name, token = full_address.split('+')
    stream_name = re.sub(r"%\d{4}", lambda x: unichr(int(x.group(0)[1:])), encoded_stream_name)
    return stream_name, token
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile):
    """Collect the user's stream subscriptions in API dict form.

    Returns (subscribed, unsubscribed, email_dict) where the first two
    are sorted lists of per-stream dicts (with subscriber user ids where
    visible) and email_dict maps every referenced user id to an email.
    """
    sub_dicts = Subscription.objects.select_related("recipient").filter(
        user_profile    = user_profile,
        recipient__type = Recipient.STREAM).values(
        "recipient__type_id", "in_home_view", "color", "desktop_notifications",
        "audible_notifications", "active")
    stream_ids = [sub["recipient__type_id"] for sub in sub_dicts]
    stream_dicts = get_active_streams(user_profile.realm).select_related(
        "realm").filter(id__in=stream_ids).values(
        "id", "name", "invite_only", "realm_id", "realm__domain", "email_token", "description")
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream
    subscribed = []
    unsubscribed = []
    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["recipient__type_id"]] for sub in sub_dicts \
                   if sub["recipient__type_id"] in stream_hash]
    streams_subscribed_map = dict((sub["recipient__type_id"], sub["active"]) for sub in sub_dicts)
    subscriber_map = bulk_get_subscriber_user_ids(streams, user_profile, streams_subscribed_map)
    for sub in sub_dicts:
        stream = stream_hash.get(sub["recipient__type_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue
        subscribers = subscriber_map[stream["id"]]
        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore.
        if stream["invite_only"] and not sub["active"]:
            subscribers = None
        stream_dict = {'name': stream["name"],
                       'in_home_view': sub["in_home_view"],
                       'invite_only': stream["invite_only"],
                       'color': sub["color"],
                       'desktop_notifications': sub["desktop_notifications"],
                       'audible_notifications': sub["audible_notifications"],
                       'stream_id': stream["id"],
                       'description': stream["description"],
                       'email_address': encode_email_address_helper(stream["name"], stream["email_token"])}
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers
        if sub["active"]:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)
    # Collect every user id we exposed so callers can resolve them to emails.
    user_ids = set()
    for subs in [subscribed, unsubscribed]:
        for sub in subs:
            if 'subscribers' in sub:
                for subscriber in sub['subscribers']:
                    user_ids.add(subscriber)
    email_dict = get_emails_from_user_ids(list(user_ids))
    return (sorted(subscribed), sorted(unsubscribed), email_dict)
def gather_subscriptions(user_profile):
    """Like gather_subscriptions_helper, but with subscriber user ids
    translated into email addresses."""
    subscribed, unsubscribed, email_dict = gather_subscriptions_helper(user_profile)
    def emailify(subs):
        # Rewrite each sub's subscriber id list in place.
        for sub in subs:
            if 'subscribers' in sub:
                sub['subscribers'] = [email_dict[user_id] for user_id in sub['subscribers']]
    emailify(subscribed)
    emailify(unsubscribed)
    return (subscribed, unsubscribed)
def get_status_dict(requesting_user_profile):
    """Return presence data for the requester's realm (empty for MIT)."""
    # Return no status info for MIT
    if requesting_user_profile.realm.domain == 'mit.edu':
        return defaultdict(dict)
    return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_realm_user_dicts(user_profile):
    """Serialize every active user in the realm, marking realm admins."""
    # Due to our permission model, it is advantageous to find the admin
    # users in bulk rather than testing each user individually.
    admin_emails = set(admin.email for admin in user_profile.realm.get_admin_users())
    result = []
    for userdict in get_active_user_dicts_in_realm(user_profile.realm):
        result.append({'email': userdict['email'],
                       'is_admin': userdict['email'] in admin_emails,
                       'is_bot': userdict['is_bot'],
                       'full_name': userdict['full_name']})
    return result
def get_realm_bot_dicts(user_profile):
    """Serialize every active bot in the user's realm for the API."""
    bot_dicts = []
    for botdict in get_active_bot_dicts_in_realm(user_profile.realm):
        bot_dicts.append({
            'email': botdict['email'],
            'full_name': botdict['full_name'],
            'api_key': botdict['api_key'],
            'default_sending_stream': botdict['default_sending_stream__name'],
            'default_events_register_stream': botdict['default_events_register_stream__name'],
            'default_all_public_streams': botdict['default_all_public_streams'],
            'owner': botdict['bot_owner__email'],
            'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email']),
        })
    return bot_dicts
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile, event_types, queue_id):
    """Assemble the initial register() payload for an event queue.

    event_types of None means "everything"; otherwise only the listed
    event types' data is included.  Keep this in sync with apply_events.
    """
    state = {'queue_id': queue_id}
    if event_types is None:
        # No filter requested: include every section below.
        want = lambda msg_type: True
    else:
        want = set(event_types).__contains__
    if want('alert_words'):
        state['alert_words'] = user_alert_words(user_profile)
    if want('message'):
        # The client should use get_old_messages() to fetch messages
        # starting with the max_message_id. They will get messages
        # newer than that ID via get_events()
        messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
        if messages:
            state['max_message_id'] = messages[0].id
        else:
            # Sentinel: the user has no messages at all.
            state['max_message_id'] = -1
    if want('muted_topics'):
        state['muted_topics'] = ujson.loads(user_profile.muted_topics)
    if want('pointer'):
        state['pointer'] = user_profile.pointer
    if want('presence'):
        state['presences'] = get_status_dict(user_profile)
    if want('realm'):
        state['realm_name'] = user_profile.realm.name
        state['realm_restricted_to_domain'] = user_profile.realm.restricted_to_domain
        state['realm_invite_required'] = user_profile.realm.invite_required
        state['realm_invite_by_admins_only'] = user_profile.realm.invite_by_admins_only
    if want('realm_domain'):
        state['realm_domain'] = user_profile.realm.domain
    if want('realm_emoji'):
        state['realm_emoji'] = user_profile.realm.get_emoji()
    if want('realm_filters'):
        state['realm_filters'] = realm_filters_for_domain(user_profile.realm.domain)
    if want('realm_user'):
        state['realm_users'] = get_realm_user_dicts(user_profile)
    if want('realm_bot'):
        state['realm_bots'] = get_realm_bot_dicts(user_profile)
    if want('referral'):
        state['referrals'] = {'granted': user_profile.invites_granted,
                              'used': user_profile.invites_used}
    if want('subscription'):
        subscriptions, unsubscribed, email_dict = gather_subscriptions_helper(user_profile)
        state['subscriptions'] = subscriptions
        state['unsubscribed'] = unsubscribed
        state['email_dict'] = email_dict
    if want('update_message_flags'):
        # There's no initial data for message flag updates, client will
        # get any updates during a session from get_events()
        pass
    if want('stream'):
        state['streams'] = do_get_streams(user_profile)
    if want('update_display_settings'):
        state['twenty_four_hour_time'] = user_profile.twenty_four_hour_time
        state['left_side_userlist'] = user_profile.left_side_userlist
    return state
def apply_events(state, events, user_profile):
for event in events:
if event['type'] == "message":
state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
elif event['type'] == "pointer":
state['pointer'] = max(state['pointer'], event['pointer'])
elif event['type'] == "realm_user":
person = event['person']
def our_person(p):
return p['email'] == person['email']
if event['op'] == "add":
state['realm_users'].append(person)
elif event['op'] == "remove":
state['realm_users'] = itertools.ifilterfalse(our_person, state['realm_users'])
elif event['op'] == 'update':
for p in state['realm_users']:
if our_person(p):
p.update(person)
elif event['type'] == 'realm_bot':
if event['op'] == 'add':
state['realm_bots'].append(event['bot'])
if event['op'] == 'remove':
email = event['bot']['email']
state['realm_bots'] = [b for b in state['realm_bots'] if b['email'] != email]
if event['op'] == 'update':
for bot in state['realm_bots']:
if bot['email'] == event['bot']['email']:
bot.update(event['bot'])
elif event['type'] == 'stream':
if event['op'] == 'update':
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
for obj in state['subscriptions']:
if obj['name'].lower() == event['name'].lower():
obj[event['property']] = event['value']
# Also update the pure streams data
for stream in state['streams']:
if stream['name'].lower() == event['name'].lower():
prop = event['property']
if prop in stream:
stream[prop] = event['value']
elif event['op'] == "occupy":
state['streams'] += event['streams']
elif event['op'] == "vacate":
stream_ids = [s["stream_id"] for s in event['streams']]
state['streams'] = filter(lambda s: s["stream_id"] not in stream_ids,
state['streams'])
elif event['type'] == 'realm':
field = 'realm_' + event['property']
state[field] = event['value']
elif event['type'] == "subscription":
if event['op'] in ["add"]:
# Convert the user_profile IDs to emails since that's what register() returns
# TODO: Clean up this situation
for item in event["subscriptions"]:
item["subscribers"] = [get_user_profile_by_email(email).id for email in item["subscribers"]]
def name(sub):
return sub['name'].lower()
if event['op'] == "add":
added_names = map(name, event["subscriptions"])
was_added = lambda s: name(s) in added_names
# add the new subscriptions
state['subscriptions'] += event['subscriptions']
# remove them from unsubscribed if they had been there
state['unsubscribed'] = list(itertools.ifilterfalse(was_added, state['unsubscribed']))
elif event['op'] == "remove":
removed_names = map(name, event["subscriptions"])
was_removed = lambda s: name(s) in removed_names
# Find the subs we are affecting.
removed_subs = filter(was_removed, state['subscriptions'])
# Remove our user from the subscribers of the removed subscriptions.
for sub in removed_subs:
sub['subscribers'] = filter(lambda id: id != user_profile.id, sub['subscribers'])
# We must effectively copy the removed subscriptions from subscriptions to
# unsubscribe, since we only have the name in our data structure.
state['unsubscribed'] += removed_subs
# Now filter out the removed subscriptions from subscriptions.
state['subscriptions'] = list(itertools.ifilterfalse(was_removed, state['subscriptions']))
elif event['op'] == 'update':
for sub in state['subscriptions']:
if sub['name'].lower() == event['name'].lower():
sub[event['property']] = event['value']
elif event['op'] == 'peer_add':
user_id = get_user_profile_by_email(event['user_email']).id
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
elif event['op'] == 'peer_remove':
user_id = get_user_profile_by_email(event['user_email']).id
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id in sub['subscribers']):
sub['subscribers'].remove(user_id)
elif event['type'] == "presence":
state['presences'][event['email']] = event['presence']
elif event['type'] == "update_message":
# The client will get the updated message directly
pass
elif event['type'] == "referral":
state['referrals'] = event['referrals']
elif event['type'] == "update_message_flags":
# The client will get the message with the updated flags directly
pass
elif event['type'] == "realm_emoji":
state['realm_emoji'] = event['realm_emoji']
elif event['type'] == "alert_words":
state['alert_words'] = event['alert_words']
elif event['type'] == "muted_topics":
state['muted_topics'] = event["muted_topics"]
elif event['type'] == "realm_filters":
state['realm_filters'] = event["realm_filters"]
elif event['type'] == "update_display_settings":
if event['setting_name'] == "twenty_four_hour_time":
state['twenty_four_hour_time'] = event["setting"]
if event['setting_name'] == 'left_side_userlist':
state['left_side_userlist'] = event["setting"]
else:
raise ValueError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile, user_client, apply_markdown=True,
                       event_types=None, queue_lifespan_secs=0, all_public_streams=False,
                       narrow=[]):
    """
    Allocate a Tornado event queue for `user_profile`, fetch the initial
    state snapshot, and return that state with `queue_id` and
    `last_event_id` filled in.  Events that arrived while the snapshot was
    being computed are applied before returning, so the caller starts
    fully up to date.

    Raises JsonableError if no event queue could be allocated.

    NOTE(review): `narrow=[]` is a mutable default argument.  It is only
    read here, never mutated, so it is currently harmless, but a None
    sentinel would be safer if this function is ever changed.
    """
    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado
    check_supported_events_narrow_filter(narrow)
    queue_id = request_event_queue(user_profile, user_client, apply_markdown,
                                   queue_lifespan_secs, event_types, all_public_streams,
                                   narrow=narrow)
    if queue_id is None:
        raise JsonableError("Could not allocate event queue")
    if event_types is not None:
        # Normalize to a set for the membership tests done downstream.
        event_types = set(event_types)
    ret = fetch_initial_state_data(user_profile, event_types, queue_id)
    # Apply events that came in while we were fetching initial data
    events = get_user_events(user_profile, queue_id, -1)
    apply_events(ret, events, user_profile)
    if events:
        ret['last_event_id'] = events[-1]['id']
    else:
        # No events arrived during the fetch; -1 means "start of queue".
        ret['last_event_id'] = -1
    return ret
def do_send_confirmation_email(invitee, referrer):
    """
    Send the confirmation/welcome e-mail to an invited user.
    `invitee` is a PreregistrationUser.
    `referrer` is a UserProfile.
    """
    subject_template_path = 'confirmation/invite_email_subject.txt'
    body_template_path = 'confirmation/invite_email_body.txt'
    context = {'referrer': referrer,
               'support_email': settings.ZULIP_ADMINISTRATOR,
               'voyager': settings.VOYAGER}
    # mit.edu realms get their own invitation templates.
    if referrer.realm.domain == 'mit.edu':
        subject_template_path = 'confirmation/mituser_invite_email_subject.txt'
        body_template_path = 'confirmation/mituser_invite_email_body.txt'
    Confirmation.objects.send_confirmation(
        invitee, invitee.email, additional_context=context,
        subject_template_path=subject_template_path,
        body_template_path=body_template_path)
@statsd_increment("push_notifications")
def handle_push_notification(user_profile_id, missed_message):
    """
    Send mobile push notifications (APNS and/or GCM) for one missed message.

    `missed_message` is a dict carrying at least 'message_id'.  Returns
    silently when the user has offline notifications disabled, has already
    read the message, or has no registered push devices.  Logs an error if
    the referenced UserMessage row does not exist.
    """
    try:
        user_profile = get_user_profile_by_id(user_profile_id)
        if not receives_offline_notifications(user_profile):
            return
        umessage = UserMessage.objects.get(user_profile=user_profile,
                                           message__id=missed_message['message_id'])
        message = umessage.message
        # The user may have read the message since the notification was queued.
        if umessage.flags.read:
            return
        sender_str = message.sender.full_name
        apple = num_push_devices_for_user(user_profile, kind=PushDeviceToken.APNS)
        android = num_push_devices_for_user(user_profile, kind=PushDeviceToken.GCM)
        if apple or android:
            #TODO: set badge count in a better way
            # Determine what alert string to display based on the missed messages
            if message.recipient.type == Recipient.HUDDLE:
                alert = "New private group message from %s" % (sender_str,)
            elif message.recipient.type == Recipient.PERSONAL:
                alert = "New private message from %s" % (sender_str,)
            elif message.recipient.type == Recipient.STREAM:
                alert = "New mention from %s" % (sender_str,)
            else:
                alert = "New Zulip mentions and private messages from %s" % (sender_str,)
            if apple:
                apple_extra_data = {'message_ids': [message.id]}
                send_apple_push_notification(user_profile, alert, badge=1, zulip=apple_extra_data)
            if android:
                # Truncate long message bodies to 200 characters for the payload.
                content = message.content
                content_truncated = (len(content) > 200)
                if content_truncated:
                    content = content[:200] + "..."
                android_data = {
                    'user': user_profile.email,
                    'event': 'message',
                    'alert': alert,
                    'zulip_message_id': message.id, # message_id is reserved for CCS
                    'time': datetime_to_timestamp(message.pub_date),
                    'content': content,
                    'content_truncated': content_truncated,
                    'sender_email': message.sender.email,
                    'sender_full_name': message.sender.full_name,
                    'sender_avatar_url': get_avatar_url(message.sender.avatar_source, message.sender.email),
                }
                if message.recipient.type == Recipient.STREAM:
                    android_data['recipient_type'] = "stream"
                    android_data['stream'] = get_display_recipient(message.recipient)
                    android_data['topic'] = message.subject
                elif message.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL):
                    android_data['recipient_type'] = "private"
                send_android_push_notification(user_profile, android_data)
    except UserMessage.DoesNotExist:
        logging.error("Could not find UserMessage with message_id %s" %(missed_message['message_id'],))
def is_inactive(value):
    """Validator: raise ValidationError if `value` is the e-mail address of
    an already-active user.  E-mails with no matching user pass silently."""
    try:
        if get_user_profile_by_email(value).is_active:
            raise ValidationError(u'%s is already active' % value)
    except UserProfile.DoesNotExist:
        pass
def user_email_is_unique(value):
    """Validator: raise ValidationError if any user profile (active or not)
    already exists for the e-mail address `value`."""
    try:
        get_user_profile_by_email(value)
        # Raising inside the try is safe: the handler below only catches
        # UserProfile.DoesNotExist, so the ValidationError propagates.
        raise ValidationError(u'%s is already registered' % value)
    except UserProfile.DoesNotExist:
        pass
def do_invite_users(user_profile, invitee_emails, streams):
    """
    Create PreregistrationUser rows for `invitee_emails` (subscribing each
    to `streams`) and queue the invitation e-mails.

    Returns (ret_error, ret_error_data): ret_error is None on full success,
    otherwise a human-readable summary; ret_error_data carries the
    per-address reasons under 'errors'.
    """
    new_prereg_users = []
    errors = []
    skipped = []
    ret_error = None
    ret_error_data = {}
    for email in invitee_emails:
        if email == '':
            continue
        try:
            validators.validate_email(email)
        except ValidationError:
            errors.append((email, "Invalid address."))
            continue
        # Closed realms may only invite addresses on their own domain.
        if user_profile.realm.restricted_to_domain and resolve_email_to_domain(email) != user_profile.realm.domain.lower():
            errors.append((email, "Outside your domain."))
            continue
        try:
            existing_user_profile = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            existing_user_profile = None
        try:
            if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
                # Mirror dummy users to be activated must be inactive
                is_inactive(email)
            else:
                # Other users should not already exist at all.
                user_email_is_unique(email)
        except ValidationError:
            skipped.append((email, "Already has an account."))
            continue
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile)
        # We save twice because you cannot associate a ManyToMany field
        # on an unsaved object.
        prereg_user.save()
        prereg_user.streams = streams
        prereg_user.save()
        new_prereg_users.append(prereg_user)
    if errors:
        ret_error = "Some emails did not validate, so we didn't send any invitations."
        ret_error_data = {'errors': errors}
    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        ret_error = "We weren't able to invite anyone."
        ret_error_data = {'errors': skipped}
        return ret_error, ret_error_data
    # If we encounter an exception at any point before now, there are no unwanted side-effects,
    # since it is totally fine to have duplicate PreregistrationUsers
    for user in new_prereg_users:
        event = {"email": user.email, "referrer_email": user_profile.email}
        # NOTE(review): the lambda closes over the loop variable `user` with
        # late binding; this is only correct if queue_json_publish invokes the
        # callback synchronously within this iteration -- confirm.
        queue_json_publish("invites", event,
                           lambda event: do_send_confirmation_email(user, user_profile))
    if skipped:
        ret_error = "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!"
        ret_error_data = {'errors': skipped}
    return ret_error, ret_error_data
def send_referral_event(user_profile):
    # Push the user's current referral counts to their open clients.
    event = dict(type="referral",
                 referrals=dict(granted=user_profile.invites_granted,
                                used=user_profile.invites_used))
    send_event(event, [user_profile.id])
def do_refer_friend(user_profile, email):
    """E-mail the referral to the Zulip referrals inbox, record a Referral
    row, bump the referrer's used-invite counter, and notify their clients."""
    content = """Referrer: "%s" <%s>
Realm: %s
Referred: %s""" % (user_profile.full_name, user_profile.email, user_profile.realm.domain, email)
    subject = "Zulip referral: %s" % (email,)
    from_email = '"%s" <%s>' % (user_profile.full_name, 'referrals@zulip.com')
    to_email = '"Zulip Referrals" <zulip+referrals@zulip.com>'
    headers = {'Reply-To' : '"%s" <%s>' % (user_profile.full_name, user_profile.email,)}
    msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
    msg.send()
    referral = Referral(user_profile=user_profile, email=email)
    referral.save()
    user_profile.invites_used += 1
    user_profile.save(update_fields=['invites_used'])
    send_referral_event(user_profile)
def notify_realm_emoji(realm):
    # Broadcast the realm's full emoji set to every active user in the realm.
    event = dict(type="realm_emoji", op="update",
                 realm_emoji=realm.get_emoji())
    user_ids = [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
    send_event(event, user_ids)
def do_add_realm_emoji(realm, name, img_url):
    # Persist the new emoji, then broadcast the updated emoji set.
    RealmEmoji(realm=realm, name=name, img_url=img_url).save()
    notify_realm_emoji(realm)
def do_remove_realm_emoji(realm, name):
    # Delete the emoji row (raises RealmEmoji.DoesNotExist if absent),
    # then broadcast the updated emoji set.
    RealmEmoji.objects.get(realm=realm, name=name).delete()
    notify_realm_emoji(realm)
def notify_alert_words(user_profile, words):
    # Push the user's current alert-word list to their open clients.
    event = dict(type="alert_words", alert_words=words)
    send_event(event, [user_profile.id])
def do_add_alert_words(user_profile, alert_words):
    # Merge the new words into the stored list and notify clients.
    words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile, alert_words):
    # Drop the given words from the stored list and notify clients.
    words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, words)
def do_set_alert_words(user_profile, alert_words):
    # Replace the stored list wholesale and notify clients.
    set_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, alert_words)
def do_set_muted_topics(user_profile, muted_topics):
    # Persist the muted-topic list as JSON on the profile, then notify
    # the user's open clients.
    user_profile.muted_topics = ujson.dumps(muted_topics)
    user_profile.save(update_fields=['muted_topics'])
    event = dict(type="muted_topics", muted_topics=muted_topics)
    send_event(event, [user_profile.id])
def notify_realm_filters(realm):
    # Broadcast the realm's current filter list to every active user.
    realm_filters = realm_filters_for_domain(realm.domain)
    user_ids = [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
    event = dict(type="realm_filters", realm_filters=realm_filters)
    send_event(event, user_ids)
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm, pattern, url_format_string):
    # Persist the filter, then broadcast the updated filter list.
    RealmFilter(realm=realm, pattern=pattern,
                url_format_string=url_format_string).save()
    notify_realm_filters(realm)
def do_remove_realm_filter(realm, pattern):
    # Delete the filter row (raises RealmFilter.DoesNotExist if absent),
    # then broadcast the updated filter list.
    RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids):
    # Delegates to UserProfile.emails_from_ids.
    # We may eventually use memcached to speed this up, but the DB is fast.
    return UserProfile.emails_from_ids(user_ids)
def realm_aliases(realm):
    """Return the domain strings of every RealmAlias attached to `realm`."""
    domains = []
    for alias in realm.realmalias_set.all():
        domains.append(alias.domain)
    return domains
def get_occupied_streams(realm):
    """ Get streams with subscribers """
    # Recipient ids referenced by at least one active subscription held by
    # an active user of this realm ...
    subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
                                              user_profile__is_active=True).values('recipient_id')
    # ... resolved to stream ids through the Recipient table ...
    stream_ids = Recipient.objects.filter(
        type=Recipient.STREAM, id__in=subs_filter).values('type_id')
    # ... restricted to non-deactivated streams of the realm.
    return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def do_get_streams(user_profile, include_public=True, include_subscribed=True,
                   include_all_active=False):
    """
    Return a name-sorted list of stream dicts (stream_id, name, description,
    invite_only) visible to `user_profile`, filtered by the include_* flags.

    Raises JsonableError if include_all_active is requested by a user
    without API super-user rights.
    """
    if include_all_active and not user_profile.is_api_super_user():
        raise JsonableError("User not authorized for this query")
    # Listing public streams are disabled for the mit.edu realm.
    include_public = include_public and user_profile.realm.domain != "mit.edu"
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)
    if not include_all_active:
        user_subs = Subscription.objects.select_related("recipient").filter(
            active=True, user_profile=user_profile,
            recipient__type=Recipient.STREAM)
        # recipient_check / invite_only_check are only defined when their
        # flag is set; the branches below only use them in those cases.
        if include_subscribed:
            recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
        if include_public:
            invite_only_check = Q(invite_only=False)
        if include_subscribed and include_public:
            query = query.filter(recipient_check | invite_only_check)
        elif include_public:
            query = query.filter(invite_only_check)
        elif include_subscribed:
            query = query.filter(recipient_check)
        else:
            # We're including nothing, so don't bother hitting the DB.
            query = []
    def make_dict(row):
        # Project a Stream row onto the public wire format.
        return dict(
            stream_id = row.id,
            name = row.name,
            description = row.description,
            invite_only = row.invite_only,
        )
    streams = [make_dict(row) for row in query]
    streams.sort(key=lambda elt: elt["name"])
    return streams
| apache-2.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/ctypes/test/test_cfuncs.py | 102 | 7680 | # A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Exercise the exported tf_* / tv_* helpers of the _ctypes_test DLL.

    As the assertions below show, each tf_<code> function takes one
    argument of the ctype named by <code>, stores it in a DLL-global
    (read back via S()/U()) and returns the value divided by 3; the
    tf_b<code> variants take an extra leading byte argument.  tv_i
    returns nothing.
    """
    _dll = CDLL(_ctypes_test.__file__)
    def S(self):
        # Signed value the DLL stored from the last tf_* call.
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
    def U(self):
        # Unsigned value the DLL stored from the last tf_* call.
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)
    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)
    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)
    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)
    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_ulong_plus(self):
        # Note: unlike the other *_plus tests this one passes the leading
        # argument as c_char (b' ') rather than c_byte 0.
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)
    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)
    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)
    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)
    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)
    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)
    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_callwithresult(self):
        # restype may be a Python callable that post-processes the raw int.
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)
    def test_void(self):
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    # Non-Windows: WinDLL does not exist, so install a no-op placeholder;
    # the need_symbol decorator below skips the subclass anyway.
    def stdcall_dll(*_): pass
else:
    class stdcall_dll(WinDLL):
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            # stdcall exports carry an "s_" prefix; cache the resolved
            # function pointer on the instance so __getattr__ runs once.
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)
            return func
@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
    _dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
ariegg/webiopi-drivers | chips/sensor/lis3dh/lis3dh.py | 1 | 11552 | # Copyright 2017 Andreas Riegg - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Changelog
#
# 1.0 2017/01/13 Initial release
#
# Config parameters
#
# - slave 8 bit Value of the I2C slave address for the chip.
# Defaults to 0x18. Possible values are 0x18 and 0x19.
# - grange Integer G range value of the chip. Valid values are 2, 4, 8
# or 16. Default is 2.
# - odr Integer Output data rate (ODR) value of the chip in Hz in
# running in normal mode. Valid values are from
# (1, 10, 25, 50, 100, 200, 400, 1250). Default is 50.
# - hr Boolean Value of the high resolution bit. Possible values are
# "yes" or "no". Default is "yes".
# - bus String Name of the I2C bus
#
# Usage remarks
#
# - You can change the G range and ODR parameters of the chip at runtime.
#
# Implementation remarks
#
# - This driver is implemented based on the specs from ST Microelectronics.
# - This driver uses floating point calculation and takes no care about integer
# only arithmetics. For that reason, the mathematical lowest possible LSB value is
# calculated automatically and used.
# - This driver does NOT implement all interrupt and click detection features of the
# chip.
# - This driver does currently not implement the auxiliary ADC and temperature
# features of the chip.
#
from webiopi.utils.logger import debug
from webiopi.decorators.rest import request, response, api
from webiopi.utils.types import toint, signInteger, str2bool, M_JSON
from webiopi.devices.i2c import I2C
from webiopi.devices.sensor import LinearAcceleration
#---------- Class definition ----------
class LIS3DH(I2C, LinearAcceleration):
    """WebIOPi driver for the ST LIS3DH 3-axis accelerometer (I2C).

    Bug fix relative to the previous revision: __getGrange__ and
    __getOdr__ shifted the masked register bits right by 4 before
    comparing them with the FS_*/ODR_* constants, which are stored
    already shifted left by 4 (and __getGrange__ compared the 2g case
    against an ODR_ constant).  No branch could ever match, so both
    getters silently returned a stale cached value.  The masked bits are
    now compared against the constants directly.
    """
    CTRL_REG1_ADDRESS = 0x20
    CTRL_REG4_ADDRESS = 0x23
    OUT_X_L_ADDRESS   = 0x28
    #OUT_X_H_ADDRESS   = 0x29
    OUT_Y_L_ADDRESS   = 0x2A
    #OUT_Y_H_ADDRESS   = 0x2B
    OUT_Z_L_ADDRESS   = 0x2C
    #OUT_Z_H_ADDRESS   = 0x2D
    AUTO_INCREM_FLAG  = 0b1 << 7
    BLOCK_UPDATE_FLAG = 0b1 << 7
    HR_FLAG           = 0b1 << 3
    # Full-scale (FS) bit patterns of CTRL_REG4, pre-shifted to bits 5:4.
    FS_2G_VALUE       = 0b00 << 4
    FS_4G_VALUE       = 0b01 << 4
    FS_8G_VALUE       = 0b10 << 4
    FS_16G_VALUE      = 0b11 << 4
    FS_MASK           = 0b00110000
    # Output-data-rate (ODR) bit patterns of CTRL_REG1, pre-shifted to bits 7:4.
    ODR_NOPOWER_VALUE = 0b0000 << 4
    ODR_1_HZ_VALUE    = 0b0001 << 4
    ODR_10_HZ_VALUE   = 0b0010 << 4
    ODR_25_HZ_VALUE   = 0b0011 << 4
    ODR_50_HZ_VALUE   = 0b0100 << 4
    ODR_100_HZ_VALUE  = 0b0101 << 4
    ODR_200_HZ_VALUE  = 0b0110 << 4
    ODR_400_HZ_VALUE  = 0b0111 << 4
    #ODR_1600_HZ_VALUE = 0b1000 << 4
    ODR_1250_HZ_VALUE = 0b1001 << 4
    ODR_MASK          = 0b11110000
    ACCEL_FS_2G_LSB_VALUE  =  2.0 / 32767 # 1mg/digit at +/- 2g (@15 bit) full scale
    ACCEL_FS_4G_LSB_VALUE  =  4.0 / 32767 # 2 x 1mg/digit
    ACCEL_FS_8G_LSB_VALUE  =  8.0 / 32767 # 4 x 1mg/digit
    ACCEL_FS_16G_LSB_VALUE = 24.0 / 32767 # 6 x 1mg/digit
    #---------- Class initialisation ----------
    def __init__(self, slave=0x18, grange=2, odr=50, hr="yes" , bus=None):
        # slave: I2C address (0x18/0x19); grange: full-scale in g;
        # odr: output data rate in Hz; hr: enable high-resolution mode.
        I2C.__init__(self, toint(slave), bus)
        self._odrBeforeSleep = None
        self._hr = str2bool(hr)
        if self._hr:
            reg4initval = self.BLOCK_UPDATE_FLAG | self.HR_FLAG
        else:
            reg4initval = self.BLOCK_UPDATE_FLAG
        self.writeRegister(self.CTRL_REG4_ADDRESS, reg4initval)
        self.__setGrange__(toint(grange))
        self.__setOdr__(toint(odr))
    #---------- Abstraction framework contracts ----------
    def __str__(self):
        return "LIS3DH(slave=0x%02X, dev=%s)" % (self.slave, self.device())
    def __family__(self):
        return LinearAcceleration.__family__(self)
    #---------- LinearAcceleration abstraction related methods ----------
    def __getMeterPerSquareSecondX__(self):
        return self.Gravity2MeterPerSquareSecond(self.__getGravityX__())
    def __getMeterPerSquareSecondY__(self):
        return self.Gravity2MeterPerSquareSecond(self.__getGravityY__())
    def __getMeterPerSquareSecondZ__(self):
        return self.Gravity2MeterPerSquareSecond(self.__getGravityZ__())
    def __getGravityX__(self):
        # 16-bit two's-complement raw value scaled by the current LSB weight.
        rawGravityX = self.__read16BitRegister__(self.OUT_X_L_ADDRESS)
        debug("%s: raw gravity x=%s" % (self.__str__(), bin(rawGravityX)))
        return signInteger(rawGravityX, 16) * self._gravityLSB
    def __getGravityY__(self):
        rawGravityY = self.__read16BitRegister__(self.OUT_Y_L_ADDRESS)
        debug("%s: raw gravity y=%s" % (self.__str__(), bin(rawGravityY)))
        return signInteger(rawGravityY, 16) * self._gravityLSB
    def __getGravityZ__(self):
        rawGravityZ = self.__read16BitRegister__(self.OUT_Z_L_ADDRESS)
        debug("%s: raw gravity z=%s" % (self.__str__(), bin(rawGravityZ)))
        return signInteger(rawGravityZ, 16) * self._gravityLSB
    #---------- Device methods that implement features including additional REST mappings ----------
    @api("Device", 3, "feature", "driver")
    @request("POST", "run/sleep")
    @response("%s")
    def sleep(self):
        self.__sleep__()
        return "Chip sent to sleep."
    def __sleep__(self):
        # Remember the active ODR (once) so __wake__ can restore it, then
        # power the chip down by clearing the ODR bits.
        if self._odrBeforeSleep is None:
            self._odrBeforeSleep = self._odr
        bitsOdr = self.ODR_NOPOWER_VALUE
        currentValue = self.readRegister(self.CTRL_REG1_ADDRESS)
        newValue = (currentValue & ~self.ODR_MASK) | bitsOdr
        self.writeRegister(self.CTRL_REG1_ADDRESS, newValue)
        self._odr = 0
        debug("%s: chip sent to power down" % self.__str__())
    @api("Device", 3, "feature", "driver")
    @request("POST", "run/wake")
    @response("%s")
    def wake(self):
        self.__wake__()
        return "Chip woken up."
    def __wake__(self):
        # Restore the ODR that was active before the last __sleep__; no-op
        # if the chip was never put to sleep.
        if self._odrBeforeSleep is not None:
            self.__setOdr__(self._odrBeforeSleep)
            self._odrBeforeSleep = None
        debug("%s: chip woken up" % self.__str__())
    #---------- Device methods that implement chip configuration settings including additional REST mappings ----------
    @api("Device", 3, "configuration", "driver")
    @request("GET", "configure/*")
    @response(contentType=M_JSON)
    def getConfiguration(self):
        values = {}
        values["grange"] = "%d" % self._grange
        values["odr"] = "%d" % self._odr
        values["hr"] = "%s" % self._hr
        values["gravity LSB"] = "%f" % self._gravityLSB
        return values
    @api("Device", 3, "configuration", "driver")
    @request("POST", "configure/grange/%(grange)d")
    @response("%d")
    def setGrange(self, grange):
        self.__setGrange__(grange)
        return self.__getGrange__()
    @api("Device", 3, "configuration", "driver")
    @request("GET", "configure/grange")
    @response("%d")
    def getGrange(self):
        return self.__getGrange__()
    def __setGrange__(self, grange):
        # Program the FS bits and cache the matching LSB weight for the
        # gravity conversion.
        if grange not in (2, 4, 8, 16):
            raise ValueError("Parameter grange:%d not one of the allowed values (2, 4, 8, 16)" % grange)
        if grange == 2:
            bitsGrange = self.FS_2G_VALUE
            self._gravityLSB = self.ACCEL_FS_2G_LSB_VALUE
        elif grange == 4:
            bitsGrange = self.FS_4G_VALUE
            self._gravityLSB = self.ACCEL_FS_4G_LSB_VALUE
        elif grange == 8:
            bitsGrange = self.FS_8G_VALUE
            self._gravityLSB = self.ACCEL_FS_8G_LSB_VALUE
        elif grange == 16:
            bitsGrange = self.FS_16G_VALUE
            self._gravityLSB = self.ACCEL_FS_16G_LSB_VALUE
        currentValue = self.readRegister(self.CTRL_REG4_ADDRESS)
        newValue = (currentValue & ~self.FS_MASK) | bitsGrange
        self.writeRegister(self.CTRL_REG4_ADDRESS, newValue)
        self._grange = grange
        debug("%s: set grange=+/-%d g" % (self.__str__(), grange))
    def __getGrange__(self):
        # FIX: compare the masked bits directly against the pre-shifted FS_*
        # constants.  The previous code shifted right by 4 (and compared the
        # 2g case against ODR_10_HZ_VALUE), so no branch ever matched.
        bitsGrange = self.readRegister(self.CTRL_REG4_ADDRESS) & self.FS_MASK
        if bitsGrange == self.FS_2G_VALUE:
            self._grange = 2
        elif bitsGrange == self.FS_4G_VALUE:
            self._grange = 4
        elif bitsGrange == self.FS_8G_VALUE:
            self._grange = 8
        elif bitsGrange == self.FS_16G_VALUE:
            self._grange = 16
        return self._grange
    @api("Device", 3, "configuration", "driver")
    @request("POST", "configure/odr/%(odr)d")
    @response("%d")
    def setOdr(self, odr):
        self.__setOdr__(odr)
        return self.__getOdr__()
    @api("Device", 3, "configuration", "driver")
    @request("GET", "configure/odr")
    @response("%d")
    def getOdr(self):
        return self.__getOdr__()
    def __setOdr__(self, odr):
        if odr not in (1, 10, 25, 50, 100, 200, 400, 1250):
            raise ValueError("Parameter odr:%d not one of the allowed values (1, 10, 25, 50, 100, 200, 400, 1250)" % odr)
        if odr == 1:
            bitsOdr = self.ODR_1_HZ_VALUE
        elif odr == 10:
            bitsOdr = self.ODR_10_HZ_VALUE
        elif odr == 25:
            bitsOdr = self.ODR_25_HZ_VALUE
        elif odr == 50:
            bitsOdr = self.ODR_50_HZ_VALUE
        elif odr == 100:
            bitsOdr = self.ODR_100_HZ_VALUE
        elif odr == 200:
            bitsOdr = self.ODR_200_HZ_VALUE
        elif odr == 400:
            bitsOdr = self.ODR_400_HZ_VALUE
        elif odr == 1250:
            bitsOdr = self.ODR_1250_HZ_VALUE
        currentValue = self.readRegister(self.CTRL_REG1_ADDRESS)
        newValue = (currentValue & ~self.ODR_MASK) | bitsOdr
        self.writeRegister(self.CTRL_REG1_ADDRESS, newValue)
        self._odr = odr
        debug("%s: set odr=%d Hz" % (self.__str__(), odr))
    def __getOdr__(self):
        # FIX: same shift bug as __getGrange__ -- compare the masked bits
        # directly against the pre-shifted ODR_* constants.  If the chip is
        # powered down (ODR_NOPOWER_VALUE) the cached value is returned.
        bitsOdr = self.readRegister(self.CTRL_REG1_ADDRESS) & self.ODR_MASK
        if bitsOdr == self.ODR_1_HZ_VALUE:
            self._odr = 1
        elif bitsOdr == self.ODR_10_HZ_VALUE:
            self._odr = 10
        elif bitsOdr == self.ODR_25_HZ_VALUE:
            self._odr = 25
        elif bitsOdr == self.ODR_50_HZ_VALUE:
            self._odr = 50
        elif bitsOdr == self.ODR_100_HZ_VALUE:
            self._odr = 100
        elif bitsOdr == self.ODR_200_HZ_VALUE:
            self._odr = 200
        elif bitsOdr == self.ODR_400_HZ_VALUE:
            self._odr = 400
        elif bitsOdr == self.ODR_1250_HZ_VALUE:
            self._odr = 1250
        return self._odr
    #---------- Register helper methods ----------
    def __read16BitRegister__(self, addr):
        # Set the auto-increment flag so low and high bytes come from one
        # 2-byte burst read; combine them little-endian.
        addr = addr | self.AUTO_INCREM_FLAG
        regBytes = self.readRegisters(addr, 2)
        return regBytes[0] | regBytes[1] << 8
| apache-2.0 |
Encesat/LumexData | LumexData.py | 1 | 17585 | #!/usr/bin/python2
#LumexData.py
import datetime as dt
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
omitFlagged = True
def storeAsDatetime(date, time):
    """Combine a 'DD.MM.YYYY' date string and an 'HH:MM:SS' time string
    into a single datetime.datetime object."""
    d = date.split(".")
    t = time.split(":")
    return dt.datetime(int(d[2]), int(d[1]), int(d[0]),
                       int(t[0]), int(t[1]), int(t[2]))
#Calculate the passed days of the year (including leap year)
#Calculate the passed days of the year (including leap year)
def month(mon, yea):
    """Return how many days of year `yea` have passed before the first day
    of month `mon` (1-12).

    Fixes a bug in the previous implementation, which added the leap day
    for every month of a leap year, including January and February; the
    extra day is only counted once February 29 has actually passed, i.e.
    for months March through December.
    """
    # Cumulative day counts before each month in a non-leap year.
    DAYS_BEFORE_MONTH = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)
    days = DAYS_BEFORE_MONTH[mon - 1]
    # Gregorian leap-year rule.
    is_leap = (yea % 4 == 0 and yea % 100 != 0) or yea % 400 == 0
    if is_leap and mon > 2:
        days += 1
    return days
def calcDays(date):
    """Return an absolute day number for `date` (a datetime.date or
    datetime.datetime; the time of day is ignored).

    Uses the proleptic Gregorian ordinal, so the difference between two
    results is an exact day count.  The previous implementation computed
    365*year + month offset + day, which ignored the leap days of all
    preceding years and therefore drifted by one day per leap year
    crossed when taking differences.
    """
    return date.toordinal()
def calcDatetime(days):
    # TODO: unimplemented stub.  Intended to convert a day count back to a
    # datetime (the inverse of calcDays); currently returns None for every
    # input.
    pass
##
#
# This program contains the class LumexData. This class
#
# - stores the content of the calibration file (__init__)
# - flags the data (flagging) (Not implemented, because no flagging criteria are available)
# - arranges the accepted and flagged data in columns of time and concentration (storeTimeConc)
# - calculates the number of accepted measurements per day and calculates the daily means of the data set (length & averaging)
# - creates a column with date and the daily means, and flags the daily means of which the number of accepted measurements is smaller than 216 (averaging)
class LumexData:
    ## Constructor of LumexData
    #
    # Reads the content of the calibrationfile. Stores as a list of hashmaps
    # Each record carries date/time, zero span, calibration factor,
    # temperature and concentration; "flag" == -1 marks an accepted sample.
    def __init__(self, calibrationfile="none", filedescriptor="none"):
        self.__lumexdata = []
        self.averaged = False
        #Open the calibrationfile and read the content.
        #The file should be stored as a plain text format
        # NOTE(review): if neither calibrationfile nor filedescriptor is
        # given, "calibration" is unbound and the loop below raises
        # NameError — confirm both-"none" is never a valid call.
        if(calibrationfile != "none"):
            f = open(calibrationfile, "r")
            calibration = f.readlines()
        elif(filedescriptor != "none"):
            calibration = filedescriptor.readlines()
        for line in calibration:
            # Skip any line that does not have exactly 7 space-separated fields.
            if(len(line.split(" ")) != 7):
                continue
            #Store date and time as datetime
            x = storeAsDatetime(line.split(" ")[0], line.split(" ")[1])
            self.__lumexdata.append({"date": x, "time_dec": float(line.split(" ")[2]), "zero_span": float(line.split(" ")[3]), \
                                     "calib_factor": float(line.split(" ")[4]), "temperature": float(line.split(" ")[5]), \
                                     "concentration": float(line.split(" ")[6]), "flag": -1, "standarddeviation": 0, "counter": 0})
        return
    #END OF __init__()
    ## Helpfunction of LumexData
    #
    # Explains the class LumexData
    # Prints the README file found in the current working directory.
    def help(self):
        f = open("README", "r")
        cont = f.readlines()
        for element in cont:
            print(element)
        return
    #END OF help()
    ## Getter of LumexData
    #
    # Return the data of __lumexdata
    # elementnumber may be a "start:end" string (range of records), a record
    # index, or -1 for all records; key may be a field name, a comma-separated
    # list of field names (string range form only), or "all" for whole records.
    def get(self, elementnumber, key):
        if(type(elementnumber) is str):
            output = []
            # NOTE(review): only AttributeError is caught here; a malformed
            # range such as "a:b" raises ValueError from int(), and if the
            # except path is taken start/end stay unbound — confirm callers
            # always pass "start:end".
            try:
                start = int(elementnumber.split(":")[0])
                end = int(elementnumber.split(":")[1])
            except AttributeError:
                pass
            raw_keys = key.split(",")
            keys = []
            for element in raw_keys:
                keys.append(element)
            for i in range(start, end):
                for element in keys:
                    output.append(self.__lumexdata[i][element])
            return output
        elif(elementnumber != -1 and key != "all"):
            return self.__lumexdata[elementnumber][key]
        elif(elementnumber != -1 and key == "all"):
            return self.__lumexdata[elementnumber]
        elif(elementnumber == -1 and key != "all"):
            output = []
            for i in range(len(self.__lumexdata)):
                output.append(self.__lumexdata[i][key])
            return output
        elif(elementnumber == -1 and key == "all"):
            output = []
            for i in range(len(self.__lumexdata)):
                output.append(self.__lumexdata[i])
            return output
        return
    #END OF get()
    ## Length of LumexData.__lumexdate
    #
    # Return the number of values in the object
    def length(self):
        return len(self.__lumexdata)
    #END OF length()
    ## Save the data to a txt-file
    #
    # Stores the time and the concentration
    # Accepted samples (flag == -1) go to `filename`; flagged samples go to
    # "flagged_<filename>".  `ran` restricts output to a "start:end" index range.
    def storeTimeConc(self, filename, ran="all"):
        f = open(filename, "w")
        # NOTE(review): g is never closed below — only f.close() is called.
        g = open("flagged_"+filename,"w")
        if(ran != "all"):
            start = int(ran.split(":")[0])
            end = int(ran.split(":")[1])
        else:
            start = 0
            end = len(self.__lumexdata)
        f.write("1. Date\n2. Time in decimal\n3. Concentration\n")
        for i in range(len(self.__lumexdata)):
            if(i >= start and i < end):
                if(self.__lumexdata[i]["flag"] == -1):
                    f.write("{} {}\t{}\n".format(self.__lumexdata[i]["date"], self.__lumexdata[i]["time_dec"], self.__lumexdata[i]["concentration"]))
                else:
                    g.write("{} {}\t{}\t{}\n".format(self.__lumexdata[i]["date"], self.__lumexdata[i]["time_dec"], self.__lumexdata[i]["concentration"], self.__lumexdata[i]["flag"]))
        f.close()
        return
    ## Flag the data
    #
    # Flags the data by the given criteria. criteria has to be a textfile
    # NOTE(review): `criteria` is currently unused (no flagging criteria are
    # available yet), the dump is written *before* the new flags are applied,
    # and every flag is then set to 0 — i.e. nothing is marked accepted (-1)
    # afterwards.  Confirm this placeholder behaviour is intended.
    def flagging(self, filename="Flagged.dat", criteria=None):
        f = open(filename, "w")
        flag = [0 for x in range(len(self.__lumexdata))]
        #Here flag the data by the given criteria
        for line in self.__lumexdata:
            f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(line["date"], line["time_dec"], line["zero_span"], line["calib_factor"], line["temperature"], line["concentration"], line["flag"]))
        f.close()
        for i in range(len(self.__lumexdata)):
            self.__lumexdata[i]["flag"] = flag[i]
        return
    ## Averaging the data
    #
    # Group the data for each day and calculate the mean, print them to a file (and return the output to the calling function)
    # Returns [daily means, standard deviations, dates, flags].  A day needs at
    # least 216 accepted samples (out of 288 for 5-min data) to count as good.
    def averaging(self, ran="all", overwrite=False):
        f = open("averagedOutput.txt", "w")
        givendate = calcDays(self.__lumexdata[0]["date"])
        print(givendate)
        #givendate = 365*(self.__lumexdata[0]["date"].year - 1900) + month(self.__lumexdata[0]["date"].month, self.__lumexdata[0]["date"].year) + self.__lumexdata[0]["date"].day
        dummylist = []
        averaged = []
        errors = []
        dates = []
        flag = []
        counter = 0
        if(ran != "all"):
            start = int(ran.split(":")[0])
            end = int(ran.split(":")[1])
        else:
            start = 0
            end = len(self.__lumexdata)
        i = start
        #for i in range(start, end): #Iterate over the whole data
        while(i < end):
            #mydate = 365*(self.__lumexdata[i]["date"].year - 1900) + month(self.__lumexdata[i]["date"].month, self.__lumexdata[i]["date"].year) + self.__lumexdata[i]["date"].day
            mydate = calcDays(self.__lumexdata[i]["date"])
            #print(mydate)
            if(mydate == givendate):
                #Omit the flagged data (if any)
                if(omitFlagged and self.__lumexdata[i]["flag"] == -1):
                    dummylist.append(self.__lumexdata[i]["concentration"])
                    counter = counter + 1
            else:
                # Day boundary reached: flush the finished day to file and lists.
                date = "{}.{}.{}".format(self.__lumexdata[i-1]["date"].day, self.__lumexdata[i-1]["date"].month, self.__lumexdata[i-1]["date"].year)
                if(counter >= 216):
                    f.write("{}\t{}\t{}\t{}\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
                else:
                    f.write("{}\t{}\t{}\t{}\t###\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
                givendate = mydate
                averaged.append(np.mean(dummylist))
                errors.append(np.std(dummylist))
                dates.append(dt.datetime.strptime(date, "%d.%m.%Y"))
                # NOTE(review): the file treats counter >= 216 as good but this
                # list uses counter <= 216 -> 0, so counter == 216 is good in the
                # file yet flagged here (elsewhere -1 means accepted) — confirm
                # the intended threshold and flag convention.
                flag.append(0 if counter <= 216 else -1)
                # NOTE(review): if the first sample of the new day is flagged,
                # dummylist/counter are NOT reset and the old day's samples leak
                # into the new day — presumably a latent bug; confirm.
                if(omitFlagged and self.__lumexdata[i]["flag"] == -1):
                    dummylist = [self.__lumexdata[i]["concentration"]]
                    counter = 1
            i = i + 1
        # Flush the trailing, still-open day (if it collected any samples).
        if(counter != 0):
            date = "{}.{}.{}".format(self.__lumexdata[end-1]["date"].day, self.__lumexdata[end-1]["date"].month, self.__lumexdata[end-1]["date"].year)
            if(counter >= 216):
                f.write("{}\t{}\t{}\t{}\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
            else:
                f.write("{}\t{}\t{}\t{}\t###\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
            averaged.append(np.mean(dummylist))
            errors.append(np.std(dummylist))
            dates.append(dt.datetime.strptime(date, "%d.%m.%Y"))
            flag.append(0 if counter <= 216 else -1)
            dummylist = []
            counter = 0
        f.close()
        #Overwrite the content of lumexdata
        # Re-read the file just written and replace the raw records by the
        # daily means (flag 99 marks days with too few accepted samples).
        if(overwrite):
            f = open("averagedOutput.txt", "r")
            content = f.readlines()
            f.close()
            self.__lumexdata = [dict([("date", 0), ("concentration", 0), ("standarddeviation", 0), ("counter", 0), ("flag", 0)]) for x in range(len(content))]
            for i in range(len(content)):
                self.__lumexdata[i]["date"] = dt.datetime(int(content[i].split("\t")[0].split(".")[2]), int(content[i].split("\t")[0].split(".")[1]), int(content[i].split("\t")[0].split(".")[0]))
                self.__lumexdata[i]["concentration"] = float(content[i].split("\t")[1])
                self.__lumexdata[i]["standarddeviation"] = float(content[i].split("\t")[2])
                self.__lumexdata[i]["counter"] = int(content[i].split("\t")[3])
                self.__lumexdata[i]["flag"] = 99 if int(content[i].split("\t")[3]) < 216 else -1
            self.averaged = True
        return [averaged, errors, dates, flag]
    #END OF average()
    #Calculate fit using scipy.optimize.leastsq. The x-axis is the number of measurements. Fit as
    #sinusoidal function
    #linear
    #polynomial (2 - 6)
    #exponential
    #logarithm
    #gauss
    #Parameter has to be a list with initial parameters
    # Returns [fitfunc, fitted parameters, x-axis array].  With averaged != 0
    # the daily means passed in are fitted instead of the raw samples.
    def __fitting(self, parameter, daily=False, ran="all", typ="trig", averaged=0, errors=0, av_date=0, flag=0):
        dates = []
        conc = []
        standarddeviation = []
        # `ran` as "dd.mm.yyyy:dd.mm.yyyy" selects a date window; otherwise
        # the full data range is used.
        if(ran != "all"):
            begin = ran.split(":")[0]
            end = ran.split(":")[1]
            begin = dt.datetime(int(begin.split(".")[2]), int(begin.split(".")[1]), int(begin.split(".")[0]))
            end = dt.datetime(int(end.split(".")[2]), int(end.split(".")[1]), int(end.split(".")[0]))
        else:
            begin = self.__lumexdata[0]["date"]
            end = self.__lumexdata[-1]["date"]
        if(averaged == 0):
            # Fit the raw (accepted) samples.
            for i in range(len(self.__lumexdata)):
                if(self.__lumexdata[i]["date"] >= begin and self.__lumexdata[i]["date"] < end or ran == "all"):
                    if(self.__lumexdata[i]["flag"] == -1):
                        dates.append(self.__lumexdata[i]["date"])
                        conc.append(self.__lumexdata[i]["concentration"])
        else:
            #[averaged, errors, av_date, flag] = self.averaging()
            standarddeviation = []
            for i in range(len(averaged)):
                if(av_date[i] >= begin and av_date[i] < end or ran == "all"):
                    if(flag[i] == -1):
                        dates.append(av_date[i])
                        conc.append(averaged[i])
                        standarddeviation.append(errors[i])
        # x-axis is simply the sample index 0..N-1, not the timestamps.
        array = np.linspace(0,len(dates)-1,len(dates))
        #FITTING
        if(typ == "trig"):
            fitfunc = lambda parameter, x: parameter[0] * np.cos(2*np.pi / parameter[1]*x + parameter[2]) + parameter[3]*x
        elif(typ == "lin"):
            fitfunc = lambda parameter, x: parameter[0] * x + parameter[1]
        elif(typ == "poly2"):
            fitfunc = lambda parameter, x: parameter[0] * x**2 + parameter[1] * x + parameter[2]
        elif(typ == "poly3"):
            fitfunc = lambda parameter, x: parameter[0] * x**3 + parameter[1] * x**2 + parameter[2] * x + parameter[3]
        elif(typ == "poly4"):
            fitfunc = lambda parameter, x: parameter[0] * x**4 + parameter[1] * x**3 + parameter[2] * x**2 + parameter[3] * x + parameter[4]
        elif(typ == "poly5"):
            fitfunc = lambda parameter, x: parameter[0] * x**5 + parameter[1] * x**4 + parameter[2] * x**3 + parameter[3] * x**2 + parameter[4] * x + parameter[5]
        elif(typ == "poly6"):
            fitfunc = lambda parameter, x: parameter[0] * x**6 + parameter[1] * x**5 + parameter[2] * x**4 + parameter[3] * x**3 + parameter[4] * x**2 + parameter[5] * x + parameter[6]
        elif(typ == "exp"):
            fitfunc = lambda parameter, x: parameter[0] * np.exp(parameter[1] * x + parameter[2]) + parameter[3] * x + parameter[4]
        elif(typ == "log"):
            fitfunc = lambda parameter, x: parameter[0] * np.log(x) / np.log(parameter[1]) + parameter[2] * x + parameter[3]
        elif(typ == "gauss"):
            fitfunc = lambda parameter, x: 1 / (parameter[0] * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - parameter[1])/(parameter[0]))**2)
        # Least-squares residual; standarddeviation is not used as weights here.
        errfunc = lambda parameter, x, y: fitfunc(parameter,x) - y
        p1, success = opt.leastsq(errfunc, parameter[:], args=(array, conc))
        return [fitfunc, p1, array]
    #Plotting without fit. If daily=True, use the daily mean, otherwise use all unflagged data
    # Plots concentration vs. date; with leastsq=True the __fitting() curve is
    # overlaid and the fit parameters are written to "fitparams.txt".
    def plotting(self, title="Default", xlabel="x-Axis", ylabel="y-Axis", daily=False, ran="all", axessize=10, fsize=10, msize=10, colour="#000000", markerstyle="h", leastsq=False, typ="lin", parameter=[1,1], averaged=0, errors=0, av_date=0, flag=[]):
        dates = []
        conc = []
        if(ran != "all"):
            begin = ran.split(":")[0]
            end = ran.split(":")[1]
            begin = dt.datetime(int(begin.split(".")[2]), int(begin.split(".")[1]), int(begin.split(".")[0]))
            end = dt.datetime(int(end.split(".")[2]), int(end.split(".")[1]), int(end.split(".")[0]))
        else:
            begin = self.__lumexdata[0]["date"]
            end = self.__lumexdata[-1]["date"]
        if(averaged == 0):
            for i in range(len(self.__lumexdata)):
                if(self.__lumexdata[i]["date"] >= begin and self.__lumexdata[i]["date"] < end or ran == "all"):
                    if(self.__lumexdata[i]["flag"] == -1):
                        dates.append(self.__lumexdata[i]["date"])
                        conc.append(self.__lumexdata[i]["concentration"])
        else:
            standarddeviation = []
            for i in range(len(averaged)):
                if(av_date[i] >= begin and av_date[i] < end or ran == "all"):
                    if(flag[i] == -1):
                        dates.append(av_date[i])
                        conc.append(averaged[i])
                        standarddeviation.append(errors[i])
        fig = plt.figure()
        #NotForUsing, (sp1) = plt.subplots(1, 1, sharey=False)
        #sp1.set_title(title, fontsize=fsize)
        plt.title(title, fontsize=fsize)
        # Raw data: plain markers; daily means: error bars from the std dev.
        if(averaged == 0):
            plt.plot(dates, conc, ls=".", marker=markerstyle, markersize=msize, color=colour)
        #    sp1.plot(dates, conc, ls=".", marker=markerstyle, markersize=msize, color=colour)
        else:
            plt.errorbar(dates, conc, yerr=standarddeviation, fmt=markerstyle, markersize=msize, color=colour)
        #    sp1.errorbar(dates, conc, yerr=standarddeviation, fmt=markerstyle, markersize=msize, color=colour)
        if(leastsq):
            [fitfunc, p1, array] = self.__fitting(parameter, daily, ran, typ=typ, averaged=averaged, errors=errors, av_date=av_date, flag=flag)
            #Write fitparameters to a file
            f = open("fitparams.txt", "w")
            for i in range(len(p1)):
                f.write("p{} = {}\n".format(i, p1[i]))
            f.close()
            plt.plot(dates, fitfunc(p1, array))
        #    sp1.plot(dates, fitfunc(p1, array))
        #sp1.tick_params(labelsize=axessize)
        plt.tick_params(labelsize=axessize)
        plt.xlabel(xlabel, fontsize=fsize)
        plt.ylabel(ylabel, fontsize=fsize)
        #sp1.set_xlabel(xlabel, fontsize=fsize)
        #sp1.set_ylabel(ylabel, fontsize=fsize)
        #sp1.grid(True)
        plt.grid(True)
        plt.show()
| mit |
mgaitan/django-invitation | invitation/migrations/0001_initial.py | 1 | 2209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the invitation app: an InvitationKey issued by a
    # user, and a per-user InvitationUser quota record.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='InvitationKey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('key', models.CharField(verbose_name='invitation key', db_index=True, max_length=40)),
                ('date_invited', models.DateTimeField(verbose_name='date invited', auto_now_add=True)),
                ('uses_left', models.IntegerField(default=1)),
                # Validity period in days; null/blank means no explicit duration.
                ('duration', models.IntegerField(null=True, default=7, blank=True)),
                ('recipient_email', models.EmailField(default='', max_length=254, blank=True)),
                ('recipient_first_name', models.CharField(default='', max_length=24, blank=True)),
                ('recipient_last_name', models.CharField(default='', max_length=24, blank=True)),
                ('recipient_phone_number', models.CharField(max_length=15, blank=True)),
                ('recipient_other', models.CharField(default='', max_length=255, blank=True)),
                ('from_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='invitations_sent')),
                # NOTE(review): null=True has no effect on ManyToManyField
                # (Django warns fields.W340); harmless but kept to match the
                # historical migration state.
                ('registrant', models.ManyToManyField(null=True, to=settings.AUTH_USER_MODEL, related_name='invitations_used', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='InvitationUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('invites_allocated', models.IntegerField(default=3)),
                ('invites_accepted', models.IntegerField(default=0)),
                ('inviter', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
pmisik/buildbot | master/buildbot/test/unit/test_clients_usersclient.py | 6 | 3409 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.internet import defer
from twisted.internet import reactor
from twisted.spread import pb
from twisted.trial import unittest
from buildbot.clients import usersclient
class TestUsersClient(unittest.TestCase):
    """Tests for UsersClient.send(): the PB factory, reactor connection and
    remote call are all replaced by fakes so no network I/O happens."""
    def setUp(self):
        # patch out some PB components and make up some mocks
        self.patch(pb, 'PBClientFactory', self._fake_PBClientFactory)
        self.patch(reactor, 'connectTCP', self._fake_connectTCP)
        self.factory = mock.Mock(name='PBClientFactory')
        self.factory.login = self._fake_login
        self.factory.login_d = defer.Deferred()
        self.remote = mock.Mock(name='PB Remote')
        self.remote.callRemote = self._fake_callRemote
        self.remote.broker.transport.loseConnection = self._fake_loseConnection
        # results
        self.conn_host = self.conn_port = None
        self.lostConnection = False
    def _fake_PBClientFactory(self):
        # Always hand back the single mocked factory built in setUp.
        return self.factory
    def _fake_login(self, creds):
        # Credentials are ignored; the deferred fires from _fake_connectTCP.
        return self.factory.login_d
    def _fake_connectTCP(self, host, port, factory):
        # Record the connection target and immediately "log in" by firing
        # the login deferred with the fake remote reference.
        self.conn_host = host
        self.conn_port = port
        self.assertIdentical(factory, self.factory)
        self.factory.login_d.callback(self.remote)
    def _fake_callRemote(self, method, op, bb_username, bb_password, ids, info):
        # Capture the arguments of the 'commandline' remote call for later
        # inspection by assertProcess().
        self.assertEqual(method, 'commandline')
        self.called_with = dict(op=op, bb_username=bb_username,
                                bb_password=bb_password, ids=ids, info=info)
        return defer.succeed(None)
    def _fake_loseConnection(self):
        self.lostConnection = True
    def assertProcess(self, host, port, called_with):
        # Single assertion over connection target plus remote-call arguments.
        self.assertEqual([host, port, called_with],
                         [self.conn_host, self.conn_port, self.called_with])
    @defer.inlineCallbacks
    def test_usersclient_info(self):
        uc = usersclient.UsersClient('localhost', "user", "userpw", 1234)
        yield uc.send('update', 'bb_user', 'hashed_bb_pass', None,
                      [{'identifier': 'x', 'svn': 'x'}])
        self.assertProcess('localhost', 1234,
                           dict(op='update', bb_username='bb_user',
                                bb_password='hashed_bb_pass', ids=None,
                                info=[dict(identifier='x', svn='x')]))
    @defer.inlineCallbacks
    def test_usersclient_ids(self):
        uc = usersclient.UsersClient('localhost', "user", "userpw", 1234)
        yield uc.send('remove', None, None, ['x'], None)
        self.assertProcess('localhost', 1234,
                           dict(op='remove', bb_username=None,
                                bb_password=None, ids=['x'], info=None))
| gpl-2.0 |
samabhi/pstHealth | venv/lib/python2.7/site-packages/django/contrib/gis/gdal/srs.py | 366 | 12043 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """
    def __init__(self, srs_input='', srs_type='user'):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        if srs_type == 'wkt':
            self.ptr = capi.new_srs(c_char_p(b''))
            self.import_wkt(srs_input)
            return
        elif isinstance(srs_input, six.string_types):
            # Encoding to ASCII if unicode passed in.
            if isinstance(srs_input, six.text_type):
                srs_input = srs_input.encode('ascii')
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                pass
        elif isinstance(srs_input, six.integer_types):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            # An existing OGR SRS pointer was passed in directly.
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)
        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            buf = c_char_p(b'')
            srs = capi.new_srs(buf)
        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs
        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)
    def __del__(self):
        "Destroys this spatial reference."
        # Guard against interpreter-shutdown ordering: capi may already be
        # torn down when __del__ runs.
        if self._ptr and capi:
            capi.release_srs(self._ptr)
    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist.  Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT.  For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print(srs['GEOGCS'])
        WGS 84
        >>> print(srs['DATUM'])
        WGS_1984
        >>> print(srs['AUTHORITY'])
        EPSG
        >>> print(srs['AUTHORITY', 1]) # The authority value
        4326
        >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
        0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
        EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
        9122
        """
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)
    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt
    # #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, six.string_types) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, force_bytes(target), index)
    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, force_bytes(target))
    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, force_bytes(target))
    def clone(self):
        "Returns a clone of this SpatialReference object."
        return SpatialReference(capi.clone_srs(self.ptr))
    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)
    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)
    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)
    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)
    # #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        if self.projected:
            return self.attr_value('PROJCS')
        elif self.geographic:
            return self.attr_value('GEOGCS')
        elif self.local:
            return self.attr_value('LOCAL_CS')
        else:
            return None
    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None
    # #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name,
        and will automatically determines whether to return the linear
        or angular units.
        """
        units, name = None, None
        if self.projected or self.local:
            units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        if name is not None:
            name = force_text(name)
        return (units, name)
    # #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)
    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        return capi.semi_major(self.ptr, byref(c_int()))
    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))
    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))
    # #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
        (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))
    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))
    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
        (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))
    # #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)
    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)
    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, force_bytes(user_input))
    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)
    # #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def pretty_wkt(self, simplify=0):
        # NOTE(review): properties are accessed without a call, so the
        # `simplify` parameter can never be supplied and always stays 0.
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))
    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj
    @property
    def xml(self, dialect=''):
        # NOTE(review): as with pretty_wkt, the `dialect` parameter of this
        # property is unreachable by callers and is always ''.
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
    "The coordinate system transformation object."
    def __init__(self, source, target):
        "Initializes on a source and target SpatialReference objects."
        if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Keep only the names for __str__; the SRS objects themselves are
        # not referenced after the OGR transform is created.
        self._srs1_name = source.name
        self._srs2_name = target.name
    def __del__(self):
        "Deletes this Coordinate Transformation object."
        # capi may be gone during interpreter shutdown; guard before freeing.
        if self._ptr and capi:
            capi.destroy_ct(self._ptr)
    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| mit |
CMLL/taiga-back | taiga/searches/serializers.py | 13 | 1899 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.projects.issues.serializers import IssueSerializer
from taiga.projects.userstories.serializers import UserStorySerializer
from taiga.projects.tasks.serializers import TaskSerializer
from taiga.projects.wiki.serializers import WikiPageSerializer
from taiga.projects.issues.models import Issue
from taiga.projects.userstories.models import UserStory
from taiga.projects.tasks.models import Task
from taiga.projects.wiki.models import WikiPage
class IssueSearchResultsSerializer(IssueSerializer):
    """Issue serializer restricted to the fields shown in search results."""
    class Meta:
        model = Issue
        fields = ('id', 'ref', 'subject', 'status', 'assigned_to')
class TaskSearchResultsSerializer(TaskSerializer):
    """Task serializer restricted to the fields shown in search results."""
    class Meta:
        model = Task
        fields = ('id', 'ref', 'subject', 'status', 'assigned_to')
class UserStorySearchResultsSerializer(UserStorySerializer):
    """User-story serializer restricted to the fields shown in search results."""
    class Meta:
        model = UserStory
        fields = ('id', 'ref', 'subject', 'status', 'total_points')
class WikiPageSearchResultsSerializer(WikiPageSerializer):
    """Wiki-page serializer restricted to the fields shown in search results."""
    class Meta:
        model = WikiPage
        fields = ('id', 'slug')
| agpl-3.0 |
Sabayon/anaconda | pyanaconda/product.py | 6 | 2324 | #
# product.py: product identification string
#
# Copyright (C) 2003 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ConfigParser
import os
from pyanaconda.i18n import _
# First, load in the defaults. In order of precedence: contents of
# .buildstamp, environment, stupid last ditch hardcoded defaults.
config = ConfigParser.ConfigParser()
config.add_section("Main")
config.set("Main", "Arch", os.environ.get("ANACONDA_PRODUCTARCH", os.uname()[4]))
config.set("Main", "BugURL", os.environ.get("ANACONDA_BUGURL", "your distribution provided bug reporting tool"))
config.set("Main", "IsFinal", os.environ.get("ANACONDA_ISFINAL", "false"))
config.set("Main", "Product", os.environ.get("ANACONDA_PRODUCTNAME", "anaconda"))
config.set("Main", "UUID", "")
config.set("Main", "Version", os.environ.get("ANACONDA_PRODUCTVERSION", "bluesky"))
# Now read in the .buildstamp file, wherever it may be.
config.read(["/tmp/product/.buildstamp", "/.buildstamp", os.environ.get("PRODBUILDPATH", "")])
# Set up some variables we import throughout, applying a couple transforms as necessary.
bugUrl = config.get("Main", "BugURL")
isFinal = config.getboolean("Main", "IsFinal")
productArch = config.get("Main", "Arch")
productName = config.get("Main", "Product")
productStamp = config.get("Main", "UUID")
productVersion = config.get("Main", "Version")
# If no arch was configured, fall back to the part of the UUID after the
# first dot (the stamp presumably ends in ".<arch>" — TODO confirm).
# Bug fix: this previously used str.index(), which raises ValueError rather
# than ever returning -1, so the comparison could never be False and a
# dot-less UUID crashed here; str.find() returns -1 as intended.
if not productArch and productStamp.find(".") != -1:
    productArch = productStamp[productStamp.index(".")+1:]
if productVersion == "development":
    productVersion = "rawhide"
def distributionText():
    """Return the localized "<product> <version> INSTALLATION" banner line."""
    substitutions = {"productName": productName,
                     "productVersion": productVersion}
    return _("%(productName)s %(productVersion)s INSTALLATION") % substitutions
| gpl-2.0 |
EricE/evelink | evelink/eve.py | 2 | 16295 | from evelink import api
class EVE(object):
"""Wrapper around /eve/ of the EVE API."""
    @api.auto_api
    def __init__(self, api=None):
        # `api` is filled in by the auto_api decorator when not supplied.
        self.api = api
    def certificate_tree(self):
        """Returns a list of certificates in eve.

        The XML is a rowset of categories, each containing a rowset of
        classes, each containing a rowset of certificates; the result mirrors
        that nesting as dicts keyed by category name / class name / cert id.
        """
        api_result = self.api.get('eve/CertificateTree')
        result = {}
        rowset = api_result.find('rowset')
        categories = rowset.findall('row')
        for category in categories:
            cat_attr = category.attrib
            cat_name = cat_attr['categoryName']
            cat_tree = {
                'name': cat_name,
                'id': int(cat_attr['categoryID']),
                'classes': {},
            }
            cls_rowset = category.find('rowset')
            classes = cls_rowset.findall('row')
            for cls in classes:
                cls_attr = cls.attrib
                cls_name = cls_attr['className']
                cls_def = {
                    'name': cls_name,
                    'id': int(cls_attr['classID']),
                    'certificates': {}
                }
                cert_rowset = cls.find('rowset')
                certificates = cert_rowset.findall('row')
                for cert in certificates:
                    cert_attr = cert.attrib
                    cert_id = int(cert_attr['certificateID'])
                    cert_entry = {
                        'id': cert_id,
                        'grade': int(cert_attr['grade']),
                        'corp_id': int(cert_attr['corporationID']),
                        'description': cert_attr['description'],
                        'required_skills': {},
                        'required_certs': {}
                    }
                    # Each certificate row carries two named rowsets:
                    # 'requiredSkills' and 'requiredCertificates'.
                    req_rowsets = {}
                    for rowset in cert.findall('rowset'):
                        req_rowsets[rowset.attrib['name']] = rowset
                    req_skills = req_rowsets['requiredSkills'].findall('row')
                    for skill in req_skills:
                        cert_entry['required_skills'][
                            int(skill.attrib['typeID'])
                        ] = int(skill.attrib['level'])
                    req_certs = req_rowsets['requiredCertificates'].findall('row')
                    for req_cert in req_certs:
                        cert_entry['required_certs'][
                            int(req_cert.attrib['certificateID'])
                        ] = int(req_cert.attrib['grade'])
                    cls_def['certificates'][cert_id] = cert_entry
                cat_tree['classes'][cls_name] = cls_def
            result[cat_name] = cat_tree
        # NOTE(review): despite the docstring, this returns a dict keyed by
        # category name, not a list.
        return result
def character_names_from_ids(self, id_list):
"""Retrieve a dict mapping character IDs to names.
id_list:
A list of ids to retrieve names.
NOTE: *ALL* character IDs passed to this function
must be valid - an invalid character ID will cause
the entire call to fail.
"""
api_result = self.api.get('eve/CharacterName', {
'IDs': set(id_list),
})
if api_result is None:
# The API doesn't actually tell us which character IDs are invalid
msg = "One or more of these character IDs are invalid: %r"
raise ValueError(msg % id_list)
rowset = api_result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
name = row.attrib['name']
char_id = int(row.attrib['characterID'])
results[char_id] = name
return results
def character_name_from_id(self, char_id):
"""Retrieve the character's name based on ID.
Convenience wrapper around character_names_from_ids().
"""
return self.character_names_from_ids([char_id]).get(char_id)
def character_ids_from_names(self, name_list):
"""Retrieve a dict mapping character names to IDs.
name_list:
A list of names to retrieve character IDs.
Names of unknown characters will map to None.
"""
api_result = self.api.get('eve/CharacterID', {
'names': set(name_list),
})
rowset = api_result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
name = row.attrib['name']
char_id = int(row.attrib['characterID']) or None
results[name] = char_id
return results
def character_id_from_name(self, name):
"""Retrieve the named character's ID.
Convenience wrapper around character_ids_from_names().
"""
return self.character_ids_from_names([name]).get(name)
    def character_info_from_id(self, char_id):
        """Retrieve a dict of info about the designated character.

        Raises ValueError when the API returns no result for the ID.
        """
        api_result = self.api.get('eve/CharacterInfo', {
            'characterID': char_id,
        })
        if api_result is None:
            raise ValueError("Unable to fetch info for character %r" % char_id)

        # Typed accessors (str/int/float/bool/timestamp) over the result tree.
        _str, _int, _float, _bool, _ts = api.elem_getters(api_result)

        results = {
            'id': _int('characterID'),
            'name': _str('characterName'),
            'race': _str('race'),
            'bloodline': _str('bloodline'),
            'sec_status': _float('securityStatus'),
            'skillpoints': _int('skillPoints'),
            'location': _str('lastKnownLocation'),
            'isk': _float('accountBalance'),
            'corp': {
                'id': _int('corporationID'),
                'name': _str('corporation'),
                'timestamp': _ts('corporationDate'),
            },
            'alliance': {
                'id': _int('allianceID'),
                'name': _str('alliance'),
                'timestamp': _ts('allianceDate'),
            },
            'ship': {
                'name': _str('shipName'),
                'type_id': _int('shipTypeID'),
                'type_name': _str('shipTypeName'),
            },
            'history': [],
        }

        # Add in corp history (employmentHistory rowset: one row per stint).
        history = api_result.find('rowset')
        for row in history.findall('row'):
            corp_id = int(row.attrib['corporationID'])
            start_date = api.parse_ts(row.attrib['startDate'])
            results['history'].append({
                'corp_id': corp_id,
                'start_ts': start_date,
            })

        return results
def alliances(self):
"""Return a dict of all alliances in EVE."""
api_result = self.api.get('eve/AllianceList')
results = {}
rowset = api_result.find('rowset')
for row in rowset.findall('row'):
alliance = {
'name': row.attrib['name'],
'ticker': row.attrib['shortName'],
'id': int(row.attrib['allianceID']),
'executor_id': int(row.attrib['executorCorpID']),
'member_count': int(row.attrib['memberCount']),
'timestamp': api.parse_ts(row.attrib['startDate']),
'member_corps': {},
}
corp_rowset = row.find('rowset')
for corp_row in corp_rowset.findall('row'):
corp_id = int(corp_row.attrib['corporationID'])
corp_ts = api.parse_ts(corp_row.attrib['startDate'])
alliance['member_corps'][corp_id] = {
'id': corp_id,
'timestamp': corp_ts,
}
results[alliance['id']] = alliance
return results
def errors(self):
"""Return a mapping of error codes to messages."""
api_result = self.api.get('eve/ErrorList')
rowset = api_result.find('rowset')
results = {}
for row in rowset.findall('row'):
code = int(row.attrib['errorCode'])
message = row.attrib['errorText']
results[code] = message
return results
    def faction_warfare_stats(self):
        """Return various statistics from Faction Warfare.

        Result: global kill/point totals, per-faction stats keyed by faction
        id, and the list of faction-vs-faction wars.
        """
        api_result = self.api.get('eve/FacWarStats')

        totals = api_result.find('totals')
        # Index the two rowsets ('factions' and 'factionWars') by name.
        rowsets = dict((r.attrib['name'], r) for r in api_result.findall('rowset'))
        _str, _int, _float, _bool, _ts = api.elem_getters(totals)

        results = {
            'kills': {
                'yesterday': _int('killsYesterday'),
                'week': _int('killsLastWeek'),
                'total': _int('killsTotal'),
            },
            'points': {
                'yesterday': _int('victoryPointsYesterday'),
                'week': _int('victoryPointsLastWeek'),
                'total': _int('victoryPointsTotal'),
            },
            'factions': {},
            'wars': [],
        }

        for row in rowsets['factions'].findall('row'):
            a = row.attrib
            faction = {
                'id': int(a['factionID']),
                'name': a['factionName'],
                'pilots': int(a['pilots']),
                'systems': int(a['systemsControlled']),
                'kills': {
                    'yesterday': int(a['killsYesterday']),
                    'week': int(a['killsLastWeek']),
                    'total': int(a['killsTotal']),
                },
                'points': {
                    'yesterday': int(a['victoryPointsYesterday']),
                    'week': int(a['victoryPointsLastWeek']),
                    'total': int(a['victoryPointsTotal']),
                },
            }
            results['factions'][faction['id']] = faction

        for row in rowsets['factionWars'].findall('row'):
            a = row.attrib
            war = {
                'faction': {
                    'id': int(a['factionID']),
                    'name': a['factionName'],
                },
                'against': {
                    'id': int(a['againstID']),
                    'name': a['againstName'],
                },
            }
            results['wars'].append(war)

        return results
def skill_tree(self):
"""Return a dict of all available skill groups."""
api_result = self.api.get('eve/SkillTree')
rowset = api_result.find('rowset') # skillGroups
results = {}
name_cache = {}
for row in rowset.findall('row'):
# the skill group data
g = row.attrib
group = {
'id': int(g['groupID']),
'name': g['groupName'],
'skills': {}
}
# Because :ccp: groups can sometimes be listed
# multiple times with different skills, and the
# correct result is to add the contents together
group = results.get(group['id'], group)
# now get the actual skill data
skills_rs = row.find('rowset') # skills
for skill_row in skills_rs.findall('row'):
a = skill_row.attrib
_str, _int, _float, _bool, _ts = api.elem_getters(skill_row)
req_attrib = skill_row.find('requiredAttributes')
skill = {
'id': int(a['typeID']),
'group_id': int(a['groupID']),
'name': a['typeName'],
'published': (a['published'] == '1'),
'description': _str('description'),
'rank': _int('rank'),
'required_skills': {},
'bonuses': {},
'attributes': {
'primary': api.get_named_value(req_attrib, 'primaryAttribute'),
'secondary': api.get_named_value(req_attrib, 'secondaryAttribute'),
}
}
name_cache[skill['id']] = skill['name']
# Check each rowset inside the skill, and branch based on the name attribute
for sub_rs in skill_row.findall('rowset'):
if sub_rs.attrib['name'] == 'requiredSkills':
for sub_row in sub_rs.findall('row'):
b = sub_row.attrib
req = {
'level': int(b['skillLevel']),
'id': int(b['typeID']),
}
skill['required_skills'][req['id']] = req
elif sub_rs.attrib['name'] == 'skillBonusCollection':
for sub_row in sub_rs.findall('row'):
b = sub_row.attrib
bonus = {
'type': b['bonusType'],
'value': float(b['bonusValue']),
}
skill['bonuses'][bonus['type']] = bonus
group['skills'][skill['id']] = skill
results[group['id']] = group
# Second pass to fill in required skill names
for group in results.itervalues():
for skill in group['skills'].itervalues():
for skill_id, skill_info in skill['required_skills'].iteritems():
skill_info['name'] = name_cache.get(skill_id)
return results
def reference_types(self):
"""Return a dict containing id -> name reference type mappings."""
api_result = self.api.get('eve/RefTypes')
rowset = api_result.find('rowset')
results = {}
for row in rowset.findall('row'):
a = row.attrib
results[int(a['refTypeID'])] = a['refTypeName']
return results
    def faction_warfare_leaderboard(self):
        """Return top-100 lists from Faction Warfare.

        Result: {'char'|'corp'|'faction': {'kills'|'points':
        {'yesterday'|'week'|'total': [top-100 entries]}}}.
        """
        api_result = self.api.get('eve/FacWarTopStats')

        def parse_top_100(rowset, prefix, attr, attr_name):
            # Build the list of {id, name, <attr_name>} entries for one rowset;
            # attribute names are '<prefix>ID' / '<prefix>Name'.
            top100 = []
            id_field = '%sID' % prefix
            name_field = '%sName' % prefix
            for row in rowset.findall('row'):
                a = row.attrib
                top100.append({
                    'id': int(a[id_field]),
                    'name': a[name_field],
                    attr_name: int(a[attr]),
                })
            return top100

        def parse_section(section, prefix):
            # Each section (characters/corporations/factions) has six rowsets:
            # kills and victory points over three time windows.
            section_result = {}
            rowsets = dict((r.attrib['name'], r) for r in section.findall('rowset'))

            section_result['kills'] = {
                'yesterday': parse_top_100(rowsets['KillsYesterday'], prefix, 'kills', 'kills'),
                'week': parse_top_100(rowsets['KillsLastWeek'], prefix, 'kills', 'kills'),
                'total': parse_top_100(rowsets['KillsTotal'], prefix, 'kills', 'kills'),
            }

            section_result['points'] = {
                'yesterday': parse_top_100(rowsets['VictoryPointsYesterday'],
                                           prefix, 'victoryPoints', 'points'),
                'week': parse_top_100(rowsets['VictoryPointsLastWeek'],
                                      prefix, 'victoryPoints', 'points'),
                'total': parse_top_100(rowsets['VictoryPointsTotal'],
                                       prefix, 'victoryPoints', 'points'),
            }

            return section_result

        results = {
            'char': parse_section(api_result.find('characters'), 'character'),
            'corp': parse_section(api_result.find('corporations'), 'corporation'),
            'faction': parse_section(api_result.find('factions'), 'faction'),
        }

        return results
def conquerable_stations(self):
api_result = self.api.get('eve/ConquerableStationlist')
results = {}
rowset = api_result.find('rowset')
for row in rowset.findall('row'):
station = {
'id': int(row.attrib['stationID']),
'name': row.attrib['stationName'],
'type_id': int(row.attrib['stationTypeID']),
'system_id': int(row.attrib['solarSystemID']),
'corp': {
'id': int(row.attrib['corporationID']),
'name': row.attrib['corporationName'] }
}
results[station['id']] = station
return results
| mit |
timlinux/QGIS | python/plugins/processing/algs/qgis/KeepNBiggestParts.py | 30 | 4416 | # -*- coding: utf-8 -*-
"""
***************************************************************************
KeepNBiggestParts.py
---------------------
Date : July 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2014'
__copyright__ = '(C) 2014, Victor Olaya'
from operator import itemgetter
from qgis.core import (QgsGeometry,
QgsFeatureSink,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class KeepNBiggestParts(QgisAlgorithm):
    """Keep only the N largest parts of each multi-part polygon feature.

    Single-part features (and features with at most N parts) are copied
    through unchanged.
    """

    POLYGONS = 'POLYGONS'
    PARTS = 'PARTS'
    OUTPUT = 'OUTPUT'

    def group(self):
        return self.tr('Vector geometry')

    def groupId(self):
        return 'vectorgeometry'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.POLYGONS,
                                                              self.tr('Polygons'), [QgsProcessing.TypeVectorPolygon]))
        self.addParameter(QgsProcessingParameterNumber(self.PARTS,
                                                       self.tr('Parts to keep'),
                                                       QgsProcessingParameterNumber.Integer,
                                                       1, False, 1))
        self.addParameter(
            QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Parts'), QgsProcessing.TypeVectorPolygon))

    def name(self):
        return 'keepnbiggestparts'

    def displayName(self):
        return self.tr('Keep N biggest parts')

    def processAlgorithm(self, parameters, context, feedback):
        source = self.parameterAsSource(parameters, self.POLYGONS, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.POLYGONS))

        parts = self.parameterAsInt(parameters, self.PARTS, context)

        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               source.fields(), source.wkbType(), source.sourceCrs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        features = source.getFeatures()
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        for current, feat in enumerate(features):
            if feedback.isCanceled():
                break

            geom = feat.geometry()
            if geom.isMultipart():
                out_feature = feat
                geoms = geom.asGeometryCollection()
                # (index, area) pairs sorted ascending by area, so the
                # largest parts sit at the end of the list.
                geom_area = [(i, geoms[i].area()) for i in range(len(geoms))]
                geom_area.sort(key=itemgetter(1))
                if parts == 1:
                    # Single biggest part stays a simple polygon geometry.
                    out_feature.setGeometry(geoms[geom_area[-1][0]])
                elif parts > len(geoms):
                    # Asked for more parts than exist: keep everything as-is.
                    out_feature.setGeometry(geom)
                else:
                    # NOTE: the original also called setGeometry(geom) here
                    # before immediately overwriting it; that redundant call
                    # was removed.
                    geomres = [geoms[i].asPolygon() for i, _ in geom_area[-parts:]]
                    out_feature.setGeometry(QgsGeometry.fromMultiPolygonXY(geomres))
                sink.addFeature(out_feature, QgsFeatureSink.FastInsert)
            else:
                sink.addFeature(feat, QgsFeatureSink.FastInsert)

            feedback.setProgress(int(current * total))

        return {self.OUTPUT: dest_id}
| gpl-2.0 |
chrsrds/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 10 | 7724 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from joblib import Memory
from sklearn.decomposition import randomized_svd
from urllib.request import urlopen
print(__doc__)
# #############################################################################
# Where to download the data, if not already on disk

# DBpedia 3.5.1 dumps (bzip2-compressed N-Triples): page redirects and the
# page-link graph. Local filenames are the last URL path segment.
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]
# Download each dump unless it is already present on disk. Context managers
# ensure both the HTTP response and the output file are closed even if the
# download fails partway through (the original leaked the file handle).
for url, filename in resources:
    if not os.path.exists(filename):
        print("Downloading data from '%s', please wait..." % url)
        with urlopen(url) as opener, open(filename, 'wb') as out_file:
            out_file.write(opener.read())
        print()
# #############################################################################
# Loading the redirect files

# NOTE(review): this Memory instance is currently unused (the @memory.cache
# decorator below is commented out), and `cachedir` was renamed `location`
# in later joblib releases — confirm before re-enabling caching.
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution"""
    # Follow a redirect if one exists, then hand out (or reuse) a dense
    # integer index for the resolved name.
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
# Slice bounds for stripping "<http://dbpedia.org/resource/...>" tokens:
# +1 skips the leading '<' plus the shared prefix, -1 drops the trailing '>'.
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    # Equivalent to nt_uri[SHORTNAME_SLICE]: skip '<' + prefix, drop '>'.
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it.

    Returns a dict mapping a (short) article name to the final target of its
    redirect chain, with cycles broken at the first repeated name.
    """
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        split = line.split()
        if len(split) != 4:
            # NOTE: BZ2File yields bytes; %r avoids the str + bytes TypeError
            # that plain concatenation raised here on Python 3.
            print("ignoring malformed line: %r" % line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        seen = {source}
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                # End of chain, or a cycle — stop at the last new target.
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)

    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        if len(split) != 4:
            # NOTE: BZ2File yields bytes; %r avoids the str + bytes TypeError
            # that plain concatenation raised here on Python 3.
            print("ignoring malformed line: %r" % line)
            continue
        # Edge from article i to article j, both after redirect resolution.
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

        if limit is not None and l >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # LIL is efficient for incremental construction; convert to CSR after.
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)
# Invert the name -> index map so results can be reported by article name.
names = {i: name for name, i in index_map.items()}

print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))

# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:

      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n = X.shape[0]
    X = X.copy()  # normalized in place below; don't clobber the caller's matrix
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()
    # NOTE(review): despite the name, these are row sums (out-link counts in
    # this matrix orientation) — confirm against get_adjacency_matrix.

    print("Normalizing the graph")
    for i in incoming_counts.nonzero()[0]:
        # Scale row i of the CSR data so it sums to 1 (transition row).
        X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
    # Dangling nodes (rows still summing to 0) redistribute uniformly.
    dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0),
                                 1.0 / n, 0)).ravel()

    scores = np.full(n, 1. / n, dtype=np.float32)  # initial guess
    for i in range(max_iter):
        print("power iteration #%d" % i)
        prev_scores = scores
        # Damped step: follow links + dangling mass + uniform teleportation.
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n)
        # check convergence: normalized l_inf norm
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - prev_scores).max() / scores_max
        print("error: %0.6f" % err)
        if err < n * tol:
            return scores

    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# Top-10 articles by power-iteration centrality (compare with the SVD ranking
# printed above).
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
yugang/crosswalk-test-suite | webapi/tct-sse-w3c-tests/sse-py/support/sse.py | 5 | 1769 | # Copyright (c) 2012 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan,Weiwei <weiwix.fan@intel.com>
from datetime import *
def main(request, response):
    """wptserve handler: emit one SSE event carrying the current server time."""
    response.headers.update([("Content-Type", "text/event-stream")])
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return 'data: {"the server time is": "' + now + '"}\n\n'
| bsd-3-clause |
theanalyst/cinder | cinder/tests/api/v1/test_volumes.py | 2 | 48554 | # Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.tests import fake_notifier
from cinder.tests.image import fake as fake_image
from cinder import utils
from cinder.volume import api as volume_api
NS = '{http://docs.openstack.org/volume/api/v1}'
TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
CONF = cfg.CONF
def stub_snapshot_get(self, context, snapshot_id):
    """Test stub for snapshot lookup: only TEST_SNAPSHOT_UUID exists."""
    if snapshot_id != TEST_SNAPSHOT_UUID:
        raise exception.NotFound

    return {
        'id': snapshot_id,
        'volume_id': 12,
        'status': 'available',
        'volume_size': 100,
        'created_at': None,
        'display_name': 'Default name',
        'display_description': 'Default description',
    }
class VolumeApiTest(test.TestCase):
    def setUp(self):
        """Wire up stubs and fakes so the controller runs without a backend."""
        super(VolumeApiTest, self).setUp()
        self.addCleanup(fake_notifier.reset)
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        # Replace the image service so no Glance instance is needed.
        fake_image.stub_out_image_service(self.stubs)
        self.controller = volumes.VolumeController(self.ext_mgr)
        self.flags(host='fake',
                   notification_driver=[fake_notifier.__name__])
        self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
        self.stubs.Set(db, 'service_get_all_by_topic',
                       stubs.stub_service_get_all_by_topic)
        self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
def test_volume_create(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 100,
'encrypted': False}}
self.assertEqual(res_dict, expected)
    def test_volume_create_with_type(self):
        """Volume type may be given by name or id; bad names yield 404."""
        vol_type = CONF.default_volume_type
        db.volume_type_create(context.get_admin_context(),
                              dict(name=vol_type, extra_specs={}))
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)
        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1",
               "volume_type": "FakeTypeName"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        # Raise 404 when type name isn't valid
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        # Use correct volume type name
        vol.update(dict(volume_type=CONF.default_volume_type))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(len(res_dict), 1)
        self.assertEqual(res_dict['volume']['volume_type'],
                         db_vol_type['name'])
        # Use correct volume type id
        vol.update(dict(volume_type=db_vol_type['id']))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(len(res_dict), 1)
        self.assertEqual(res_dict['volume']['volume_type'],
                         db_vol_type['name'])
def test_volume_creation_fails_with_bad_size(self):
vol = {"size": '',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_creation_fails_with_bad_availability_zone(self):
vol = {"size": '1',
"name": "Volume Test Name",
"description": "Volume Test Desc",
"availability_zone": "zonen:hostn"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req, body)
    def test_volume_create_with_image_id(self):
        """A valid imageRef UUID is accepted and echoed in the response."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)

        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "nova",
               "imageRef": test_id}
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'Volume Test Desc',
                               'availability_zone': 'nova',
                               'display_name': 'Volume Test Name',
                               'encrypted': False,
                               'attachments': [{'device': '/',
                                                'server_id': 'fakeuuid',
                                                'host_name': None,
                                                'id': '1',
                                                'volume_id': '1'}],
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'image_id': test_id,
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {},
                               'id': '1',
                               'created_at': datetime.datetime(1, 1, 1,
                                                               1, 1, 1),
                               'size': '1'}}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(res_dict, expected)
def test_volume_create_with_image_id_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_with_empty_string(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": 1,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": ''}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
    def test_volume_update(self):
        """PUT with a new display_name returns the updated serialization and
        emits the expected update notifications."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)

        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [{
                'id': '1',
                'volume_id': '1',
                'server_id': 'fakeuuid',
                'host_name': None,
                'device': '/'
            }],
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw',
                         'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'size': 1}}
        self.assertEqual(res_dict, expected)
        # update.start + update.end notifications.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    def test_volume_update_metadata(self):
        """PUT with a metadata dict merges it into the stored metadata."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)

        updates = {
            "metadata": {"qos_max_iops": 2000}
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'displayname',
            'encrypted': False,
            'attachments': [{
                'id': '1',
                'volume_id': '1',
                'server_id': 'fakeuuid',
                'host_name': None,
                'device': '/'
            }],
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {"qos_max_iops": 2000,
                         "readonly": "False",
                         "attached_mode": "rw"},
            'id': '1',
            'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'size': 1
        }}
        self.assertEqual(res_dict, expected)
        # update.start + update.end notifications.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    def test_volume_update_with_admin_metadata(self):
        """Admin callers see 'readonly' admin metadata but keys marked
        invisible are never serialized."""
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)

        volume = stubs.stub_volume("1")
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)

        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.update(req, '1', body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [{
                'id': '1',
                'volume_id': '1',
                'server_id': 'fakeuuid',
                'host_name': None,
                'device': '/'
            }],
            'bootable': 'false',
            'volume_type': 'None',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'key': 'value',
                         'readonly': 'True'},
            'id': '1',
            'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'size': 1}}
        self.assertEqual(res_dict, expected)
        # update.start + update.end notifications.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_invalid_body(self):
body = {'display_name': 'missing top level volume key'}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, '1', body)
    def test_volume_list(self):
        """Index of volumes returns the stubbed volume and caches it."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_get_all_by_project)
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [{'device': '/',
                                                  'server_id': 'fakeuuid',
                                                  'host_name': None,
                                                  'id': '1',
                                                  'volume_id': '1'}],
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': '1',
                                 'created_at': datetime.datetime(1, 1, 1,
                                                                 1, 1, 1),
                                 'size': 1}]}
        self.assertEqual(res_dict, expected)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))
    def test_volume_list_with_admin_metadata(self):
        """Index as admin shows user metadata plus visible admin keys only."""
        volume = stubs.stub_volume("1")
        # Drop API-level stub fields that the DB create call does not accept.
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        req = fakes.HTTPRequest.blank('/v1/volumes')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [{'device': '/',
                                                  'server_id': 'fakeuuid',
                                                  'host_name': None,
                                                  'id': '1',
                                                  'volume_id': '1'}],
                                 'bootable': 'false',
                                 'volume_type': 'None',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 # 'invisible_key' must be filtered out.
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': '1',
                                 'created_at': datetime.datetime(1, 1, 1,
                                                                 1, 1, 1),
                                 'size': 1}]}
        self.assertEqual(res_dict, expected)
    def test_volume_list_detail(self):
        """Detailed listing returns the stubbed volume and caches it.

        NOTE(review): the request targets /v1/volumes/detail but the test
        invokes self.controller.index() rather than detail() — looks like it
        should exercise the detail action; confirm against the v1 router.
        """
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_get_all_by_project)
        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [{'device': '/',
                                                  'server_id': 'fakeuuid',
                                                  'host_name': None,
                                                  'id': '1',
                                                  'volume_id': '1'}],
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': '1',
                                 'created_at': datetime.datetime(1, 1, 1,
                                                                 1, 1, 1),
                                 'size': 1}]}
        self.assertEqual(res_dict, expected)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))
    def test_volume_list_detail_with_admin_metadata(self):
        """Detailed listing as admin filters invisible admin metadata."""
        volume = stubs.stub_volume("1")
        # Drop API-level stub fields that the DB create call does not accept.
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [{'device': '/',
                                                  'server_id': 'fakeuuid',
                                                  'host_name': None,
                                                  'id': '1',
                                                  'volume_id': '1'}],
                                 'bootable': 'false',
                                 'volume_type': 'None',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 # 'invisible_key' must be filtered out.
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': '1',
                                 'created_at': datetime.datetime(1, 1, 1,
                                                                 1, 1, 1),
                                 'size': 1}]}
        self.assertEqual(res_dict, expected)
    def test_volume_show(self):
        """Show returns the full volume view and caches the volume by id."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [{'device': '/',
                                                'server_id': 'fakeuuid',
                                                'host_name': None,
                                                'id': '1',
                                                'volume_id': '1'}],
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'attached_mode': 'rw',
                                            'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(1, 1, 1,
                                                               1, 1, 1),
                               'size': 1}}
        self.assertEqual(res_dict, expected)
        # Finally test that we cached the returned volume
        self.assertIsNotNone(req.cached_resource_by_id('1'))
    def test_volume_show_no_attachments(self):
        """A detached volume shows an empty attachments list.

        With no attachment, the view also omits 'attached_mode' from the
        metadata — only 'readonly' remains.
        """
        def stub_volume_get(self, context, volume_id, **kwargs):
            # Force the detached state regardless of the requested id.
            return stubs.stub_volume(volume_id, attach_status='detached')

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(1, 1, 1,
                                                               1, 1, 1),
                               'size': 1}}
        self.assertEqual(res_dict, expected)
    def test_volume_show_bootable(self):
        """A volume carrying glance metadata is reported as bootable."""
        def stub_volume_get(self, context, volume_id, **kwargs):
            # Presence of volume_glance_metadata drives bootable='true'.
            return (stubs.stub_volume(volume_id,
                    volume_glance_metadata=dict(foo='bar')))

        self.stubs.Set(volume_api.API, 'get', stub_volume_get)
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [{'device': '/',
                                                'server_id': 'fakeuuid',
                                                'host_name': None,
                                                'id': '1',
                                                'volume_id': '1'}],
                               'bootable': 'true',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'attached_mode': 'rw',
                                            'readonly': 'False'},
                               'id': '1',
                               'created_at': datetime.datetime(1, 1, 1,
                                                               1, 1, 1),
                               'size': 1}}
        self.assertEqual(res_dict, expected)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
1)
# Finally test that we did not cache anything
self.assertIsNone(req.cached_resource_by_id('1'))
    def test_volume_detail_limit_offset(self):
        """limit=2/offset=1 over a two-volume listing returns only vol2.

        Run twice, once with an admin context and once without.

        NOTE(review): the request URL is built with a backslash line
        continuation, so any leading whitespace on the continuation line
        becomes part of the URL — confirm the intended query string.
        """
        def volume_detail_limit_offset(is_admin):
            # Always return the same two volumes regardless of paging args;
            # the controller is expected to apply limit/offset itself.
            def stub_volume_get_all_by_project(context, project_id, marker,
                                               limit, sort_key, sort_dir,
                                               filters=None,
                                               viewable_admin_meta=False):
                return [
                    stubs.stub_volume(1, display_name='vol1'),
                    stubs.stub_volume(2, display_name='vol2'),
                ]
            self.stubs.Set(db, 'volume_get_all_by_project',
                           stub_volume_get_all_by_project)
            self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
            req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2\
&offset=1',
                                          use_admin_context=is_admin)
            res_dict = self.controller.index(req)
            volumes = res_dict['volumes']
            self.assertEqual(len(volumes), 1)
            self.assertEqual(volumes[0]['id'], 2)

        # admin case
        volume_detail_limit_offset(is_admin=True)
        # non_admin case
        volume_detail_limit_offset(is_admin=False)
    def test_volume_show_with_admin_metadata(self):
        """Show as admin filters invisible admin metadata from the view."""
        volume = stubs.stub_volume("1")
        # Drop API-level stub fields that the DB create call does not accept.
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        admin_ctx = context.RequestContext('admin', 'fakeproject', True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.show(req, '1')
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [{'device': '/',
                                                'server_id': 'fakeuuid',
                                                'host_name': None,
                                                'id': '1',
                                                'volume_id': '1'}],
                               'bootable': 'false',
                               'volume_type': 'None',
                               'snapshot_id': None,
                               'source_volid': None,
                               # 'invisible_key' must be filtered out.
                               'metadata': {'key': 'value',
                                            'readonly': 'True'},
                               'id': '1',
                               'created_at': datetime.datetime(1, 1, 1,
                                                               1, 1, 1),
                               'size': 1}}
        self.assertEqual(res_dict, expected)
def test_volume_show_with_encrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id='fake_id')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['volume']['encrypted'], True)
def test_volume_show_with_unencrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id=None)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['volume']['encrypted'], False)
def test_volume_delete(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def test_admin_list_volumes_limited_to_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/fake/volumes',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_admin_list_volumes_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(3, len(res['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_non_admin_get_by_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/fake/volumes')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
    def test_add_visible_admin_metadata_visible_key_only(self):
        """add_visible_admin_metadata merges only the visible admin keys.

        Exercised for both representations the helper accepts: list-of-dict
        metadata rows (volume_metadata/volume_admin_metadata) and plain dict
        metadata (metadata/admin_metadata).  'invisible_key' must never be
        merged into the user-visible metadata.
        """
        admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
                          {"key": "readonly", "value": "visible"},
                          {"key": "attached_mode", "value": "visible"}]
        metadata = [{"key": "key", "value": "value"}]
        volume = dict(volume_admin_metadata=admin_metadata,
                      volume_metadata=metadata)
        utils.add_visible_admin_metadata(volume)
        self.assertEqual(volume['volume_metadata'],
                         [{"key": "key", "value": "value"},
                          {"key": "readonly", "value": "visible"},
                          {"key": "attached_mode", "value": "visible"}])

        # Same check with the dict-based metadata representation.
        admin_metadata = {"invisible_key": "invisible_value",
                          "readonly": "visible",
                          "attached_mode": "visible"}
        metadata = {"key": "value"}
        volume = dict(admin_metadata=admin_metadata,
                      metadata=metadata)
        utils.add_visible_admin_metadata(volume)
        self.assertEqual(volume['metadata'],
                         {'key': 'value',
                          'attached_mode': 'visible',
                          'readonly': 'visible'})
class VolumeSerializerTest(test.TestCase):
    """Checks the XML serializers used for volume show/index responses."""

    def _verify_volume_attachment(self, attach, tree):
        # Every attachment attribute must be serialized as its str() form.
        for attr in ('id', 'volume_id', 'server_id', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))

    def _verify_volume(self, vol, tree):
        """Assert that the XML element *tree* faithfully serializes *vol*."""
        self.assertEqual(tree.tag, NS + 'volume')
        for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
                     'display_name', 'display_description', 'volume_type',
                     'bootable', 'snapshot_id'):
            self.assertEqual(str(vol[attr]), tree.get(attr))
        for child in tree:
            self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
            # Bug fix: the assertIn above shows child tags carry the XML
            # namespace prefix, so the comparisons below must include NS as
            # well.  The previous bare-name comparisons could never match,
            # leaving both verification branches dead.
            if child.tag == NS + 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual(NS + 'attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == NS + 'metadata':
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertIn(gr_child.get("key"), not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
                                     gr_child.text)
                    not_seen.remove(gr_child.get('key'))
                self.assertEqual(0, len(not_seen))

    def test_volume_show_create_serializer(self):
        """A single volume round-trips through the show/create template."""
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availability_zone='vol_availability',
            bootable='false',
            created_at=datetime.datetime.now(),
            attachments=[dict(id='vol_id',
                              volume_id='vol_id',
                              server_id='instance_uuid',
                              device='/foo')],
            display_name='vol_name',
            display_description='vol_desc',
            volume_type='vol_type',
            snapshot_id='snap_id',
            source_volid='source_volid',
            metadata=dict(foo='bar',
                          baz='quux', ), )
        text = serializer.serialize(dict(volume=raw_volume))
        tree = etree.fromstring(text)
        self._verify_volume(raw_volume, tree)

    def test_volume_index_detail_serializer(self):
        """A list of volumes round-trips through the index/detail template."""
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(id='vol1_id',
                            status='vol1_status',
                            size=1024,
                            availability_zone='vol1_availability',
                            bootable='true',
                            created_at=datetime.datetime.now(),
                            attachments=[dict(id='vol1_id',
                                              volume_id='vol1_id',
                                              server_id='instance_uuid',
                                              device='/foo1')],
                            display_name='vol1_name',
                            display_description='vol1_desc',
                            volume_type='vol1_type',
                            snapshot_id='snap1_id',
                            source_volid=None,
                            metadata=dict(foo='vol1_foo',
                                          bar='vol1_bar', ), ),
                       dict(id='vol2_id',
                            status='vol2_status',
                            size=1024,
                            availability_zone='vol2_availability',
                            bootable='true',
                            created_at=datetime.datetime.now(),
                            attachments=[dict(id='vol2_id',
                                              volume_id='vol2_id',
                                              server_id='instance_uuid',
                                              device='/foo2')],
                            display_name='vol2_name',
                            display_description='vol2_desc',
                            volume_type='vol2_type',
                            snapshot_id='snap2_id',
                            source_volid=None,
                            metadata=dict(foo='vol2_foo',
                                          bar='vol2_bar', ), )]
        text = serializer.serialize(dict(volumes=raw_volumes))
        tree = etree.fromstring(text)
        self.assertEqual(NS + 'volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
    """Tests the XML deserializer for volume create request bodies."""

    def setUp(self):
        super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = volumes.CreateDeserializer()

    def test_minimal_volume(self):
        """A volume element with only a size attribute deserializes."""
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {"volume": {"size": "1", }, }
        self.assertEqual(request['body'], expected)

    def test_display_name(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_display_description(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_volume_type(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
        request = self.deserializer.deserialize(self_request)
        # Bug fix: the expected dict previously listed "display_name" twice
        # with the same value; the duplicate key was dead code and has been
        # removed.  The resulting dict is unchanged.
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_availability_zone(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_metadata(self):
        """Nested <metadata><meta> elements become a metadata dict."""
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
display_name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "display_name": "Volume-xml",
                "size": "1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(request['body'], expected)

    def test_full_volume(self):
        """All supported attributes and metadata deserialize together."""
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(request['body'], expected)

    def test_imageref(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_snapshot_id(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_source_volid(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])
class VolumesUnprocessableEntityTestCase(test.TestCase):
    """Tests of places we throw 422 Unprocessable Entity from."""

    def setUp(self):
        super(VolumesUnprocessableEntityTestCase, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = volumes.VolumeController(self.ext_mgr)

    def _unprocessable_volume_create(self, body):
        """Assert that a create request with *body* raises 422."""
        request = fakes.HTTPRequest.blank('/v2/fake/volumes')
        request.method = 'POST'
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, request, body)

    def test_create_no_body(self):
        self._unprocessable_volume_create(body=None)

    def test_create_missing_volume(self):
        self._unprocessable_volume_create(body={'foo': {'a': 'b'}})

    def test_create_malformed_entity(self):
        self._unprocessable_volume_create(body={'volume': 'string'})
| apache-2.0 |
"""
Unit tests for the stem.util.system functions. This works by mocking the
stem.util.system.call function to selectively exercise other functions. None of
these tests actually make system calls, use proc, or otherwise deal with the
system running the tests.
"""
import functools
import ntpath
import posixpath
import unittest
from mock import Mock, patch
from stem.util import system
# Base responses for the get_pid_by_name tests. The 'success' and
# 'multiple_results' entries are filled in by tests.

GET_PID_BY_NAME_BASE_RESULTS = {
  "success": [],
  "multiple_results": [],
  "malformed_data": ["bad data"],
  "no_results": [],
  "command_fails": None,
}

# testing output for system calls

# Canned 'ps' output in the BSD format (one matching launchd entry).
GET_PID_BY_NAME_PS_BSD = [
  " PID TT STAT TIME COMMAND",
  " 1 ?? Ss 9:00.22 launchd",
  " 10 ?? Ss 0:09.97 kextd",
  " 11 ?? Ss 5:47.36 DirectoryService",
  " 12 ?? Ss 3:01.44 notifyd"]

# Same BSD 'ps' output but with two launchd entries, for multiple = True.
GET_PID_BY_NAME_PS_BSD_MULTIPLE = [
  " PID TT STAT TIME COMMAND",
  " 1 ?? Ss 9:00.22 launchd",
  " 10 ?? Ss 0:09.97 kextd",
  " 41 ?? Ss 9:00.22 launchd"]

# Canned 'netstat -npl' style output; only the tor row carries a pid.
GET_PID_BY_PORT_NETSTAT_RESULTS = [
  "Active Internet connections (only servers)",
  "Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name",
  "tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN - ",
  "tcp 0 0 127.0.0.1:9051 0.0.0.0:* LISTEN 1641/tor ",
  "tcp6 0 0 ::1:631 :::* LISTEN - ",
  "udp 0 0 0.0.0.0:5353 0.0.0.0:* - ",
  "udp6 0 0 fe80::7ae4:ff:fe2f::123 :::* - "]

# Canned 'sockstat' output for a tor process bound to port 9051.
GET_PID_BY_PORT_SOCKSTAT_RESULTS = [
  "_tor tor 4397 7 tcp4 51.64.7.84:9051 *:*",
  "_tor tor 4397 12 tcp4 51.64.7.84:54011 80.3.121.7:9051",
  "_tor tor 4397 15 tcp4 51.64.7.84:59374 7.42.1.102:9051"]

# Canned 'lsof' output with tor listening on 9051 and apache on 80.
GET_PID_BY_PORT_LSOF_RESULTS = [
  "COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME",
  "tor 1745 atagar 6u IPv4 14229 0t0 TCP 127.0.0.1:9051 (LISTEN)",
  "apache 329 atagar 6u IPv4 14229 0t0 TCP 127.0.0.1:80 (LISTEN)"]

# Canned 'jls' output for a single tor jail.
GET_BSD_JAIL_PATH_RESULTS = [
  " JID IP Address Hostname Path",
  " 1 10.0.0.2 tor-jail /usr/jails/tor-jail",
]
def mock_call(base_cmd, responses):
  """
  Provides mocking for the system module's call function. There are a couple
  ways of using this...

  - Simple usage is for base_cmd is the system call we want to respond to and
    responses is a list containing the response. For instance...

    mock_call("ls my_dir", ["file1", "file2", "file3"])

  - The base_cmd can be a formatted string and responses are a dictionary of
    completions for that string to the responses. For instance...

    mock_call("ls %s", {"dir1": ["file1", "file2"], "dir2": ["file3", "file4"]})

  Arguments:
    base_cmd (str)         - command to match against
    responses (list, dict) - either list with the response, or mapping of
                             base_cmd formatted string completions to responses

  Returns:
    functor to override stem.util.system.call with
  """

  def _mock_call(base_cmd, responses, command, default = None):
    if isinstance(responses, list):
      # exact-match mode: reply only when the command matches base_cmd
      if base_cmd == command:
        return responses
      else:
        return default
    else:
      # template mode: reply when the command matches any completion
      for cmd_completion in responses:
        if command == base_cmd % cmd_completion:
          return responses[cmd_completion]

      return default

  return functools.partial(_mock_call, base_cmd, responses)
class TestSystem(unittest.TestCase):
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_is_running(self, call_mock):
"""
Exercises multiple use cases for the is_running function.
"""
# mock response with a linux and bsd resolver
running_commands = [u"irssi", u"moc", u"tor", u"ps", u" firefox "]
for ps_cmd in (system.IS_RUNNING_PS_LINUX, system.IS_RUNNING_PS_BSD):
call_mock.side_effect = mock_call(ps_cmd, running_commands)
self.assertTrue(system.is_running("irssi"))
self.assertTrue(system.is_running("moc"))
self.assertTrue(system.is_running("tor"))
self.assertTrue(system.is_running("ps"))
self.assertTrue(system.is_running("firefox"))
self.assertEqual(False, system.is_running("something_else"))
# mock both calls failing
call_mock.return_value = None
call_mock.side_effect = None
self.assertFalse(system.is_running("irssi"))
self.assertEquals(None, system.is_running("irssi"))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_name_pgrep(self, call_mock):
"""
Tests the get_pid_by_name function with pgrep responses.
"""
responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
responses["success"] = ["1111"]
responses["multiple_results"] = ["123", "456", "789"]
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_PGREP, responses)
for test_input in responses:
expected_response = 1111 if test_input == "success" else None
self.assertEquals(expected_response, system.get_pid_by_name(test_input))
self.assertEquals([123, 456, 789], system.get_pid_by_name("multiple_results", multiple = True))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_name_pidof(self, call_mock):
"""
Tests the get_pid_by_name function with pidof responses.
"""
responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
responses["success"] = ["1111"]
responses["multiple_results"] = ["123 456 789"]
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_PIDOF, responses)
for test_input in responses:
expected_response = 1111 if test_input == "success" else None
self.assertEquals(expected_response, system.get_pid_by_name(test_input))
self.assertEquals([123, 456, 789], system.get_pid_by_name("multiple_results", multiple = True))
@patch('stem.util.system.call')
@patch('stem.util.system.is_bsd', Mock(return_value = False))
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_name_ps_linux(self, call_mock):
"""
Tests the get_pid_by_name function with the linux variant of ps.
"""
responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
responses["success"] = ["PID", " 1111"]
responses["multiple_results"] = ["PID", " 123", " 456", " 789"]
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_PS_LINUX, responses)
for test_input in responses:
expected_response = 1111 if test_input == "success" else None
self.assertEquals(expected_response, system.get_pid_by_name(test_input))
self.assertEquals([123, 456, 789], system.get_pid_by_name("multiple_results", multiple = True))
@patch('stem.util.system.call')
@patch('stem.util.system.is_bsd', Mock(return_value = True))
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_name_ps_bsd(self, call_mock):
"""
Tests the get_pid_by_name function with the bsd variant of ps.
"""
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_PS_BSD, GET_PID_BY_NAME_PS_BSD)
self.assertEquals(1, system.get_pid_by_name("launchd"))
self.assertEquals(11, system.get_pid_by_name("DirectoryService"))
self.assertEquals(None, system.get_pid_by_name("blarg"))
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_PS_BSD, GET_PID_BY_NAME_PS_BSD_MULTIPLE)
self.assertEquals([1, 41], system.get_pid_by_name("launchd", multiple = True))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_name_lsof(self, call_mock):
"""
Tests the get_pid_by_name function with lsof responses.
"""
responses = dict(GET_PID_BY_NAME_BASE_RESULTS)
responses["success"] = ["1111"]
responses["multiple_results"] = ["123", "456", "789"]
call_mock.side_effect = mock_call(system.GET_PID_BY_NAME_LSOF, responses)
for test_input in responses:
expected_response = 1111 if test_input == "success" else None
self.assertEquals(expected_response, system.get_pid_by_name(test_input))
self.assertEquals([123, 456, 789], system.get_pid_by_name("multiple_results", multiple = True))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_port_netstat(self, call_mock):
"""
Tests the get_pid_by_port function with a netstat response.
"""
call_mock.side_effect = mock_call(system.GET_PID_BY_PORT_NETSTAT, GET_PID_BY_PORT_NETSTAT_RESULTS)
self.assertEquals(1641, system.get_pid_by_port(9051))
self.assertEquals(1641, system.get_pid_by_port("9051"))
self.assertEquals(None, system.get_pid_by_port(631))
self.assertEquals(None, system.get_pid_by_port(123))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_port_sockstat(self, call_mock):
"""
Tests the get_pid_by_port function with a sockstat response.
"""
call_mock.side_effect = mock_call(system.GET_PID_BY_PORT_SOCKSTAT % 9051, GET_PID_BY_PORT_SOCKSTAT_RESULTS)
self.assertEquals(4397, system.get_pid_by_port(9051))
self.assertEquals(4397, system.get_pid_by_port("9051"))
self.assertEquals(None, system.get_pid_by_port(123))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_port_lsof(self, call_mock):
"""
Tests the get_pid_by_port function with a lsof response.
"""
call_mock.side_effect = mock_call(system.GET_PID_BY_PORT_LSOF, GET_PID_BY_PORT_LSOF_RESULTS)
self.assertEquals(1745, system.get_pid_by_port(9051))
self.assertEquals(1745, system.get_pid_by_port("9051"))
self.assertEquals(329, system.get_pid_by_port(80))
self.assertEquals(None, system.get_pid_by_port(123))
@patch('stem.util.system.call')
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_pid_by_open_file_lsof(self, call_mock):
"""
Tests the get_pid_by_open_file function with a lsof response.
"""
lsof_query = system.GET_PID_BY_FILE_LSOF % "/tmp/foo"
call_mock.side_effect = mock_call(lsof_query, ["4762"])
self.assertEquals(4762, system.get_pid_by_open_file("/tmp/foo"))
call_mock.return_value = []
call_mock.side_effect = None
self.assertEquals(None, system.get_pid_by_open_file("/tmp/somewhere_else"))
@patch('stem.util.system.call')
@patch('stem.util.proc.is_available', Mock(return_value = False))
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_cwd_pwdx(self, call_mock):
"""
Tests the get_cwd function with a pwdx response.
"""
responses = {
"3799": ["3799: /home/atagar"],
"5839": ["5839: No such process"],
"1234": ["malformed output"],
"7878": None,
}
call_mock.side_effect = mock_call(system.GET_CWD_PWDX, responses)
for test_input in responses:
expected_response = "/home/atagar" if test_input == "3799" else None
self.assertEquals(expected_response, system.get_cwd(test_input))
@patch('stem.util.system.call')
@patch('stem.util.proc.is_available', Mock(return_value = False))
@patch('stem.util.system.is_available', Mock(return_value = True))
def test_get_cwd_lsof(self, call_mock):
"""
Tests the get_cwd function with a lsof response.
"""
responses = {
"75717": ["p75717", "n/Users/atagar/tor/src/or"],
"1234": ["malformed output"],
"7878": [],
}
call_mock.side_effect = mock_call(system.GET_CWD_LSOF, responses)
for test_input in responses:
expected_response = "/Users/atagar/tor/src/or" if test_input == "75717" else None
self.assertEquals(expected_response, system.get_cwd(test_input))
  @patch('stem.util.system.call')
  @patch('stem.util.system.is_available', Mock(return_value = True))
  def test_get_bsd_jail_id(self, call_mock):
    """
    Tests the get_bsd_jail_id function.
    """
    # ps output is a "JID" header line followed by the jail id; anything
    # malformed or empty should be treated as "not jailed" (id zero)
    responses = {
      "1111": ["JID", " 1"],
      "2222": ["JID", " 0"],
      "3333": ["JID", "bad data"],
      "4444": ["bad data"],
      "5555": [],
      "6666": []
    }
    call_mock.side_effect = mock_call(system.GET_BSD_JAIL_ID_PS, responses)
    for test_input in responses:
      # only the "1111" response reports a nonzero jail id
      expected_response = 1 if test_input == "1111" else 0
      self.assertEquals(expected_response, system.get_bsd_jail_id(test_input))
  @patch('stem.util.system.call')
  @patch('stem.util.system.is_available', Mock(return_value = True))
  def test_get_bsd_jail_path(self, call_mock):
    """
    Tests the get_bsd_jail_path function.
    """
    # check when we don't have a jail
    call_mock.return_value = []
    self.assertEquals(None, system.get_bsd_jail_path(1))
    # a canned jls response for jail id 1 should yield the jail's path
    call_mock.side_effect = mock_call(system.GET_BSD_JAIL_PATH % '1', GET_BSD_JAIL_PATH_RESULTS)
    self.assertEquals("/usr/jails/tor-jail", system.get_bsd_jail_path(1))
@patch('platform.system', Mock(return_value = 'Linux'))
@patch('os.path.join', Mock(side_effect = posixpath.join))
def test_expand_path_unix(self):
"""
Tests the expand_path function. This does not exercise home directory
expansions since that deals with our environment (that's left to integ
tests).
"""
self.assertEquals("", system.expand_path(""))
self.assertEquals("/tmp", system.expand_path("/tmp"))
self.assertEquals("/tmp", system.expand_path("/tmp/"))
self.assertEquals("/tmp", system.expand_path(".", "/tmp"))
self.assertEquals("/tmp", system.expand_path("./", "/tmp"))
self.assertEquals("/tmp/foo", system.expand_path("foo", "/tmp"))
self.assertEquals("/tmp/foo", system.expand_path("./foo", "/tmp"))
@patch('platform.system', Mock(return_value = 'Windows'))
@patch('os.path.join', Mock(side_effect = ntpath.join))
def test_expand_path_windows(self):
"""
Tests the expand_path function on windows. This does not exercise
home directory expansions since that deals with our environment
(that's left to integ tests).
"""
self.assertEquals("", system.expand_path(""))
self.assertEquals("C:\\tmp", system.expand_path("C:\\tmp"))
self.assertEquals("C:\\tmp", system.expand_path("C:\\tmp\\"))
self.assertEquals("C:\\tmp", system.expand_path(".", "C:\\tmp"))
self.assertEquals("C:\\tmp", system.expand_path(".\\", "C:\\tmp"))
self.assertEquals("C:\\tmp\\foo", system.expand_path("foo", "C:\\tmp"))
self.assertEquals("C:\\tmp\\foo", system.expand_path(".\\foo", "C:\\tmp"))
| lgpl-3.0 |
rwakulszowa/servo | tests/wpt/web-platform-tests/tools/py/py/_code/source.py | 174 | 14565 | from __future__ import generators
from bisect import bisect_right
import sys
import inspect, tokenize
import py
from types import ModuleType
cpy_compile = compile
try:
import _ast
from _ast import PyCF_ONLY_AST as _AST_FLAG
except ImportError:
_AST_FLAG = 0
_ast = None
class Source(object):
    """ an immutable object holding a source code fragment,
        possibly deindenting it.
    """
    # counter used to invent unique artificial filenames in compile()
    _compilecounter = 0
    def __init__(self, *parts, **kwargs):
        # accepted keyword arguments: deindent=True (strip common leading
        # whitespace) and rstrip=True (drop trailing blank lines of strings)
        self.lines = lines = []
        de = kwargs.get('deindent', True)
        rstrip = kwargs.get('rstrip', True)
        for part in parts:
            if not part:
                partlines = []
            if isinstance(part, Source):
                partlines = part.lines
            elif isinstance(part, (tuple, list)):
                partlines = [x.rstrip("\n") for x in part]
            elif isinstance(part, py.builtin._basestring):
                partlines = part.split('\n')
                if rstrip:
                    while partlines:
                        if partlines[-1].strip():
                            break
                        partlines.pop()
            else:
                # arbitrary objects (functions, classes, ...) are
                # introspected for their source code
                partlines = getsource(part, deindent=de).lines
            if de:
                partlines = deindent(partlines)
            lines.extend(partlines)
    def __eq__(self, other):
        # Sources compare by their lines; a plain string compares
        # against the joined representation
        try:
            return self.lines == other.lines
        except AttributeError:
            if isinstance(other, str):
                return str(self) == other
            return False
    def __getitem__(self, key):
        # integer index -> single line, slice -> new Source object
        if isinstance(key, int):
            return self.lines[key]
        else:
            if key.step not in (None, 1):
                raise IndexError("cannot slice a Source with a step")
            return self.__getslice__(key.start, key.stop)
    def __len__(self):
        return len(self.lines)
    def __getslice__(self, start, end):
        # kept for python2 compatibility; __getitem__ delegates here
        newsource = Source()
        newsource.lines = self.lines[start:end]
        return newsource
    def strip(self):
        """ return new source object with trailing
            and leading blank lines removed.
        """
        start, end = 0, len(self)
        while start < end and not self.lines[start].strip():
            start += 1
        while end > start and not self.lines[end-1].strip():
            end -= 1
        source = Source()
        source.lines[:] = self.lines[start:end]
        return source
    def putaround(self, before='', after='', indent=' ' * 4):
        """ return a copy of the source object with
            'before' and 'after' wrapped around it.
        """
        before = Source(before)
        after = Source(after)
        newsource = Source()
        lines = [ (indent + line) for line in self.lines]
        newsource.lines = before.lines + lines + after.lines
        return newsource
    def indent(self, indent=' ' * 4):
        """ return a copy of the source object with
            all lines indented by the given indent-string.
        """
        newsource = Source()
        newsource.lines = [(indent+line) for line in self.lines]
        return newsource
    def getstatement(self, lineno, assertion=False):
        """ return Source statement which contains the
            given linenumber (counted from 0).
        """
        start, end = self.getstatementrange(lineno, assertion)
        return self[start:end]
    def getstatementrange(self, lineno, assertion=False):
        """ return (start, end) tuple which spans the minimal
            statement region which containing the given lineno.
        """
        if not (0 <= lineno < len(self)):
            raise IndexError("lineno out of range")
        ast, start, end = getstatementrange_ast(lineno, self)
        return start, end
    def deindent(self, offset=None):
        """ return a new source object deindented by offset.
            If offset is None then guess an indentation offset from
            the first non-blank line.  Subsequent lines which have a
            lower indentation offset will be copied verbatim as
            they are assumed to be part of multilines.
        """
        # XXX maybe use the tokenizer to properly handle multiline
        # strings etc.pp?
        newsource = Source()
        newsource.lines[:] = deindent(self.lines, offset)
        return newsource
    def isparseable(self, deindent=True):
        """ return True if source is parseable, heuristically
            deindenting it by default.
        """
        try:
            import parser
        except ImportError:
            # interpreters without the parser module fall back to compile()
            syntax_checker = lambda x: compile(x, 'asd', 'exec')
        else:
            syntax_checker = parser.suite
        if deindent:
            source = str(self.deindent())
        else:
            source = str(self)
        try:
            #compile(source+'\n', "x", "exec")
            syntax_checker(source+'\n')
        except KeyboardInterrupt:
            raise
        except Exception:
            return False
        else:
            return True
    def __str__(self):
        return "\n".join(self.lines)
    def compile(self, filename=None, mode='exec',
                flag=generators.compiler_flag,
                dont_inherit=0, _genframe=None):
        """ return compiled code object. if filename is None
            invent an artificial filename which displays
            the source/line position of the caller frame.
        """
        if not filename or py.path.local(filename).check(file=0):
            if _genframe is None:
                _genframe = sys._getframe(1) # the caller
            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
            base = "<%d-codegen " % self._compilecounter
            self.__class__._compilecounter += 1
            if not filename:
                filename = base + '%s:%d>' % (fn, lineno)
            else:
                filename = base + '%r %s:%d>' % (filename, fn, lineno)
        source = "\n".join(self.lines) + '\n'
        try:
            co = cpy_compile(source, filename, mode, flag)
        except SyntaxError:
            ex = sys.exc_info()[1]
            # re-represent syntax errors from parsing python strings
            msglines = self.lines[:ex.lineno]
            if ex.offset:
                msglines.append(" "*ex.offset + '^')
            msglines.append("(code was compiled probably from here: %s)" % filename)
            newex = SyntaxError('\n'.join(msglines))
            newex.offset = ex.offset
            newex.lineno = ex.lineno
            newex.text = ex.text
            raise newex
        else:
            if flag & _AST_FLAG:
                # caller asked for the AST; no linecache bookkeeping needed
                return co
            lines = [(x + "\n") for x in self.lines]
            if sys.version_info[0] >= 3:
                # XXX py3's inspect.getsourcefile() checks for a module
                # and a pep302 __loader__ ... we don't have a module
                # at code compile-time so we need to fake it here
                m = ModuleType("_pycodecompile_pseudo_module")
                py.std.inspect.modulesbyfile[filename] = None
                py.std.sys.modules[None] = m
                m.__loader__ = 1
            # register the generated source with linecache so tracebacks
            # can display the compiled-on-the-fly code
            py.std.linecache.cache[filename] = (1, None, lines, filename)
            return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode='exec', flags=
        generators.compiler_flag, dont_inherit=0):
    """ compile the given source to a raw code object,
        and maintain an internal cache which allows later
        retrieval of the source code for the code object
        and any recursively created code objects.
    """
    # AST objects bypass Source handling entirely (Source is line based)
    if _ast is not None and isinstance(source, _ast.AST):
        # XXX should Source support having AST?
        return cpy_compile(source, filename, mode, flags, dont_inherit)
    # capture the caller's frame so Source.compile can invent a filename
    # pointing at the real call site
    _genframe = sys._getframe(1) # the caller
    s = Source(source)
    co = s.compile(filename, mode, flags, _genframe=_genframe)
    return co
def getfslineno(obj):
    """ Return source location (path, lineno) for the given object.
        If the source cannot be determined return ("", -1)
    """
    try:
        code = py.code.Code(obj)
    except TypeError:
        # objects without a code object (e.g. classes) are resolved
        # through inspect based discovery instead
        try:
            fn = (py.std.inspect.getsourcefile(obj) or
                  py.std.inspect.getfile(obj))
        except TypeError:
            return "", -1
        fspath = fn and py.path.local(fn) or None
        lineno = -1
        if fspath:
            try:
                _, lineno = findsource(obj)
            except IOError:
                # source file missing; keep lineno at -1
                pass
    else:
        fspath = code.path
        lineno = code.firstlineno
    assert isinstance(lineno, int)
    return fspath, lineno
#
# helper functions
#
def findsource(obj):
    """ Return (Source, lineno) for *obj*, or (None, -1) when its
        source cannot be located.
    """
    # inspect.findsource can raise nearly anything for dynamically created
    # code, so system-exiting exceptions are re-raised and everything else
    # is deliberately treated as "source unavailable"
    try:
        sourcelines, lineno = py.std.inspect.findsource(obj)
    except py.builtin._sysex:
        raise
    except:
        return None, -1
    source = Source()
    source.lines = [line.rstrip() for line in sourcelines]
    return source, lineno
def getsource(obj, **kwargs):
    """ Return a Source object for the given object's source code;
        keyword arguments are forwarded to the Source constructor.
    """
    obj = py.code.getrawcode(obj)
    try:
        strsrc = inspect.getsource(obj)
    except IndentationError:
        # some python versions raise here for valid code; degrade to a
        # placeholder string rather than failing
        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
    assert isinstance(strsrc, str)
    return Source(strsrc, **kwargs)
def deindent(lines, offset=None):
    """Return ``lines`` with *offset* columns of leading whitespace removed.

    When *offset* is None it is guessed from the first non-blank line.
    Continuation lines of multiline tokens (e.g. triple quoted strings)
    are copied through verbatim since they may legitimately be indented
    less than the surrounding code.
    """
    if offset is None:
        offset = 0
        for candidate in lines:
            expanded = candidate.expandtabs()
            stripped = expanded.lstrip()
            if stripped:
                offset = len(expanded) - len(stripped)
                break
    if not offset:
        return list(lines)

    def _readline(source_lines):
        # tokenize wants a readline() style callable; keep feeding empty
        # strings once the real input is exhausted
        for source_line in source_lines:
            yield source_line + '\n'
        while True:
            yield ''

    result = []
    reader = _readline(lines)
    try:
        tokens = tokenize.generate_tokens(lambda: next(reader))
        for _, _, (start_row, _), (end_row, _), _ in tokens:
            if start_row > len(lines):
                break  # ran past the end of the input
            if start_row > len(result):
                candidate = lines[start_row - 1].expandtabs()
                if candidate.lstrip() and candidate[:offset].isspace():
                    candidate = candidate[offset:]  # deindent
                result.append(candidate)
            # continuation lines of multiline tokens are kept as-is
            result.extend(lines[start_row:end_row])
    except (IndentationError, tokenize.TokenError):
        pass
    # add any lines the tokenizer never reached (e.g. after an error)
    result.extend(lines[len(result):])
    return result
def get_statement_startend2(lineno, node):
    """Return the (start, end) line span of the statement containing
    *lineno* (0-based) within the ast *node*; ``end`` is None when the
    statement is the last one collected.
    """
    import ast
    # Collect the 0-based starting line of every statement and except
    # handler (AST line numbers are 1-based, hence the "- 1").
    statement_starts = []
    for child in ast.walk(node):
        if isinstance(child, (_ast.stmt, _ast.ExceptHandler)):
            statement_starts.append(child.lineno - 1)
            # A "finally"/"else" suite has no node of its own; treat it as
            # a separate statement starting just before its first child.
            for attr in ("finalbody", "orelse"):
                suite = getattr(child, attr, None)
                if suite:
                    statement_starts.append(suite[0].lineno - 2)
    statement_starts.sort()
    index = bisect_right(statement_starts, lineno)
    start = statement_starts[index - 1]
    end = statement_starts[index] if index < len(statement_starts) else None
    return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
    """ Return (astnode, start, end) for the statement containing *lineno*
        of *source*, parsing the source into an ast when *astnode* is not
        supplied.  astnode is None when ast-parsing failed and the old
        heuristic had to be used.
    """
    if astnode is None:
        content = str(source)
        if sys.version_info < (2,7):
            # python2.6 needs a trailing newline to parse reliably
            content += "\n"
        try:
            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
        except ValueError:
            # unparseable source; fall back to the pre-ast heuristic
            start, end = getstatementrange_old(lineno, source, assertion)
            return None, start, end
    start, end = get_statement_startend2(lineno, astnode)
    # we need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        end = len(source.lines)
    if end > start + 1:
        # make sure we don't span differently indented code blocks
        # by using the BlockFinder helper used which inspect.getsource() uses itself
        block_finder = inspect.BlockFinder()
        # if we start with an indented line, put blockfinder to "started" mode
        block_finder.started = source.lines[start][0].isspace()
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            end = block_finder.last + start
        except Exception:
            # tokenization problems; keep the ast-derived end
            pass
    # the end might still point to a comment or empty line, correct it
    while end:
        line = source.lines[end - 1].lstrip()
        if line.startswith("#") or not line:
            end -= 1
        else:
            break
    return astnode, start, end
def getstatementrange_old(lineno, source, assertion=False):
    """ return (start, end) tuple which spans the minimal
        statement region which containing the given lineno.
        raise an IndexError if no such statementrange can be found.
    """
    # XXX this logic is only used on python2.4 and below
    # 1. find the start of the statement
    from codeop import compile_command
    # walk backwards from lineno until a prefix compiles cleanly
    for start in range(lineno, -1, -1):
        if assertion:
            line = source.lines[start]
            # the following lines are not fully tested, change with care
            if 'super' in line and 'self' in line and '__init__' in line:
                raise IndexError("likely a subclass")
            if "assert" not in line and "raise" not in line:
                continue
        trylines = source.lines[start:lineno+1]
        # quick hack to prepare parsing an indented line with
        # compile_command() (which errors on "return" outside defs)
        trylines.insert(0, 'def xxx():')
        trysource = '\n '.join(trylines)
        # ^ space here
        try:
            compile_command(trysource)
        except (SyntaxError, OverflowError, ValueError):
            continue
        # 2. find the end of the statement
        for end in range(lineno+1, len(source)+1):
            trysource = source[start:end]
            if trysource.isparseable():
                return start, end
    raise SyntaxError("no valid source range around line %d " % (lineno,))
| mpl-2.0 |
willworks/pyspider | pyspider/database/sqlite/projectdb.py | 81 | 1836 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-09 12:05:52
import time
from .sqlitebase import SQLiteMixin
from pyspider.database.base.projectdb import ProjectDB as BaseProjectDB
from pyspider.database.basedb import BaseDB
class ProjectDB(SQLiteMixin, BaseProjectDB, BaseDB):
    """SQLite backed implementation of the project database.

    Stores one row per project; rows are keyed by the unique project name.
    """
    __tablename__ = 'projectdb'
    placeholder = '?'

    def __init__(self, path):
        """Open (and, if needed, create the schema of) the database at *path*."""
        self.path = path
        self.last_pid = 0
        self.conn = None
        # create the table lazily so a fresh database file works out of the box
        self._execute('''CREATE TABLE IF NOT EXISTS `%s` (
            name PRIMARY KEY,
            `group`,
            status, script, comments,
            rate, burst, updatetime
            )''' % self.__tablename__)

    def insert(self, name, obj=None):
        """Insert project *name* with the column values from mapping *obj*.

        *obj* defaults to None rather than a mutable ``{}`` to avoid the
        shared-mutable-default pitfall; a fresh copy is taken either way.
        """
        obj = dict(obj) if obj else {}
        obj['name'] = name
        obj['updatetime'] = time.time()
        return self._insert(**obj)

    def update(self, name, obj=None, **kwargs):
        """Update project *name* from *obj* plus keyword overrides.

        Returns the number of affected rows (0 when *name* is unknown).
        """
        obj = dict(obj) if obj else {}
        obj.update(kwargs)
        obj['updatetime'] = time.time()
        ret = self._update(where="`name` = %s" % self.placeholder, where_values=(name, ), **obj)
        return ret.rowcount

    def get_all(self, fields=None):
        """Iterate over every project as a dict, restricted to *fields*."""
        return self._select2dic(what=fields)

    def get(self, name, fields=None):
        """Return the project dict for *name*, or None when it does not exist."""
        where = "`name` = %s" % self.placeholder
        for each in self._select2dic(what=fields, where=where, where_values=(name, )):
            return each
        return None

    def check_update(self, timestamp, fields=None):
        """Iterate over projects updated at or after *timestamp* (a float)."""
        where = "`updatetime` >= %f" % timestamp
        return self._select2dic(what=fields, where=where)

    def drop(self, name):
        """Delete the project *name*."""
        where = "`name` = %s" % self.placeholder
        return self._delete(where=where, where_values=(name, ))
| apache-2.0 |
MattCCS/PyVault | site-packages/cryptography/hazmat/primitives/ciphers/base.py | 34 | 6169 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, AlreadyUpdated, NotYetFinalized, UnsupportedAlgorithm,
_Reasons
)
from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import modes
@six.add_metaclass(abc.ABCMeta)
class CipherAlgorithm(object):
    # Abstract interface every cipher algorithm (AES, Camellia, ...) provides.
    @abc.abstractproperty
    def name(self):
        """
        A string naming this mode (e.g. "AES", "Camellia").
        """
    @abc.abstractproperty
    def key_size(self):
        """
        The size of the key being used as an integer in bits (e.g. 128, 256).
        """
@six.add_metaclass(abc.ABCMeta)
class BlockCipherAlgorithm(object):
    # Extra interface for block ciphers, which additionally have a block size.
    @abc.abstractproperty
    def block_size(self):
        """
        The size of a block as an integer in bits (e.g. 64, 128).
        """
@six.add_metaclass(abc.ABCMeta)
class CipherContext(object):
    # Abstract streaming interface for an in-progress encryption/decryption.
    @abc.abstractmethod
    def update(self, data):
        """
        Processes the provided bytes through the cipher and returns the results
        as bytes.
        """
    @abc.abstractmethod
    def finalize(self):
        """
        Returns the results of processing the final block as bytes.
        """
@six.add_metaclass(abc.ABCMeta)
class AEADCipherContext(object):
    # Abstract interface for contexts that accept additional authenticated data.
    @abc.abstractmethod
    def authenticate_additional_data(self, data):
        """
        Authenticates the provided bytes.
        """
@six.add_metaclass(abc.ABCMeta)
class AEADEncryptionContext(object):
    # Abstract interface for AEAD encryption, which produces an auth tag.
    @abc.abstractproperty
    def tag(self):
        """
        Returns tag bytes. This is only available after encryption is
        finalized.
        """
class Cipher(object):
    """
    Pairs a CipherAlgorithm with a mode of operation and a backend which
    supplies the concrete encryption/decryption contexts.
    """
    def __init__(self, algorithm, mode, backend):
        if not isinstance(backend, CipherBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement CipherBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        if not isinstance(algorithm, CipherAlgorithm):
            raise TypeError("Expected interface of CipherAlgorithm.")
        if mode is not None:
            # let the mode sanity-check itself against this algorithm
            mode.validate_for_algorithm(algorithm)
        self.algorithm = algorithm
        self.mode = mode
        self._backend = backend
    def encryptor(self):
        # an AEAD tag is *produced* by encryption, it must not be supplied
        if isinstance(self.mode, modes.ModeWithAuthenticationTag):
            if self.mode.tag is not None:
                raise ValueError(
                    "Authentication tag must be None when encrypting."
                )
        ctx = self._backend.create_symmetric_encryption_ctx(
            self.algorithm, self.mode
        )
        return self._wrap_ctx(ctx, encrypt=True)
    def decryptor(self):
        # decryption verifies the caller-supplied tag, so it is required
        if isinstance(self.mode, modes.ModeWithAuthenticationTag):
            if self.mode.tag is None:
                raise ValueError(
                    "Authentication tag must be provided when decrypting."
                )
        ctx = self._backend.create_symmetric_decryption_ctx(
            self.algorithm, self.mode
        )
        return self._wrap_ctx(ctx, encrypt=False)
    def _wrap_ctx(self, ctx, encrypt):
        # AEAD modes get wrapper contexts that track tags and byte limits
        if isinstance(self.mode, modes.ModeWithAuthenticationTag):
            if encrypt:
                return _AEADEncryptionContext(ctx)
            else:
                return _AEADCipherContext(ctx)
        else:
            return _CipherContext(ctx)
@utils.register_interface(CipherContext)
class _CipherContext(object):
    # Thin wrapper around a backend context which guards against use
    # after finalization (self._ctx is set to None once finalized).
    def __init__(self, ctx):
        self._ctx = ctx
    def update(self, data):
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")
        return self._ctx.update(data)
    def finalize(self):
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")
        data = self._ctx.finalize()
        self._ctx = None
        return data
@utils.register_interface(AEADCipherContext)
@utils.register_interface(CipherContext)
class _AEADCipherContext(object):
    # Wraps a backend AEAD context, enforcing the mode's processed-byte
    # limits and the "all AAD before any data" ordering requirement.
    def __init__(self, ctx):
        self._ctx = ctx
        self._bytes_processed = 0
        self._aad_bytes_processed = 0
        self._tag = None
        self._updated = False
    def update(self, data):
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")
        # once data flows, no further AAD may be supplied
        self._updated = True
        self._bytes_processed += len(data)
        if self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES:
            raise ValueError(
                "{0} has a maximum encrypted byte limit of {1}".format(
                    self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES
                )
            )
        return self._ctx.update(data)
    def finalize(self):
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")
        data = self._ctx.finalize()
        # preserve the tag before dropping the backend context
        self._tag = self._ctx.tag
        self._ctx = None
        return data
    def authenticate_additional_data(self, data):
        if self._ctx is None:
            raise AlreadyFinalized("Context was already finalized.")
        if self._updated:
            raise AlreadyUpdated("Update has been called on this context.")
        self._aad_bytes_processed += len(data)
        if self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES:
            raise ValueError(
                "{0} has a maximum AAD byte limit of {1}".format(
                    self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES
                )
            )
        self._ctx.authenticate_additional_data(data)
@utils.register_interface(AEADEncryptionContext)
class _AEADEncryptionContext(_AEADCipherContext):
    # Adds tag access on top of _AEADCipherContext for the encryption side.
    @property
    def tag(self):
        # the tag only exists once finalize() has cleared self._ctx
        if self._ctx is not None:
            raise NotYetFinalized("You must finalize encryption before "
                                  "getting the tag.")
        return self._tag
| mit |
FRC-Team-3140/north-american-happiness | lib/python2.7/site-packages/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
    """Tests for the {% for %} construct: loop variables, recursion,
    filtering, scoping and unpacking."""
    def test_simple(self):
        tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
        assert tmpl.render(seq=list(range(10))) == '0123456789'
    def test_else(self):
        # the else branch renders only when the sequence is empty/undefined
        tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
        assert tmpl.render() == '...'
    def test_empty_blocks(self):
        tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
        assert tmpl.render() == '<>'
    def test_context_vars(self):
        # exercises all special "loop" context attributes at once
        tmpl = env.from_string('''{% for item in seq -%}
            {{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
            loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
            loop.length }}###{% endfor %}''')
        one, two, _ = tmpl.render(seq=[0, 1]).split('###')
        (one_index, one_index0, one_revindex, one_revindex0, one_first,
         one_last, one_length) = one.split('|')
        (two_index, two_index0, two_revindex, two_revindex0, two_first,
         two_last, two_length) = two.split('|')
        assert int(one_index) == 1 and int(two_index) == 2
        assert int(one_index0) == 0 and int(two_index0) == 1
        assert int(one_revindex) == 2 and int(two_revindex) == 1
        assert int(one_revindex0) == 1 and int(two_revindex0) == 0
        assert one_first == 'True' and two_first == 'False'
        assert one_last == 'False' and two_last == 'True'
        assert one_length == two_length == '2'
    def test_cycling(self):
        # loop.cycle accepts literal arguments or an unpacked sequence
        tmpl = env.from_string('''{% for item in seq %}{{
            loop.cycle('<1>', '<2>') }}{% endfor %}{%
            for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
        output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
        assert output == '<1><2>' * 4
    def test_scope(self):
        # the loop target must not leak out of the loop
        tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
        output = tmpl.render(seq=list(range(10)))
        assert not output
    def test_varlen(self):
        # iterables without a known length (generators) must work too
        def inner():
            for item in range(5):
                yield item
        tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
        output = tmpl.render(iter=inner())
        assert output == '01234'
    def test_noniter(self):
        tmpl = env.from_string('{% for item in none %}...{% endfor %}')
        self.assert_raises(TypeError, tmpl.render)
    def test_recursive(self):
        tmpl = env.from_string('''{% for item in seq recursive -%}
            [{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
        {%- endfor %}''')
        assert tmpl.render(seq=[
            dict(a=1, b=[dict(a=1), dict(a=2)]),
            dict(a=2, b=[dict(a=1), dict(a=2)]),
            dict(a=3, b=[dict(a='a')])
        ]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
    def test_recursive_depth0(self):
        # loop.depth0 starts at 0 on the outermost level
        tmpl = env.from_string('''{% for item in seq recursive -%}
        [{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
        {%- endfor %}''')
        self.assertEqual(tmpl.render(seq=[
            dict(a=1, b=[dict(a=1), dict(a=2)]),
            dict(a=2, b=[dict(a=1), dict(a=2)]),
            dict(a=3, b=[dict(a='a')])
        ]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
    def test_recursive_depth(self):
        # loop.depth starts at 1 on the outermost level
        tmpl = env.from_string('''{% for item in seq recursive -%}
        [{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
        {%- endfor %}''')
        self.assertEqual(tmpl.render(seq=[
            dict(a=1, b=[dict(a=1), dict(a=2)]),
            dict(a=2, b=[dict(a=1), dict(a=2)]),
            dict(a=3, b=[dict(a='a')])
        ]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
    def test_looploop(self):
        # the outer loop object can be preserved via {% set %}
        tmpl = env.from_string('''{% for row in table %}
            {%- set rowloop = loop -%}
            {% for cell in row -%}
                [{{ rowloop.index }}|{{ loop.index }}]
            {%- endfor %}
        {%- endfor %}''')
        assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
    def test_reversed_bug(self):
        tmpl = env.from_string('{% for i in items %}{{ i }}'
                               '{% if not loop.last %}'
                               ',{% endif %}{% endfor %}')
        assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
    def test_loop_errors(self):
        # "loop" is undefined inside the loop's own filter condition
        tmpl = env.from_string('''{% for item in [1] if loop.index
                                      == 0 %}...{% endfor %}''')
        self.assert_raises(UndefinedError, tmpl.render)
        tmpl = env.from_string('''{% for item in [] %}...{% else
            %}{{ loop }}{% endfor %}''')
        assert tmpl.render() == ''
    def test_loop_filter(self):
        tmpl = env.from_string('{% for item in range(10) if item '
                               'is even %}[{{ item }}]{% endfor %}')
        assert tmpl.render() == '[0][2][4][6][8]'
        # loop.index counts only the items that passed the filter
        tmpl = env.from_string('''
            {%- for item in range(10) if item is even %}[{{
                loop.index }}:{{ item }}]{% endfor %}''')
        assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
    def test_loop_unassignable(self):
        # "loop" is a reserved name and cannot be used as loop target
        self.assert_raises(TemplateSyntaxError, env.from_string,
                           '{% for loop in seq %}...{% endfor %}')
    def test_scoped_special_var(self):
        t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
                            '|{{ loop.first }}{% endfor %}]{% endfor %}')
        assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
    def test_scoped_loop_var(self):
        t = env.from_string('{% for x in seq %}{{ loop.first }}'
                            '{% for y in seq %}{% endfor %}{% endfor %}')
        assert t.render(seq='ab') == 'TrueFalse'
        t = env.from_string('{% for x in seq %}{% for y in seq %}'
                            '{{ loop.first }}{% endfor %}{% endfor %}')
        assert t.render(seq='ab') == 'TrueFalseTrueFalse'
    def test_recursive_empty_loop_iter(self):
        t = env.from_string('''
        {%- for item in foo recursive -%}{%- endfor -%}
        ''')
        assert t.render(dict(foo=[])) == ''
    def test_call_in_loop(self):
        t = env.from_string('''
        {%- macro do_something() -%}
            [{{ caller() }}]
        {%- endmacro %}
        {%- for i in [1, 2, 3] %}
            {%- call do_something() -%}
                {{ i }}
            {%- endcall %}
        {%- endfor -%}
        ''')
        assert t.render() == '[1][2][3]'
    def test_scoping_bug(self):
        t = env.from_string('''
        {%- for item in foo %}...{{ item }}...{% endfor %}
        {%- macro item(a) %}...{{ a }}...{% endmacro %}
        {{- item(2) -}}
        ''')
        assert t.render(foo=(1,)) == '...1......2...'
    def test_unpacking(self):
        tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
                               '{{ a }}|{{ b }}|{{ c }}{% endfor %}')
        assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
    """Tests for {% if %}/{% elif %}/{% else %} conditions."""
    def test_simple(self):
        tmpl = env.from_string('''{% if true %}...{% endif %}''')
        assert tmpl.render() == '...'
    def test_elif(self):
        tmpl = env.from_string('''{% if false %}XXX{% elif true
            %}...{% else %}XXX{% endif %}''')
        assert tmpl.render() == '...'
    def test_else(self):
        tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
        assert tmpl.render() == '...'
    def test_empty(self):
        tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
        assert tmpl.render() == '[]'
    def test_complete(self):
        tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
                               'C{% else %}D{% endif %}')
        assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
    def test_no_scope(self):
        # {% set %} inside an if-block must affect the enclosing scope
        tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert tmpl.render(a=True) == '1'
        tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
    """Tests for {% macro %} definitions, calls and the macro API."""
    # trim_blocks keeps the template literals below readable
    env = Environment(trim_blocks=True)
    def test_simple(self):
        tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
        assert tmpl.render() == 'Hello Peter!'
    def test_scoping(self):
        # inner macros close over the arguments of the defining macro
        tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
        assert tmpl.render() == 'foo|bar'
    def test_arguments(self):
        # missing arguments render as empty, defaults fill in c and d
        tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
        assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
    def test_varargs(self):
        # extra positional arguments are collected in "varargs"
        tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
        assert tmpl.render() == '1|2|3'
    def test_simple_call(self):
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
        assert tmpl.render() == '[[data]]'
    def test_complex_call(self):
        # {% call(...) %} passes arguments back into the call block
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
        assert tmpl.render() == '[[data]]'
    def test_caller_undefined(self):
        # "caller" is only defined inside macros invoked via {% call %}
        tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
        assert tmpl.render() == 'True'
    def test_include(self):
        self.env = Environment(loader=DictLoader({'include':
            '{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
        tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
        assert tmpl.render() == '[foo]'
    def test_macro_api(self):
        # introspection attributes exposed on macro objects
        tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
                                    '{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
                                    '{% macro baz() %}{{ caller() }}{% endmacro %}')
        assert tmpl.module.foo.arguments == ('a', 'b')
        assert tmpl.module.foo.defaults == ()
        assert tmpl.module.foo.name == 'foo'
        assert not tmpl.module.foo.caller
        assert not tmpl.module.foo.catch_kwargs
        assert not tmpl.module.foo.catch_varargs
        assert tmpl.module.bar.arguments == ()
        assert tmpl.module.bar.defaults == ()
        assert not tmpl.module.bar.caller
        assert tmpl.module.bar.catch_kwargs
        assert tmpl.module.bar.catch_varargs
        assert tmpl.module.baz.caller
    def test_callself(self):
        # macros may call themselves recursively by name
        tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
                                    '{{ foo(x - 1) }}{% endif %}{% endmacro %}'
                                    '{{ foo(5) }}')
        assert tmpl.render() == '5|4|3|2|1'
def suite():
    """Collect this module's test cases into a single TestSuite."""
    result = unittest.TestSuite()
    for case in (ForLoopTestCase, IfConditionTestCase, MacrosTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
achamberland/react-view-cube | node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
  """ Appends the XML parts corresponding to the specification.

  Args:
    xml_parts: A list of XML parts to be appended to.
    specification: The specification of the element.  See EasyXml docs.
    pretty: True if we want pretty printing with indents and new lines.
    level: Indentation level.
  """
  # The first item in a specification is the name of the element.
  if pretty:
    indentation = '  ' * level
    new_line = '\n'
  else:
    indentation = ''
    new_line = ''
  name = specification[0]
  if not isinstance(name, str):
    raise Exception('The first item of an EasyXml specification should be '
                    'a string.  Specification was ' + str(specification))
  xml_parts.append(indentation + '<' + name)

  # Optionally in second position is a dictionary of the attributes.
  rest = specification[1:]
  if rest and isinstance(rest[0], dict):
    # items() instead of the Python-2-only iteritems() keeps this working on
    # both Python 2 and 3; sorting makes the attribute order deterministic.
    for at, val in sorted(rest[0].items()):
      xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
    rest = rest[1:]
  if rest:
    xml_parts.append('>')
    # all() replaces the Python-2-only builtin reduce()-with-lambda and also
    # short-circuits on the first non-string child.
    all_strings = all(isinstance(y, str) for y in rest)
    multi_line = not all_strings
    if multi_line and new_line:
      xml_parts.append(new_line)
    for child_spec in rest:
      # If it's a string, append a text node.
      # Otherwise recurse over that child definition.
      if isinstance(child_spec, str):
        xml_parts.append(_XmlEscape(child_spec))
      else:
        _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
    if multi_line and indentation:
      xml_parts.append(indentation)
    xml_parts.append('</%s>%s' % (name, new_line))
  else:
    # No children/text: emit a self-closing tag.
    xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to normalize line endings to CRLF when not on Windows.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')

  try:
    xml_string = xml_string.encode(encoding)
  except Exception:
    # Fallback for 8-bit strings that are not valid in `encoding` (Python 2).
    xml_string = unicode(xml_string, 'latin-1').encode(encoding)

  # Get the old content; `with` guarantees the handle is closed even if
  # read() raises (the original leaked it and used a bare except).
  try:
    with open(path, 'r') as f:
      existing = f.read()
  except (IOError, OSError):
    # Missing/unreadable file counts as "changed" so it gets (re)written.
    existing = None

  # It has changed, write it.
  if existing != xml_string:
    with open(path, 'w') as f:
      f.write(xml_string)
# Mapping from characters that are unsafe in XML text/attribute content to
# their XML character-entity escapes.  (The previous version of this table
# had been corrupted by HTML-entity decoding: every value was the unescaped
# character itself, and the \n/\r entries contained literal newlines inside
# the string quotes, which is a syntax error.)
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}


# One alternation group matching any single character that needs escaping.
_xml_escape_re = re.compile(
    "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
  """ Escape a string for inclusion in XML."""
  def _substitute(match):
    ch = match.group(0)
    # Single quotes are legal inside attribute values (which are emitted
    # with double quotes), so leave them alone in attr context.
    if attr and ch == "'":
      return ch
    return _xml_escape_map[ch]
  return _xml_escape_re.sub(_substitute, value)
| mit |
ehirt/odoo | addons/account/wizard/account_report_general_ledger.py | 36 | 3176 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
    """Transient wizard that collects printing options and launches the
    General Ledger report (OpenERP osv_memory model)."""
    _inherit = "account.common.account.report"
    _name = "account.report.general.ledger"
    _description = "General Ledger Report"

    # Options shown to the user in the wizard form before printing.
    _columns = {
        'landscape': fields.boolean("Landscape Mode"),
        'initial_balance': fields.boolean('Include Initial Balances',
            help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
        'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
        'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'landscape': True,
        'amount_currency': True,
        'sortby': 'sort_date',
        'initial_balance': False,
    }

    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
        # Client-side onchange: with no fiscal year selected, an initial
        # balance makes no sense, so force the checkbox back off.
        res = {}
        if not fiscalyear:
            res['value'] = {'initial_balance': False}
        return res

    def _print_report(self, cr, uid, ids, data, context=None):
        # Merge this wizard's own options into the common report data and
        # hand off to the report engine for rendering.
        context = dict(context or {})
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
        if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
            data['form'].update({'initial_balance': False})
        # 'landscape' is forwarded through the context only when set;
        # otherwise it is dropped from the form values entirely.
        if data['form']['landscape'] is False:
            data['form'].pop('landscape')
        else:
            context['landscape'] = data['form']['landscape']
        return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223117cda/w17test | static/Brython3.1.0-20150301-090019/Lib/genericpath.py | 727 | 3093 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links."""
    try:
        os.stat(path)
        return True
    except os.error:
        return False
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file (follows symlinks)."""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        # Nonexistent or inaccessible path: not a regular file.
        return False
    return stat.S_ISREG(mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory
    (follows symlinks)."""
    try:
        mode = os.stat(s).st_mode
    except os.error:
        # Nonexistent or inaccessible path: not a directory.
        return False
    return stat.S_ISDIR(mode)
def getsize(filename):
    """Return the size in bytes of *filename*, as reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size


def getmtime(filename):
    """Return the last-modification time of *filename* (os.stat().st_mtime)."""
    st = os.stat(filename)
    return st.st_mtime


def getatime(filename):
    """Return the last-access time of *filename* (os.stat().st_atime)."""
    st = os.stat(filename)
    return st.st_atime


def getctime(filename):
    """Return the metadata-change time of *filename* (os.stat().st_ctime)."""
    st = os.stat(filename)
    return st.st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic min and max of the list are its two extremes; any
    # leading run shared by both is shared by every element in between.
    lo = min(m)
    hi = max(m)
    for idx in range(len(lo)):
        if lo[idx] != hi[idx]:
            return lo[:idx]
    return lo
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
    """Split the extension from a pathname.

    Extension is everything from the last dot to the end, ignoring
    leading dots.  Returns "(root, ext)"; ext may be empty."""
    # NOTE: This code must work for text and bytes strings.
    last_sep = p.rfind(sep)
    if altsep:
        last_sep = max(last_sep, p.rfind(altsep))

    last_dot = p.rfind(extsep)
    if last_dot > last_sep:
        # The dot only starts an extension if the filename has at least one
        # non-dot character before it (so ".cshrc" has no extension).
        pos = last_sep + 1
        while pos < last_dot:
            if p[pos:pos + 1] != extsep:
                return p[:last_dot], p[last_dot:]
            pos += 1
    # No extension: root is the whole path, ext is empty (same type as p).
    return p, p[:0]
| gpl-3.0 |
BigRoy/lucidity | source/lucidity/vendor/yaml/_cyaml.py | 537 | 3290 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    # Parses with the libyaml-backed CParser but builds only basic node
    # objects and resolves only implicit base tags.
    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)


class CSafeLoader(CParser, SafeConstructor, Resolver):
    # Safe variant: constructs only standard YAML tags, never arbitrary
    # Python objects.
    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)


class CLoader(CParser, Constructor, Resolver):
    # Full loader: pairs the C parser with the full Constructor, which the
    # PyYAML docs say can build arbitrary Python objects -- use only on
    # trusted input.
    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    # Base dumper built on the libyaml-backed CEmitter.
    # NOTE(review): the declared bases are BaseRepresenter/BaseResolver, yet
    # __init__ chains to Representer/Resolver below.  This matches upstream
    # PyYAML, but confirm the asymmetry is intentional before "fixing" it.
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Emission options are forwarded verbatim to the C emitter; style
        # options go to the representer.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)


class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    # Safe variant: represents only standard YAML-serializable types.
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)


class CDumper(CEmitter, Serializer, Representer, Resolver):
    # Full dumper: the default `yaml.dump` backend when libyaml is present.
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| apache-2.0 |
barbarubra/Don-t-know-What-i-m-doing. | python/src/Lib/test/test_nis.py | 58 | 1317 | from test import test_support
import unittest
import nis
class NisTests(unittest.TestCase):
    """Smoke test for the Unix NIS (YP) bindings (Python 2 syntax)."""

    def test_maps(self):
        # Verify that for at least one key in some NIS map, nis.match()
        # agrees with the full-map dump from nis.cat().
        try:
            maps = nis.maps()
        except nis.error, msg:
            # NIS is probably not active, so this test isn't useful
            if test_support.verbose:
                print "Test Skipped:", msg
            # Can't raise TestSkipped as regrtest only recognizes the exception
            # import time.
            return
        try:
            # On some systems, this map is only accessible to the
            # super user
            maps.remove("passwd.adjunct.byname")
        except ValueError:
            pass

        # `done` flags that one key has been checked; we check at most one
        # key across all maps to keep the test fast.
        done = 0
        for nismap in maps:
            mapping = nis.cat(nismap)
            for k, v in mapping.items():
                # Skip empty keys; presumably these are artifacts of some
                # maps -- TODO confirm why they occur.
                if not k:
                    continue
                if nis.match(k, nismap) != v:
                    self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
                else:
                    # just test the one key, otherwise this test could take a
                    # very long time
                    done = 1
                    break
            if done:
                break
def test_main():
    # Delegate to regrtest's runner so its verbosity/skip handling applies.
    test_support.run_unittest(NisTests)


if __name__ == '__main__':
    test_main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.